]> git.ipfire.org Git - thirdparty/linux.git/blame - net/core/filter.c
test_bpf: flag tests that cannot be jited on s390
[thirdparty/linux.git] / net / core / filter.c
CommitLineData
1da177e4
LT
1/*
2 * Linux Socket Filter - Kernel level socket filtering
3 *
bd4cf0ed
AS
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
1da177e4 6 *
bd4cf0ed
AS
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8 *
9 * Authors:
10 *
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
1da177e4
LT
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * Andi Kleen - Fix a few bad bugs and races.
4df95ff4 21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
1da177e4
LT
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
1da177e4
LT
26#include <linux/mm.h>
27#include <linux/fcntl.h>
28#include <linux/socket.h>
91b8270f 29#include <linux/sock_diag.h>
1da177e4
LT
30#include <linux/in.h>
31#include <linux/inet.h>
32#include <linux/netdevice.h>
33#include <linux/if_packet.h>
c491680f 34#include <linux/if_arp.h>
5a0e3ad6 35#include <linux/gfp.h>
d74bad4e 36#include <net/inet_common.h>
1da177e4
LT
37#include <net/ip.h>
38#include <net/protocol.h>
4738c1db 39#include <net/netlink.h>
1da177e4
LT
40#include <linux/skbuff.h>
41#include <net/sock.h>
10b89ee4 42#include <net/flow_dissector.h>
1da177e4
LT
43#include <linux/errno.h>
44#include <linux/timer.h>
7c0f6ba6 45#include <linux/uaccess.h>
40daafc8 46#include <asm/unaligned.h>
d66f2b91 47#include <asm/cmpxchg.h>
1da177e4 48#include <linux/filter.h>
86e4ca66 49#include <linux/ratelimit.h>
46b325c7 50#include <linux/seccomp.h>
f3335031 51#include <linux/if_vlan.h>
89aa0758 52#include <linux/bpf.h>
d691f9e8 53#include <net/sch_generic.h>
8d20aabe 54#include <net/cls_cgroup.h>
d3aa45ce 55#include <net/dst_metadata.h>
c46646d0 56#include <net/dst.h>
538950a1 57#include <net/sock_reuseport.h>
b1d9fc41 58#include <net/busy_poll.h>
8c4b4c7e 59#include <net/tcp.h>
12bed760 60#include <net/xfrm.h>
5acaee0a 61#include <linux/bpf_trace.h>
02671e23 62#include <net/xdp_sock.h>
87f5fc7e
DA
63#include <linux/inetdevice.h>
64#include <net/ip_fib.h>
65#include <net/flow.h>
66#include <net/arp.h>
fe94cc29
MX
67#include <net/ipv6.h>
68#include <linux/seg6_local.h>
69#include <net/seg6.h>
70#include <net/seg6_local.h>
1da177e4 71
43db6d65 72/**
f4979fce 73 * sk_filter_trim_cap - run a packet through a socket filter
43db6d65
SH
74 * @sk: sock associated with &sk_buff
75 * @skb: buffer to filter
f4979fce 76 * @cap: limit on how short the eBPF program may trim the packet
43db6d65 77 *
ff936a04
AS
78 * Run the eBPF program and then cut skb->data to correct size returned by
79 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
43db6d65 80 * than pkt_len we keep whole skb->data. This is the socket level
ff936a04 81 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
43db6d65
SH
82 * be accepted or -EPERM if the packet should be tossed.
83 *
84 */
f4979fce 85int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
43db6d65
SH
86{
87 int err;
88 struct sk_filter *filter;
89
c93bdd0e
MG
90 /*
91 * If the skb was allocated from pfmemalloc reserves, only
92 * allow SOCK_MEMALLOC sockets to use it as this socket is
93 * helping free memory
94 */
8fe809a9
ED
95 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
96 NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
c93bdd0e 97 return -ENOMEM;
8fe809a9 98 }
c11cd3a6
DM
99 err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
100 if (err)
101 return err;
102
43db6d65
SH
103 err = security_sock_rcv_skb(sk, skb);
104 if (err)
105 return err;
106
80f8f102
ED
107 rcu_read_lock();
108 filter = rcu_dereference(sk->sk_filter);
43db6d65 109 if (filter) {
8f917bba
WB
110 struct sock *save_sk = skb->sk;
111 unsigned int pkt_len;
112
113 skb->sk = sk;
114 pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
8f917bba 115 skb->sk = save_sk;
d1f496fd 116 err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
43db6d65 117 }
80f8f102 118 rcu_read_unlock();
43db6d65
SH
119
120 return err;
121}
f4979fce 122EXPORT_SYMBOL(sk_filter_trim_cap);
43db6d65 123
b390134c 124BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
bd4cf0ed 125{
f3694e00 126 return skb_get_poff(skb);
bd4cf0ed
AS
127}
128
b390134c 129BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
bd4cf0ed 130{
bd4cf0ed
AS
131 struct nlattr *nla;
132
133 if (skb_is_nonlinear(skb))
134 return 0;
135
05ab8f26
MK
136 if (skb->len < sizeof(struct nlattr))
137 return 0;
138
30743837 139 if (a > skb->len - sizeof(struct nlattr))
bd4cf0ed
AS
140 return 0;
141
30743837 142 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
bd4cf0ed
AS
143 if (nla)
144 return (void *) nla - (void *) skb->data;
145
146 return 0;
147}
148
b390134c 149BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
bd4cf0ed 150{
bd4cf0ed
AS
151 struct nlattr *nla;
152
153 if (skb_is_nonlinear(skb))
154 return 0;
155
05ab8f26
MK
156 if (skb->len < sizeof(struct nlattr))
157 return 0;
158
30743837 159 if (a > skb->len - sizeof(struct nlattr))
bd4cf0ed
AS
160 return 0;
161
30743837
DB
162 nla = (struct nlattr *) &skb->data[a];
163 if (nla->nla_len > skb->len - a)
bd4cf0ed
AS
164 return 0;
165
30743837 166 nla = nla_find_nested(nla, x);
bd4cf0ed
AS
167 if (nla)
168 return (void *) nla - (void *) skb->data;
169
170 return 0;
171}
172
e0cea7ce
DB
173BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
174 data, int, headlen, int, offset)
175{
176 u8 tmp, *ptr;
177 const int len = sizeof(tmp);
178
179 if (offset >= 0) {
180 if (headlen - offset >= len)
181 return *(u8 *)(data + offset);
182 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
183 return tmp;
184 } else {
185 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
186 if (likely(ptr))
187 return *(u8 *)ptr;
188 }
189
190 return -EFAULT;
191}
192
193BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
194 int, offset)
195{
196 return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
197 offset);
198}
199
200BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
201 data, int, headlen, int, offset)
202{
203 u16 tmp, *ptr;
204 const int len = sizeof(tmp);
205
206 if (offset >= 0) {
207 if (headlen - offset >= len)
208 return get_unaligned_be16(data + offset);
209 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
210 return be16_to_cpu(tmp);
211 } else {
212 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
213 if (likely(ptr))
214 return get_unaligned_be16(ptr);
215 }
216
217 return -EFAULT;
218}
219
220BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
221 int, offset)
222{
223 return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
224 offset);
225}
226
227BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
228 data, int, headlen, int, offset)
229{
230 u32 tmp, *ptr;
231 const int len = sizeof(tmp);
232
233 if (likely(offset >= 0)) {
234 if (headlen - offset >= len)
235 return get_unaligned_be32(data + offset);
236 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
237 return be32_to_cpu(tmp);
238 } else {
239 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
240 if (likely(ptr))
241 return get_unaligned_be32(ptr);
242 }
243
244 return -EFAULT;
245}
246
247BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
248 int, offset)
249{
250 return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
251 offset);
252}
253
b390134c 254BPF_CALL_0(bpf_get_raw_cpu_id)
bd4cf0ed
AS
255{
256 return raw_smp_processor_id();
257}
258
80b48c44 259static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
b390134c 260 .func = bpf_get_raw_cpu_id,
80b48c44
DB
261 .gpl_only = false,
262 .ret_type = RET_INTEGER,
263};
264
9bac3d6d
AS
265static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
266 struct bpf_insn *insn_buf)
267{
268 struct bpf_insn *insn = insn_buf;
269
270 switch (skb_field) {
271 case SKF_AD_MARK:
272 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
273
274 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
275 offsetof(struct sk_buff, mark));
276 break;
277
278 case SKF_AD_PKTTYPE:
279 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
280 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
281#ifdef __BIG_ENDIAN_BITFIELD
282 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
283#endif
284 break;
285
286 case SKF_AD_QUEUE:
287 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
288
289 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
290 offsetof(struct sk_buff, queue_mapping));
291 break;
c2497395 292
c2497395
AS
293 case SKF_AD_VLAN_TAG:
294 case SKF_AD_VLAN_TAG_PRESENT:
295 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
296 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
297
298 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
299 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
300 offsetof(struct sk_buff, vlan_tci));
301 if (skb_field == SKF_AD_VLAN_TAG) {
302 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
303 ~VLAN_TAG_PRESENT);
304 } else {
305 /* dst_reg >>= 12 */
306 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
307 /* dst_reg &= 1 */
308 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
309 }
310 break;
9bac3d6d
AS
311 }
312
313 return insn - insn_buf;
314}
315
bd4cf0ed 316static bool convert_bpf_extensions(struct sock_filter *fp,
2695fb55 317 struct bpf_insn **insnp)
bd4cf0ed 318{
2695fb55 319 struct bpf_insn *insn = *insnp;
9bac3d6d 320 u32 cnt;
bd4cf0ed
AS
321
322 switch (fp->k) {
323 case SKF_AD_OFF + SKF_AD_PROTOCOL:
0b8c707d
DB
324 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
325
326 /* A = *(u16 *) (CTX + offsetof(protocol)) */
327 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
328 offsetof(struct sk_buff, protocol));
329 /* A = ntohs(A) [emitting a nop or swap16] */
330 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
bd4cf0ed
AS
331 break;
332
333 case SKF_AD_OFF + SKF_AD_PKTTYPE:
9bac3d6d
AS
334 cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
335 insn += cnt - 1;
bd4cf0ed
AS
336 break;
337
338 case SKF_AD_OFF + SKF_AD_IFINDEX:
339 case SKF_AD_OFF + SKF_AD_HATYPE:
bd4cf0ed
AS
340 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
341 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
f8f6d679 342
f035a515 343 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
f8f6d679
DB
344 BPF_REG_TMP, BPF_REG_CTX,
345 offsetof(struct sk_buff, dev));
346 /* if (tmp != 0) goto pc + 1 */
347 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
348 *insn++ = BPF_EXIT_INSN();
349 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
350 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
351 offsetof(struct net_device, ifindex));
352 else
353 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
354 offsetof(struct net_device, type));
bd4cf0ed
AS
355 break;
356
357 case SKF_AD_OFF + SKF_AD_MARK:
9bac3d6d
AS
358 cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
359 insn += cnt - 1;
bd4cf0ed
AS
360 break;
361
362 case SKF_AD_OFF + SKF_AD_RXHASH:
363 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
364
9739eef1
AS
365 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
366 offsetof(struct sk_buff, hash));
bd4cf0ed
AS
367 break;
368
369 case SKF_AD_OFF + SKF_AD_QUEUE:
9bac3d6d
AS
370 cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
371 insn += cnt - 1;
bd4cf0ed
AS
372 break;
373
374 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
c2497395
AS
375 cnt = convert_skb_access(SKF_AD_VLAN_TAG,
376 BPF_REG_A, BPF_REG_CTX, insn);
377 insn += cnt - 1;
378 break;
bd4cf0ed 379
c2497395
AS
380 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
381 cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
382 BPF_REG_A, BPF_REG_CTX, insn);
383 insn += cnt - 1;
bd4cf0ed
AS
384 break;
385
27cd5452
MS
386 case SKF_AD_OFF + SKF_AD_VLAN_TPID:
387 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
388
389 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
390 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
391 offsetof(struct sk_buff, vlan_proto));
392 /* A = ntohs(A) [emitting a nop or swap16] */
393 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
394 break;
395
bd4cf0ed
AS
396 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
397 case SKF_AD_OFF + SKF_AD_NLATTR:
398 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
399 case SKF_AD_OFF + SKF_AD_CPU:
4cd3675e 400 case SKF_AD_OFF + SKF_AD_RANDOM:
e430f34e 401 /* arg1 = CTX */
f8f6d679 402 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
bd4cf0ed 403 /* arg2 = A */
f8f6d679 404 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
bd4cf0ed 405 /* arg3 = X */
f8f6d679 406 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
e430f34e 407 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
bd4cf0ed
AS
408 switch (fp->k) {
409 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
b390134c 410 *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
bd4cf0ed
AS
411 break;
412 case SKF_AD_OFF + SKF_AD_NLATTR:
b390134c 413 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
bd4cf0ed
AS
414 break;
415 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
b390134c 416 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
bd4cf0ed
AS
417 break;
418 case SKF_AD_OFF + SKF_AD_CPU:
b390134c 419 *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
bd4cf0ed 420 break;
4cd3675e 421 case SKF_AD_OFF + SKF_AD_RANDOM:
3ad00405
DB
422 *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
423 bpf_user_rnd_init_once();
4cd3675e 424 break;
bd4cf0ed
AS
425 }
426 break;
427
428 case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
9739eef1
AS
429 /* A ^= X */
430 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
bd4cf0ed
AS
431 break;
432
433 default:
434 /* This is just a dummy call to avoid letting the compiler
435 * evict __bpf_call_base() as an optimization. Placed here
436 * where no-one bothers.
437 */
438 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
439 return false;
440 }
441
442 *insnp = insn;
443 return true;
444}
445
e0cea7ce
DB
446static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
447{
448 const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
449 int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
450 bool endian = BPF_SIZE(fp->code) == BPF_H ||
451 BPF_SIZE(fp->code) == BPF_W;
452 bool indirect = BPF_MODE(fp->code) == BPF_IND;
453 const int ip_align = NET_IP_ALIGN;
454 struct bpf_insn *insn = *insnp;
455 int offset = fp->k;
456
457 if (!indirect &&
458 ((unaligned_ok && offset >= 0) ||
459 (!unaligned_ok && offset >= 0 &&
460 offset + ip_align >= 0 &&
461 offset + ip_align % size == 0))) {
462 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
463 *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
464 *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
465 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
466 offset);
467 if (endian)
468 *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
469 *insn++ = BPF_JMP_A(8);
470 }
471
472 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
473 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
474 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
475 if (!indirect) {
476 *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
477 } else {
478 *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
479 if (fp->k)
480 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
481 }
482
483 switch (BPF_SIZE(fp->code)) {
484 case BPF_B:
485 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
486 break;
487 case BPF_H:
488 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
489 break;
490 case BPF_W:
491 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
492 break;
493 default:
494 return false;
495 }
496
497 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
498 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
499 *insn = BPF_EXIT_INSN();
500
501 *insnp = insn;
502 return true;
503}
504
bd4cf0ed 505/**
8fb575ca 506 * bpf_convert_filter - convert filter program
bd4cf0ed
AS
507 * @prog: the user passed filter program
508 * @len: the length of the user passed filter program
50bbfed9 509 * @new_prog: allocated 'struct bpf_prog' or NULL
bd4cf0ed 510 * @new_len: pointer to store length of converted program
e0cea7ce 511 * @seen_ld_abs: bool whether we've seen ld_abs/ind
bd4cf0ed 512 *
1f504ec9
TK
513 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
514 * style extended BPF (eBPF).
bd4cf0ed
AS
515 * Conversion workflow:
516 *
517 * 1) First pass for calculating the new program length:
e0cea7ce 518 * bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
bd4cf0ed
AS
519 *
520 * 2) 2nd pass to remap in two passes: 1st pass finds new
521 * jump offsets, 2nd pass remapping:
e0cea7ce 522 * bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
bd4cf0ed 523 */
d9e12f42 524static int bpf_convert_filter(struct sock_filter *prog, int len,
e0cea7ce
DB
525 struct bpf_prog *new_prog, int *new_len,
526 bool *seen_ld_abs)
bd4cf0ed 527{
50bbfed9
AS
528 int new_flen = 0, pass = 0, target, i, stack_off;
529 struct bpf_insn *new_insn, *first_insn = NULL;
bd4cf0ed
AS
530 struct sock_filter *fp;
531 int *addrs = NULL;
532 u8 bpf_src;
533
534 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
30743837 535 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
bd4cf0ed 536
6f9a093b 537 if (len <= 0 || len > BPF_MAXINSNS)
bd4cf0ed
AS
538 return -EINVAL;
539
540 if (new_prog) {
50bbfed9 541 first_insn = new_prog->insnsi;
658da937
DB
542 addrs = kcalloc(len, sizeof(*addrs),
543 GFP_KERNEL | __GFP_NOWARN);
bd4cf0ed
AS
544 if (!addrs)
545 return -ENOMEM;
546 }
547
548do_pass:
50bbfed9 549 new_insn = first_insn;
bd4cf0ed
AS
550 fp = prog;
551
8b614aeb 552 /* Classic BPF related prologue emission. */
50bbfed9 553 if (new_prog) {
8b614aeb
DB
554 /* Classic BPF expects A and X to be reset first. These need
555 * to be guaranteed to be the first two instructions.
556 */
1d621674
DB
557 *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
558 *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
8b614aeb
DB
559
560 /* All programs must keep CTX in callee saved BPF_REG_CTX.
561 * In eBPF case it's done by the compiler, here we need to
562 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
563 */
564 *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
e0cea7ce
DB
565 if (*seen_ld_abs) {
566 /* For packet access in classic BPF, cache skb->data
567 * in callee-saved BPF R8 and skb->len - skb->data_len
568 * (headlen) in BPF R9. Since classic BPF is read-only
569 * on CTX, we only need to cache it once.
570 */
571 *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
572 BPF_REG_D, BPF_REG_CTX,
573 offsetof(struct sk_buff, data));
574 *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
575 offsetof(struct sk_buff, len));
576 *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
577 offsetof(struct sk_buff, data_len));
578 *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
579 }
8b614aeb
DB
580 } else {
581 new_insn += 3;
582 }
bd4cf0ed
AS
583
584 for (i = 0; i < len; fp++, i++) {
e0cea7ce 585 struct bpf_insn tmp_insns[32] = { };
2695fb55 586 struct bpf_insn *insn = tmp_insns;
bd4cf0ed
AS
587
588 if (addrs)
50bbfed9 589 addrs[i] = new_insn - first_insn;
bd4cf0ed
AS
590
591 switch (fp->code) {
592 /* All arithmetic insns and skb loads map as-is. */
593 case BPF_ALU | BPF_ADD | BPF_X:
594 case BPF_ALU | BPF_ADD | BPF_K:
595 case BPF_ALU | BPF_SUB | BPF_X:
596 case BPF_ALU | BPF_SUB | BPF_K:
597 case BPF_ALU | BPF_AND | BPF_X:
598 case BPF_ALU | BPF_AND | BPF_K:
599 case BPF_ALU | BPF_OR | BPF_X:
600 case BPF_ALU | BPF_OR | BPF_K:
601 case BPF_ALU | BPF_LSH | BPF_X:
602 case BPF_ALU | BPF_LSH | BPF_K:
603 case BPF_ALU | BPF_RSH | BPF_X:
604 case BPF_ALU | BPF_RSH | BPF_K:
605 case BPF_ALU | BPF_XOR | BPF_X:
606 case BPF_ALU | BPF_XOR | BPF_K:
607 case BPF_ALU | BPF_MUL | BPF_X:
608 case BPF_ALU | BPF_MUL | BPF_K:
609 case BPF_ALU | BPF_DIV | BPF_X:
610 case BPF_ALU | BPF_DIV | BPF_K:
611 case BPF_ALU | BPF_MOD | BPF_X:
612 case BPF_ALU | BPF_MOD | BPF_K:
613 case BPF_ALU | BPF_NEG:
614 case BPF_LD | BPF_ABS | BPF_W:
615 case BPF_LD | BPF_ABS | BPF_H:
616 case BPF_LD | BPF_ABS | BPF_B:
617 case BPF_LD | BPF_IND | BPF_W:
618 case BPF_LD | BPF_IND | BPF_H:
619 case BPF_LD | BPF_IND | BPF_B:
620 /* Check for overloaded BPF extension and
621 * directly convert it if found, otherwise
622 * just move on with mapping.
623 */
624 if (BPF_CLASS(fp->code) == BPF_LD &&
625 BPF_MODE(fp->code) == BPF_ABS &&
626 convert_bpf_extensions(fp, &insn))
627 break;
e0cea7ce
DB
628 if (BPF_CLASS(fp->code) == BPF_LD &&
629 convert_bpf_ld_abs(fp, &insn)) {
630 *seen_ld_abs = true;
631 break;
632 }
bd4cf0ed 633
68fda450 634 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
f6b1b3bf 635 fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
68fda450 636 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
f6b1b3bf
DB
637 /* Error with exception code on div/mod by 0.
638 * For cBPF programs, this was always return 0.
639 */
640 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
641 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
642 *insn++ = BPF_EXIT_INSN();
643 }
68fda450 644
f8f6d679 645 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
bd4cf0ed
AS
646 break;
647
f8f6d679
DB
648 /* Jump transformation cannot use BPF block macros
649 * everywhere as offset calculation and target updates
650 * require a bit more work than the rest, i.e. jump
651 * opcodes map as-is, but offsets need adjustment.
652 */
653
654#define BPF_EMIT_JMP \
bd4cf0ed 655 do { \
050fad7c
DB
656 const s32 off_min = S16_MIN, off_max = S16_MAX; \
657 s32 off; \
658 \
bd4cf0ed
AS
659 if (target >= len || target < 0) \
660 goto err; \
050fad7c 661 off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
bd4cf0ed 662 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
050fad7c
DB
663 off -= insn - tmp_insns; \
664 /* Reject anything not fitting into insn->off. */ \
665 if (off < off_min || off > off_max) \
666 goto err; \
667 insn->off = off; \
bd4cf0ed
AS
668 } while (0)
669
f8f6d679
DB
670 case BPF_JMP | BPF_JA:
671 target = i + fp->k + 1;
672 insn->code = fp->code;
673 BPF_EMIT_JMP;
bd4cf0ed
AS
674 break;
675
676 case BPF_JMP | BPF_JEQ | BPF_K:
677 case BPF_JMP | BPF_JEQ | BPF_X:
678 case BPF_JMP | BPF_JSET | BPF_K:
679 case BPF_JMP | BPF_JSET | BPF_X:
680 case BPF_JMP | BPF_JGT | BPF_K:
681 case BPF_JMP | BPF_JGT | BPF_X:
682 case BPF_JMP | BPF_JGE | BPF_K:
683 case BPF_JMP | BPF_JGE | BPF_X:
684 if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
685 /* BPF immediates are signed, zero extend
686 * immediate into tmp register and use it
687 * in compare insn.
688 */
f8f6d679 689 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
bd4cf0ed 690
e430f34e
AS
691 insn->dst_reg = BPF_REG_A;
692 insn->src_reg = BPF_REG_TMP;
bd4cf0ed
AS
693 bpf_src = BPF_X;
694 } else {
e430f34e 695 insn->dst_reg = BPF_REG_A;
bd4cf0ed
AS
696 insn->imm = fp->k;
697 bpf_src = BPF_SRC(fp->code);
19539ce7 698 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
1da177e4 699 }
bd4cf0ed
AS
700
701 /* Common case where 'jump_false' is next insn. */
702 if (fp->jf == 0) {
703 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
704 target = i + fp->jt + 1;
f8f6d679 705 BPF_EMIT_JMP;
bd4cf0ed 706 break;
1da177e4 707 }
bd4cf0ed 708
92b31a9a
DB
709 /* Convert some jumps when 'jump_true' is next insn. */
710 if (fp->jt == 0) {
711 switch (BPF_OP(fp->code)) {
712 case BPF_JEQ:
713 insn->code = BPF_JMP | BPF_JNE | bpf_src;
714 break;
715 case BPF_JGT:
716 insn->code = BPF_JMP | BPF_JLE | bpf_src;
717 break;
718 case BPF_JGE:
719 insn->code = BPF_JMP | BPF_JLT | bpf_src;
720 break;
721 default:
722 goto jmp_rest;
723 }
724
bd4cf0ed 725 target = i + fp->jf + 1;
f8f6d679 726 BPF_EMIT_JMP;
bd4cf0ed 727 break;
0b05b2a4 728 }
92b31a9a 729jmp_rest:
bd4cf0ed
AS
730 /* Other jumps are mapped into two insns: Jxx and JA. */
731 target = i + fp->jt + 1;
732 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
f8f6d679 733 BPF_EMIT_JMP;
bd4cf0ed
AS
734 insn++;
735
736 insn->code = BPF_JMP | BPF_JA;
737 target = i + fp->jf + 1;
f8f6d679 738 BPF_EMIT_JMP;
bd4cf0ed
AS
739 break;
740
741 /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
e0cea7ce
DB
742 case BPF_LDX | BPF_MSH | BPF_B: {
743 struct sock_filter tmp = {
744 .code = BPF_LD | BPF_ABS | BPF_B,
745 .k = fp->k,
746 };
747
748 *seen_ld_abs = true;
749
750 /* X = A */
751 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
1268e253 752 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
e0cea7ce
DB
753 convert_bpf_ld_abs(&tmp, &insn);
754 insn++;
9739eef1 755 /* A &= 0xf */
f8f6d679 756 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
9739eef1 757 /* A <<= 2 */
f8f6d679 758 *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
e0cea7ce
DB
759 /* tmp = X */
760 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
9739eef1 761 /* X = A */
f8f6d679 762 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
9739eef1 763 /* A = tmp */
f8f6d679 764 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
bd4cf0ed 765 break;
e0cea7ce 766 }
6205b9cf
DB
767 /* RET_K is remaped into 2 insns. RET_A case doesn't need an
768 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
769 */
bd4cf0ed
AS
770 case BPF_RET | BPF_A:
771 case BPF_RET | BPF_K:
6205b9cf
DB
772 if (BPF_RVAL(fp->code) == BPF_K)
773 *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
774 0, fp->k);
9739eef1 775 *insn = BPF_EXIT_INSN();
bd4cf0ed
AS
776 break;
777
778 /* Store to stack. */
779 case BPF_ST:
780 case BPF_STX:
50bbfed9 781 stack_off = fp->k * 4 + 4;
f8f6d679
DB
782 *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
783 BPF_ST ? BPF_REG_A : BPF_REG_X,
50bbfed9
AS
784 -stack_off);
785 /* check_load_and_stores() verifies that classic BPF can
786 * load from stack only after write, so tracking
787 * stack_depth for ST|STX insns is enough
788 */
789 if (new_prog && new_prog->aux->stack_depth < stack_off)
790 new_prog->aux->stack_depth = stack_off;
bd4cf0ed
AS
791 break;
792
793 /* Load from stack. */
794 case BPF_LD | BPF_MEM:
795 case BPF_LDX | BPF_MEM:
50bbfed9 796 stack_off = fp->k * 4 + 4;
f8f6d679
DB
797 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
798 BPF_REG_A : BPF_REG_X, BPF_REG_FP,
50bbfed9 799 -stack_off);
bd4cf0ed
AS
800 break;
801
802 /* A = K or X = K */
803 case BPF_LD | BPF_IMM:
804 case BPF_LDX | BPF_IMM:
f8f6d679
DB
805 *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
806 BPF_REG_A : BPF_REG_X, fp->k);
bd4cf0ed
AS
807 break;
808
809 /* X = A */
810 case BPF_MISC | BPF_TAX:
f8f6d679 811 *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
bd4cf0ed
AS
812 break;
813
814 /* A = X */
815 case BPF_MISC | BPF_TXA:
f8f6d679 816 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
bd4cf0ed
AS
817 break;
818
819 /* A = skb->len or X = skb->len */
820 case BPF_LD | BPF_W | BPF_LEN:
821 case BPF_LDX | BPF_W | BPF_LEN:
f8f6d679
DB
822 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
823 BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
824 offsetof(struct sk_buff, len));
bd4cf0ed
AS
825 break;
826
f8f6d679 827 /* Access seccomp_data fields. */
bd4cf0ed 828 case BPF_LDX | BPF_ABS | BPF_W:
9739eef1
AS
829 /* A = *(u32 *) (ctx + K) */
830 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
bd4cf0ed
AS
831 break;
832
ca9f1fd2 833 /* Unknown instruction. */
1da177e4 834 default:
bd4cf0ed 835 goto err;
1da177e4 836 }
bd4cf0ed
AS
837
838 insn++;
839 if (new_prog)
840 memcpy(new_insn, tmp_insns,
841 sizeof(*insn) * (insn - tmp_insns));
bd4cf0ed 842 new_insn += insn - tmp_insns;
1da177e4
LT
843 }
844
bd4cf0ed
AS
845 if (!new_prog) {
846 /* Only calculating new length. */
50bbfed9 847 *new_len = new_insn - first_insn;
e0cea7ce
DB
848 if (*seen_ld_abs)
849 *new_len += 4; /* Prologue bits. */
bd4cf0ed
AS
850 return 0;
851 }
852
853 pass++;
50bbfed9
AS
854 if (new_flen != new_insn - first_insn) {
855 new_flen = new_insn - first_insn;
bd4cf0ed
AS
856 if (pass > 2)
857 goto err;
bd4cf0ed
AS
858 goto do_pass;
859 }
860
861 kfree(addrs);
862 BUG_ON(*new_len != new_flen);
1da177e4 863 return 0;
bd4cf0ed
AS
864err:
865 kfree(addrs);
866 return -EINVAL;
1da177e4
LT
867}
868
bd4cf0ed 869/* Security:
bd4cf0ed 870 *
2d5311e4 871 * As we dont want to clear mem[] array for each packet going through
8ea6e345 872 * __bpf_prog_run(), we check that filter loaded by user never try to read
2d5311e4 873 * a cell if not previously written, and we check all branches to be sure
25985edc 874 * a malicious user doesn't try to abuse us.
2d5311e4 875 */
ec31a05c 876static int check_load_and_stores(const struct sock_filter *filter, int flen)
2d5311e4 877{
34805931 878 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
2d5311e4
ED
879 int pc, ret = 0;
880
881 BUILD_BUG_ON(BPF_MEMWORDS > 16);
34805931 882
99e72a0f 883 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
2d5311e4
ED
884 if (!masks)
885 return -ENOMEM;
34805931 886
2d5311e4
ED
887 memset(masks, 0xff, flen * sizeof(*masks));
888
889 for (pc = 0; pc < flen; pc++) {
890 memvalid &= masks[pc];
891
892 switch (filter[pc].code) {
34805931
DB
893 case BPF_ST:
894 case BPF_STX:
2d5311e4
ED
895 memvalid |= (1 << filter[pc].k);
896 break;
34805931
DB
897 case BPF_LD | BPF_MEM:
898 case BPF_LDX | BPF_MEM:
2d5311e4
ED
899 if (!(memvalid & (1 << filter[pc].k))) {
900 ret = -EINVAL;
901 goto error;
902 }
903 break;
34805931
DB
904 case BPF_JMP | BPF_JA:
905 /* A jump must set masks on target */
2d5311e4
ED
906 masks[pc + 1 + filter[pc].k] &= memvalid;
907 memvalid = ~0;
908 break;
34805931
DB
909 case BPF_JMP | BPF_JEQ | BPF_K:
910 case BPF_JMP | BPF_JEQ | BPF_X:
911 case BPF_JMP | BPF_JGE | BPF_K:
912 case BPF_JMP | BPF_JGE | BPF_X:
913 case BPF_JMP | BPF_JGT | BPF_K:
914 case BPF_JMP | BPF_JGT | BPF_X:
915 case BPF_JMP | BPF_JSET | BPF_K:
916 case BPF_JMP | BPF_JSET | BPF_X:
917 /* A jump must set masks on targets */
2d5311e4
ED
918 masks[pc + 1 + filter[pc].jt] &= memvalid;
919 masks[pc + 1 + filter[pc].jf] &= memvalid;
920 memvalid = ~0;
921 break;
922 }
923 }
924error:
925 kfree(masks);
926 return ret;
927}
928
34805931
DB
929static bool chk_code_allowed(u16 code_to_probe)
930{
931 static const bool codes[] = {
932 /* 32 bit ALU operations */
933 [BPF_ALU | BPF_ADD | BPF_K] = true,
934 [BPF_ALU | BPF_ADD | BPF_X] = true,
935 [BPF_ALU | BPF_SUB | BPF_K] = true,
936 [BPF_ALU | BPF_SUB | BPF_X] = true,
937 [BPF_ALU | BPF_MUL | BPF_K] = true,
938 [BPF_ALU | BPF_MUL | BPF_X] = true,
939 [BPF_ALU | BPF_DIV | BPF_K] = true,
940 [BPF_ALU | BPF_DIV | BPF_X] = true,
941 [BPF_ALU | BPF_MOD | BPF_K] = true,
942 [BPF_ALU | BPF_MOD | BPF_X] = true,
943 [BPF_ALU | BPF_AND | BPF_K] = true,
944 [BPF_ALU | BPF_AND | BPF_X] = true,
945 [BPF_ALU | BPF_OR | BPF_K] = true,
946 [BPF_ALU | BPF_OR | BPF_X] = true,
947 [BPF_ALU | BPF_XOR | BPF_K] = true,
948 [BPF_ALU | BPF_XOR | BPF_X] = true,
949 [BPF_ALU | BPF_LSH | BPF_K] = true,
950 [BPF_ALU | BPF_LSH | BPF_X] = true,
951 [BPF_ALU | BPF_RSH | BPF_K] = true,
952 [BPF_ALU | BPF_RSH | BPF_X] = true,
953 [BPF_ALU | BPF_NEG] = true,
954 /* Load instructions */
955 [BPF_LD | BPF_W | BPF_ABS] = true,
956 [BPF_LD | BPF_H | BPF_ABS] = true,
957 [BPF_LD | BPF_B | BPF_ABS] = true,
958 [BPF_LD | BPF_W | BPF_LEN] = true,
959 [BPF_LD | BPF_W | BPF_IND] = true,
960 [BPF_LD | BPF_H | BPF_IND] = true,
961 [BPF_LD | BPF_B | BPF_IND] = true,
962 [BPF_LD | BPF_IMM] = true,
963 [BPF_LD | BPF_MEM] = true,
964 [BPF_LDX | BPF_W | BPF_LEN] = true,
965 [BPF_LDX | BPF_B | BPF_MSH] = true,
966 [BPF_LDX | BPF_IMM] = true,
967 [BPF_LDX | BPF_MEM] = true,
968 /* Store instructions */
969 [BPF_ST] = true,
970 [BPF_STX] = true,
971 /* Misc instructions */
972 [BPF_MISC | BPF_TAX] = true,
973 [BPF_MISC | BPF_TXA] = true,
974 /* Return instructions */
975 [BPF_RET | BPF_K] = true,
976 [BPF_RET | BPF_A] = true,
977 /* Jump instructions */
978 [BPF_JMP | BPF_JA] = true,
979 [BPF_JMP | BPF_JEQ | BPF_K] = true,
980 [BPF_JMP | BPF_JEQ | BPF_X] = true,
981 [BPF_JMP | BPF_JGE | BPF_K] = true,
982 [BPF_JMP | BPF_JGE | BPF_X] = true,
983 [BPF_JMP | BPF_JGT | BPF_K] = true,
984 [BPF_JMP | BPF_JGT | BPF_X] = true,
985 [BPF_JMP | BPF_JSET | BPF_K] = true,
986 [BPF_JMP | BPF_JSET | BPF_X] = true,
987 };
988
989 if (code_to_probe >= ARRAY_SIZE(codes))
990 return false;
991
992 return codes[code_to_probe];
993}
994
f7bd9e36
DB
995static bool bpf_check_basics_ok(const struct sock_filter *filter,
996 unsigned int flen)
997{
998 if (filter == NULL)
999 return false;
1000 if (flen == 0 || flen > BPF_MAXINSNS)
1001 return false;
1002
1003 return true;
1004}
1005
1da177e4 1006/**
4df95ff4 1007 * bpf_check_classic - verify socket filter code
1da177e4
LT
1008 * @filter: filter to verify
1009 * @flen: length of filter
1010 *
1011 * Check the user's filter code. If we let some ugly
1012 * filter code slip through kaboom! The filter must contain
93699863
KK
1013 * no references or jumps that are out of range, no illegal
1014 * instructions, and must end with a RET instruction.
1da177e4 1015 *
7b11f69f
KK
1016 * All jumps are forward as they are not signed.
1017 *
1018 * Returns 0 if the rule set is legal or -EINVAL if not.
1da177e4 1019 */
d9e12f42
NS
1020static int bpf_check_classic(const struct sock_filter *filter,
1021 unsigned int flen)
1da177e4 1022{
aa1113d9 1023 bool anc_found;
34805931 1024 int pc;
1da177e4 1025
34805931 1026 /* Check the filter code now */
1da177e4 1027 for (pc = 0; pc < flen; pc++) {
ec31a05c 1028 const struct sock_filter *ftest = &filter[pc];
93699863 1029
34805931
DB
1030 /* May we actually operate on this code? */
1031 if (!chk_code_allowed(ftest->code))
cba328fc 1032 return -EINVAL;
34805931 1033
93699863 1034 /* Some instructions need special checks */
34805931
DB
1035 switch (ftest->code) {
1036 case BPF_ALU | BPF_DIV | BPF_K:
1037 case BPF_ALU | BPF_MOD | BPF_K:
1038 /* Check for division by zero */
b6069a95
ED
1039 if (ftest->k == 0)
1040 return -EINVAL;
1041 break;
229394e8
RV
1042 case BPF_ALU | BPF_LSH | BPF_K:
1043 case BPF_ALU | BPF_RSH | BPF_K:
1044 if (ftest->k >= 32)
1045 return -EINVAL;
1046 break;
34805931
DB
1047 case BPF_LD | BPF_MEM:
1048 case BPF_LDX | BPF_MEM:
1049 case BPF_ST:
1050 case BPF_STX:
1051 /* Check for invalid memory addresses */
93699863
KK
1052 if (ftest->k >= BPF_MEMWORDS)
1053 return -EINVAL;
1054 break;
34805931
DB
1055 case BPF_JMP | BPF_JA:
1056 /* Note, the large ftest->k might cause loops.
93699863
KK
1057 * Compare this with conditional jumps below,
1058 * where offsets are limited. --ANK (981016)
1059 */
34805931 1060 if (ftest->k >= (unsigned int)(flen - pc - 1))
93699863 1061 return -EINVAL;
01f2f3f6 1062 break;
34805931
DB
1063 case BPF_JMP | BPF_JEQ | BPF_K:
1064 case BPF_JMP | BPF_JEQ | BPF_X:
1065 case BPF_JMP | BPF_JGE | BPF_K:
1066 case BPF_JMP | BPF_JGE | BPF_X:
1067 case BPF_JMP | BPF_JGT | BPF_K:
1068 case BPF_JMP | BPF_JGT | BPF_X:
1069 case BPF_JMP | BPF_JSET | BPF_K:
1070 case BPF_JMP | BPF_JSET | BPF_X:
1071 /* Both conditionals must be safe */
e35bedf3 1072 if (pc + ftest->jt + 1 >= flen ||
93699863
KK
1073 pc + ftest->jf + 1 >= flen)
1074 return -EINVAL;
cba328fc 1075 break;
34805931
DB
1076 case BPF_LD | BPF_W | BPF_ABS:
1077 case BPF_LD | BPF_H | BPF_ABS:
1078 case BPF_LD | BPF_B | BPF_ABS:
aa1113d9 1079 anc_found = false;
34805931
DB
1080 if (bpf_anc_helper(ftest) & BPF_ANC)
1081 anc_found = true;
1082 /* Ancillary operation unknown or unsupported */
aa1113d9
DB
1083 if (anc_found == false && ftest->k >= SKF_AD_OFF)
1084 return -EINVAL;
01f2f3f6
HPP
1085 }
1086 }
93699863 1087
34805931 1088 /* Last instruction must be a RET code */
01f2f3f6 1089 switch (filter[flen - 1].code) {
34805931
DB
1090 case BPF_RET | BPF_K:
1091 case BPF_RET | BPF_A:
2d5311e4 1092 return check_load_and_stores(filter, flen);
cba328fc 1093 }
34805931 1094
cba328fc 1095 return -EINVAL;
1da177e4
LT
1096}
1097
7ae457c1
AS
1098static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
1099 const struct sock_fprog *fprog)
a3ea269b 1100{
009937e7 1101 unsigned int fsize = bpf_classic_proglen(fprog);
a3ea269b
DB
1102 struct sock_fprog_kern *fkprog;
1103
1104 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
1105 if (!fp->orig_prog)
1106 return -ENOMEM;
1107
1108 fkprog = fp->orig_prog;
1109 fkprog->len = fprog->len;
658da937
DB
1110
1111 fkprog->filter = kmemdup(fp->insns, fsize,
1112 GFP_KERNEL | __GFP_NOWARN);
a3ea269b
DB
1113 if (!fkprog->filter) {
1114 kfree(fp->orig_prog);
1115 return -ENOMEM;
1116 }
1117
1118 return 0;
1119}
1120
7ae457c1 1121static void bpf_release_orig_filter(struct bpf_prog *fp)
a3ea269b
DB
1122{
1123 struct sock_fprog_kern *fprog = fp->orig_prog;
1124
1125 if (fprog) {
1126 kfree(fprog->filter);
1127 kfree(fprog);
1128 }
1129}
1130
7ae457c1
AS
1131static void __bpf_prog_release(struct bpf_prog *prog)
1132{
24701ece 1133 if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
89aa0758
AS
1134 bpf_prog_put(prog);
1135 } else {
1136 bpf_release_orig_filter(prog);
1137 bpf_prog_free(prog);
1138 }
7ae457c1
AS
1139}
1140
34c5bd66
PN
1141static void __sk_filter_release(struct sk_filter *fp)
1142{
7ae457c1
AS
1143 __bpf_prog_release(fp->prog);
1144 kfree(fp);
34c5bd66
PN
1145}
1146
47e958ea 1147/**
46bcf14f 1148 * sk_filter_release_rcu - Release a socket filter by rcu_head
47e958ea
PE
1149 * @rcu: rcu_head that contains the sk_filter to free
1150 */
fbc907f0 1151static void sk_filter_release_rcu(struct rcu_head *rcu)
47e958ea
PE
1152{
1153 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
1154
34c5bd66 1155 __sk_filter_release(fp);
47e958ea 1156}
fbc907f0
DB
1157
1158/**
1159 * sk_filter_release - release a socket filter
1160 * @fp: filter to remove
1161 *
1162 * Remove a filter from a socket and release its resources.
1163 */
1164static void sk_filter_release(struct sk_filter *fp)
1165{
4c355cdf 1166 if (refcount_dec_and_test(&fp->refcnt))
fbc907f0
DB
1167 call_rcu(&fp->rcu, sk_filter_release_rcu);
1168}
1169
1170void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
1171{
7ae457c1 1172 u32 filter_size = bpf_prog_size(fp->prog->len);
fbc907f0 1173
278571ba
AS
1174 atomic_sub(filter_size, &sk->sk_omem_alloc);
1175 sk_filter_release(fp);
fbc907f0 1176}
47e958ea 1177
278571ba
AS
1178/* try to charge the socket memory if there is space available
1179 * return true on success
1180 */
4c355cdf 1181static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
bd4cf0ed 1182{
7ae457c1 1183 u32 filter_size = bpf_prog_size(fp->prog->len);
278571ba
AS
1184
1185 /* same check as in sock_kmalloc() */
1186 if (filter_size <= sysctl_optmem_max &&
1187 atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
278571ba
AS
1188 atomic_add(filter_size, &sk->sk_omem_alloc);
1189 return true;
bd4cf0ed 1190 }
278571ba 1191 return false;
bd4cf0ed
AS
1192}
1193
4c355cdf
RE
1194bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1195{
eefca20e
ED
1196 if (!refcount_inc_not_zero(&fp->refcnt))
1197 return false;
1198
1199 if (!__sk_filter_charge(sk, fp)) {
1200 sk_filter_release(fp);
1201 return false;
1202 }
1203 return true;
4c355cdf
RE
1204}
1205
7ae457c1 1206static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
bd4cf0ed
AS
1207{
1208 struct sock_filter *old_prog;
7ae457c1 1209 struct bpf_prog *old_fp;
34805931 1210 int err, new_len, old_len = fp->len;
e0cea7ce 1211 bool seen_ld_abs = false;
bd4cf0ed
AS
1212
1213 /* We are free to overwrite insns et al right here as it
1214 * won't be used at this point in time anymore internally
1215 * after the migration to the internal BPF instruction
1216 * representation.
1217 */
1218 BUILD_BUG_ON(sizeof(struct sock_filter) !=
2695fb55 1219 sizeof(struct bpf_insn));
bd4cf0ed 1220
bd4cf0ed
AS
1221 /* Conversion cannot happen on overlapping memory areas,
1222 * so we need to keep the user BPF around until the 2nd
1223 * pass. At this time, the user BPF is stored in fp->insns.
1224 */
1225 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
658da937 1226 GFP_KERNEL | __GFP_NOWARN);
bd4cf0ed
AS
1227 if (!old_prog) {
1228 err = -ENOMEM;
1229 goto out_err;
1230 }
1231
1232 /* 1st pass: calculate the new program length. */
e0cea7ce
DB
1233 err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
1234 &seen_ld_abs);
bd4cf0ed
AS
1235 if (err)
1236 goto out_err_free;
1237
1238 /* Expand fp for appending the new filter representation. */
1239 old_fp = fp;
60a3b225 1240 fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
bd4cf0ed
AS
1241 if (!fp) {
1242 /* The old_fp is still around in case we couldn't
1243 * allocate new memory, so uncharge on that one.
1244 */
1245 fp = old_fp;
1246 err = -ENOMEM;
1247 goto out_err_free;
1248 }
1249
bd4cf0ed
AS
1250 fp->len = new_len;
1251
2695fb55 1252 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
e0cea7ce
DB
1253 err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
1254 &seen_ld_abs);
bd4cf0ed 1255 if (err)
8fb575ca 1256 /* 2nd bpf_convert_filter() can fail only if it fails
bd4cf0ed
AS
1257 * to allocate memory, remapping must succeed. Note,
1258 * that at this time old_fp has already been released
278571ba 1259 * by krealloc().
bd4cf0ed
AS
1260 */
1261 goto out_err_free;
1262
d1c55ab5 1263 fp = bpf_prog_select_runtime(fp, &err);
290af866
AS
1264 if (err)
1265 goto out_err_free;
5fe821a9 1266
bd4cf0ed
AS
1267 kfree(old_prog);
1268 return fp;
1269
1270out_err_free:
1271 kfree(old_prog);
1272out_err:
7ae457c1 1273 __bpf_prog_release(fp);
bd4cf0ed
AS
1274 return ERR_PTR(err);
1275}
1276
ac67eb2c
DB
1277static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
1278 bpf_aux_classic_check_t trans)
302d6637
JP
1279{
1280 int err;
1281
bd4cf0ed 1282 fp->bpf_func = NULL;
a91263d5 1283 fp->jited = 0;
302d6637 1284
4df95ff4 1285 err = bpf_check_classic(fp->insns, fp->len);
418c96ac 1286 if (err) {
7ae457c1 1287 __bpf_prog_release(fp);
bd4cf0ed 1288 return ERR_PTR(err);
418c96ac 1289 }
302d6637 1290
4ae92bc7
NS
1291 /* There might be additional checks and transformations
1292 * needed on classic filters, f.e. in case of seccomp.
1293 */
1294 if (trans) {
1295 err = trans(fp->insns, fp->len);
1296 if (err) {
1297 __bpf_prog_release(fp);
1298 return ERR_PTR(err);
1299 }
1300 }
1301
bd4cf0ed
AS
1302 /* Probe if we can JIT compile the filter and if so, do
1303 * the compilation of the filter.
1304 */
302d6637 1305 bpf_jit_compile(fp);
bd4cf0ed
AS
1306
1307 /* JIT compiler couldn't process this filter, so do the
1308 * internal BPF translation for the optimized interpreter.
1309 */
5fe821a9 1310 if (!fp->jited)
7ae457c1 1311 fp = bpf_migrate_filter(fp);
bd4cf0ed
AS
1312
1313 return fp;
302d6637
JP
1314}
1315
1316/**
7ae457c1 1317 * bpf_prog_create - create an unattached filter
c6c4b97c 1318 * @pfp: the unattached filter that is created
677a9fd3 1319 * @fprog: the filter program
302d6637 1320 *
c6c4b97c 1321 * Create a filter independent of any socket. We first run some
302d6637
JP
1322 * sanity checks on it to make sure it does not explode on us later.
1323 * If an error occurs or there is insufficient memory for the filter
1324 * a negative errno code is returned. On success the return is zero.
1325 */
7ae457c1 1326int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
302d6637 1327{
009937e7 1328 unsigned int fsize = bpf_classic_proglen(fprog);
7ae457c1 1329 struct bpf_prog *fp;
302d6637
JP
1330
1331 /* Make sure new filter is there and in the right amounts. */
f7bd9e36 1332 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
302d6637
JP
1333 return -EINVAL;
1334
60a3b225 1335 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
302d6637
JP
1336 if (!fp)
1337 return -ENOMEM;
a3ea269b 1338
302d6637
JP
1339 memcpy(fp->insns, fprog->filter, fsize);
1340
302d6637 1341 fp->len = fprog->len;
a3ea269b
DB
1342 /* Since unattached filters are not copied back to user
1343 * space through sk_get_filter(), we do not need to hold
1344 * a copy here, and can spare us the work.
1345 */
1346 fp->orig_prog = NULL;
302d6637 1347
7ae457c1 1348 /* bpf_prepare_filter() already takes care of freeing
bd4cf0ed
AS
1349 * memory in case something goes wrong.
1350 */
4ae92bc7 1351 fp = bpf_prepare_filter(fp, NULL);
bd4cf0ed
AS
1352 if (IS_ERR(fp))
1353 return PTR_ERR(fp);
302d6637
JP
1354
1355 *pfp = fp;
1356 return 0;
302d6637 1357}
7ae457c1 1358EXPORT_SYMBOL_GPL(bpf_prog_create);
302d6637 1359
ac67eb2c
DB
1360/**
1361 * bpf_prog_create_from_user - create an unattached filter from user buffer
1362 * @pfp: the unattached filter that is created
1363 * @fprog: the filter program
1364 * @trans: post-classic verifier transformation handler
bab18991 1365 * @save_orig: save classic BPF program
ac67eb2c
DB
1366 *
1367 * This function effectively does the same as bpf_prog_create(), only
1368 * that it builds up its insns buffer from user space provided buffer.
1369 * It also allows for passing a bpf_aux_classic_check_t handler.
1370 */
1371int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bab18991 1372 bpf_aux_classic_check_t trans, bool save_orig)
ac67eb2c
DB
1373{
1374 unsigned int fsize = bpf_classic_proglen(fprog);
1375 struct bpf_prog *fp;
bab18991 1376 int err;
ac67eb2c
DB
1377
1378 /* Make sure new filter is there and in the right amounts. */
f7bd9e36 1379 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
ac67eb2c
DB
1380 return -EINVAL;
1381
1382 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1383 if (!fp)
1384 return -ENOMEM;
1385
1386 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1387 __bpf_prog_free(fp);
1388 return -EFAULT;
1389 }
1390
1391 fp->len = fprog->len;
ac67eb2c
DB
1392 fp->orig_prog = NULL;
1393
bab18991
DB
1394 if (save_orig) {
1395 err = bpf_prog_store_orig_filter(fp, fprog);
1396 if (err) {
1397 __bpf_prog_free(fp);
1398 return -ENOMEM;
1399 }
1400 }
1401
ac67eb2c
DB
1402 /* bpf_prepare_filter() already takes care of freeing
1403 * memory in case something goes wrong.
1404 */
1405 fp = bpf_prepare_filter(fp, trans);
1406 if (IS_ERR(fp))
1407 return PTR_ERR(fp);
1408
1409 *pfp = fp;
1410 return 0;
1411}
2ea273d7 1412EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
ac67eb2c 1413
7ae457c1 1414void bpf_prog_destroy(struct bpf_prog *fp)
302d6637 1415{
7ae457c1 1416 __bpf_prog_release(fp);
302d6637 1417}
7ae457c1 1418EXPORT_SYMBOL_GPL(bpf_prog_destroy);
302d6637 1419
8ced425e 1420static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
49b31e57
DB
1421{
1422 struct sk_filter *fp, *old_fp;
1423
1424 fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1425 if (!fp)
1426 return -ENOMEM;
1427
1428 fp->prog = prog;
49b31e57 1429
4c355cdf 1430 if (!__sk_filter_charge(sk, fp)) {
49b31e57
DB
1431 kfree(fp);
1432 return -ENOMEM;
1433 }
4c355cdf 1434 refcount_set(&fp->refcnt, 1);
49b31e57 1435
8ced425e
HFS
1436 old_fp = rcu_dereference_protected(sk->sk_filter,
1437 lockdep_sock_is_held(sk));
49b31e57 1438 rcu_assign_pointer(sk->sk_filter, fp);
8ced425e 1439
49b31e57
DB
1440 if (old_fp)
1441 sk_filter_uncharge(sk, old_fp);
1442
1443 return 0;
1444}
1445
538950a1
CG
1446static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
1447{
1448 struct bpf_prog *old_prog;
1449 int err;
1450
1451 if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1452 return -ENOMEM;
1453
fa463497 1454 if (sk_unhashed(sk) && sk->sk_reuseport) {
538950a1
CG
1455 err = reuseport_alloc(sk);
1456 if (err)
1457 return err;
1458 } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
1459 /* The socket wasn't bound with SO_REUSEPORT */
1460 return -EINVAL;
1461 }
1462
1463 old_prog = reuseport_attach_prog(sk, prog);
1464 if (old_prog)
1465 bpf_prog_destroy(old_prog);
1466
1467 return 0;
1468}
1469
1470static
1471struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1da177e4 1472{
009937e7 1473 unsigned int fsize = bpf_classic_proglen(fprog);
7ae457c1 1474 struct bpf_prog *prog;
1da177e4
LT
1475 int err;
1476
d59577b6 1477 if (sock_flag(sk, SOCK_FILTER_LOCKED))
538950a1 1478 return ERR_PTR(-EPERM);
d59577b6 1479
1da177e4 1480 /* Make sure new filter is there and in the right amounts. */
f7bd9e36 1481 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
538950a1 1482 return ERR_PTR(-EINVAL);
1da177e4 1483
f7bd9e36 1484 prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
7ae457c1 1485 if (!prog)
538950a1 1486 return ERR_PTR(-ENOMEM);
a3ea269b 1487
7ae457c1 1488 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
c0d1379a 1489 __bpf_prog_free(prog);
538950a1 1490 return ERR_PTR(-EFAULT);
1da177e4
LT
1491 }
1492
7ae457c1 1493 prog->len = fprog->len;
1da177e4 1494
7ae457c1 1495 err = bpf_prog_store_orig_filter(prog, fprog);
a3ea269b 1496 if (err) {
c0d1379a 1497 __bpf_prog_free(prog);
538950a1 1498 return ERR_PTR(-ENOMEM);
a3ea269b
DB
1499 }
1500
7ae457c1 1501 /* bpf_prepare_filter() already takes care of freeing
bd4cf0ed
AS
1502 * memory in case something goes wrong.
1503 */
538950a1
CG
1504 return bpf_prepare_filter(prog, NULL);
1505}
1506
1507/**
1508 * sk_attach_filter - attach a socket filter
1509 * @fprog: the filter program
1510 * @sk: the socket to use
1511 *
1512 * Attach the user's filter code. We first run some sanity checks on
1513 * it to make sure it does not explode on us later. If an error
1514 * occurs or there is insufficient memory for the filter a negative
1515 * errno code is returned. On success the return is zero.
1516 */
8ced425e 1517int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
538950a1
CG
1518{
1519 struct bpf_prog *prog = __get_filter(fprog, sk);
1520 int err;
1521
7ae457c1
AS
1522 if (IS_ERR(prog))
1523 return PTR_ERR(prog);
1524
8ced425e 1525 err = __sk_attach_prog(prog, sk);
49b31e57 1526 if (err < 0) {
7ae457c1 1527 __bpf_prog_release(prog);
49b31e57 1528 return err;
278571ba
AS
1529 }
1530
d3904b73 1531 return 0;
1da177e4 1532}
8ced425e 1533EXPORT_SYMBOL_GPL(sk_attach_filter);
1da177e4 1534
538950a1 1535int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
89aa0758 1536{
538950a1 1537 struct bpf_prog *prog = __get_filter(fprog, sk);
49b31e57 1538 int err;
89aa0758 1539
538950a1
CG
1540 if (IS_ERR(prog))
1541 return PTR_ERR(prog);
1542
1543 err = __reuseport_attach_prog(prog, sk);
1544 if (err < 0) {
1545 __bpf_prog_release(prog);
1546 return err;
1547 }
1548
1549 return 0;
1550}
1551
1552static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1553{
89aa0758 1554 if (sock_flag(sk, SOCK_FILTER_LOCKED))
538950a1 1555 return ERR_PTR(-EPERM);
89aa0758 1556
113214be 1557 return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
538950a1
CG
1558}
1559
1560int sk_attach_bpf(u32 ufd, struct sock *sk)
1561{
1562 struct bpf_prog *prog = __get_bpf(ufd, sk);
1563 int err;
1564
1565 if (IS_ERR(prog))
1566 return PTR_ERR(prog);
1567
8ced425e 1568 err = __sk_attach_prog(prog, sk);
49b31e57 1569 if (err < 0) {
89aa0758 1570 bpf_prog_put(prog);
49b31e57 1571 return err;
89aa0758
AS
1572 }
1573
89aa0758
AS
1574 return 0;
1575}
1576
538950a1
CG
1577int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1578{
1579 struct bpf_prog *prog = __get_bpf(ufd, sk);
1580 int err;
1581
1582 if (IS_ERR(prog))
1583 return PTR_ERR(prog);
1584
1585 err = __reuseport_attach_prog(prog, sk);
1586 if (err < 0) {
1587 bpf_prog_put(prog);
1588 return err;
1589 }
1590
1591 return 0;
1592}
1593
21cafc1d
DB
1594struct bpf_scratchpad {
1595 union {
1596 __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1597 u8 buff[MAX_BPF_STACK];
1598 };
1599};
1600
1601static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
91bc4822 1602
5293efe6
DB
1603static inline int __bpf_try_make_writable(struct sk_buff *skb,
1604 unsigned int write_len)
1605{
1606 return skb_ensure_writable(skb, write_len);
1607}
1608
db58ba45
AS
1609static inline int bpf_try_make_writable(struct sk_buff *skb,
1610 unsigned int write_len)
1611{
5293efe6 1612 int err = __bpf_try_make_writable(skb, write_len);
db58ba45 1613
6aaae2b6 1614 bpf_compute_data_pointers(skb);
db58ba45
AS
1615 return err;
1616}
1617
36bbef52
DB
1618static int bpf_try_make_head_writable(struct sk_buff *skb)
1619{
1620 return bpf_try_make_writable(skb, skb_headlen(skb));
1621}
1622
a2bfe6bf
DB
1623static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1624{
1625 if (skb_at_tc_ingress(skb))
1626 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1627}
1628
8065694e
DB
1629static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1630{
1631 if (skb_at_tc_ingress(skb))
1632 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1633}
1634
f3694e00
DB
1635BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1636 const void *, from, u32, len, u64, flags)
608cd71a 1637{
608cd71a
AS
1638 void *ptr;
1639
8afd54c8 1640 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
781c53bc 1641 return -EINVAL;
0ed661d5 1642 if (unlikely(offset > 0xffff))
608cd71a 1643 return -EFAULT;
db58ba45 1644 if (unlikely(bpf_try_make_writable(skb, offset + len)))
608cd71a
AS
1645 return -EFAULT;
1646
0ed661d5 1647 ptr = skb->data + offset;
781c53bc 1648 if (flags & BPF_F_RECOMPUTE_CSUM)
479ffccc 1649 __skb_postpull_rcsum(skb, ptr, len, offset);
608cd71a
AS
1650
1651 memcpy(ptr, from, len);
1652
781c53bc 1653 if (flags & BPF_F_RECOMPUTE_CSUM)
479ffccc 1654 __skb_postpush_rcsum(skb, ptr, len, offset);
8afd54c8
DB
1655 if (flags & BPF_F_INVALIDATE_HASH)
1656 skb_clear_hash(skb);
f8ffad69 1657
608cd71a
AS
1658 return 0;
1659}
1660
577c50aa 1661static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
608cd71a
AS
1662 .func = bpf_skb_store_bytes,
1663 .gpl_only = false,
1664 .ret_type = RET_INTEGER,
1665 .arg1_type = ARG_PTR_TO_CTX,
1666 .arg2_type = ARG_ANYTHING,
39f19ebb
AS
1667 .arg3_type = ARG_PTR_TO_MEM,
1668 .arg4_type = ARG_CONST_SIZE,
91bc4822
AS
1669 .arg5_type = ARG_ANYTHING,
1670};
1671
f3694e00
DB
1672BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1673 void *, to, u32, len)
05c74e5e 1674{
05c74e5e
DB
1675 void *ptr;
1676
0ed661d5 1677 if (unlikely(offset > 0xffff))
074f528e 1678 goto err_clear;
05c74e5e
DB
1679
1680 ptr = skb_header_pointer(skb, offset, len, to);
1681 if (unlikely(!ptr))
074f528e 1682 goto err_clear;
05c74e5e
DB
1683 if (ptr != to)
1684 memcpy(to, ptr, len);
1685
1686 return 0;
074f528e
DB
1687err_clear:
1688 memset(to, 0, len);
1689 return -EFAULT;
05c74e5e
DB
1690}
1691
577c50aa 1692static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
05c74e5e
DB
1693 .func = bpf_skb_load_bytes,
1694 .gpl_only = false,
1695 .ret_type = RET_INTEGER,
1696 .arg1_type = ARG_PTR_TO_CTX,
1697 .arg2_type = ARG_ANYTHING,
39f19ebb
AS
1698 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1699 .arg4_type = ARG_CONST_SIZE,
05c74e5e
DB
1700};
1701
4e1ec56c
DB
1702BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1703 u32, offset, void *, to, u32, len, u32, start_header)
1704{
1705 u8 *ptr;
1706
1707 if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
1708 goto err_clear;
1709
1710 switch (start_header) {
1711 case BPF_HDR_START_MAC:
1712 ptr = skb_mac_header(skb) + offset;
1713 break;
1714 case BPF_HDR_START_NET:
1715 ptr = skb_network_header(skb) + offset;
1716 break;
1717 default:
1718 goto err_clear;
1719 }
1720
1721 if (likely(ptr >= skb_mac_header(skb) &&
1722 ptr + len <= skb_tail_pointer(skb))) {
1723 memcpy(to, ptr, len);
1724 return 0;
1725 }
1726
1727err_clear:
1728 memset(to, 0, len);
1729 return -EFAULT;
1730}
1731
1732static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1733 .func = bpf_skb_load_bytes_relative,
1734 .gpl_only = false,
1735 .ret_type = RET_INTEGER,
1736 .arg1_type = ARG_PTR_TO_CTX,
1737 .arg2_type = ARG_ANYTHING,
1738 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1739 .arg4_type = ARG_CONST_SIZE,
1740 .arg5_type = ARG_ANYTHING,
1741};
1742
36bbef52
DB
1743BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1744{
1745 /* Idea is the following: should the needed direct read/write
1746 * test fail during runtime, we can pull in more data and redo
1747 * again, since implicitly, we invalidate previous checks here.
1748 *
1749 * Or, since we know how much we need to make read/writeable,
1750 * this can be done once at the program beginning for direct
1751 * access case. By this we overcome limitations of only current
1752 * headroom being accessible.
1753 */
1754 return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1755}
1756
1757static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1758 .func = bpf_skb_pull_data,
1759 .gpl_only = false,
1760 .ret_type = RET_INTEGER,
1761 .arg1_type = ARG_PTR_TO_CTX,
1762 .arg2_type = ARG_ANYTHING,
1763};
1764
f3694e00
DB
1765BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1766 u64, from, u64, to, u64, flags)
91bc4822 1767{
0ed661d5 1768 __sum16 *ptr;
91bc4822 1769
781c53bc
DB
1770 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1771 return -EINVAL;
0ed661d5 1772 if (unlikely(offset > 0xffff || offset & 1))
91bc4822 1773 return -EFAULT;
0ed661d5 1774 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
91bc4822
AS
1775 return -EFAULT;
1776
0ed661d5 1777 ptr = (__sum16 *)(skb->data + offset);
781c53bc 1778 switch (flags & BPF_F_HDR_FIELD_MASK) {
8050c0f0
DB
1779 case 0:
1780 if (unlikely(from != 0))
1781 return -EINVAL;
1782
1783 csum_replace_by_diff(ptr, to);
1784 break;
91bc4822
AS
1785 case 2:
1786 csum_replace2(ptr, from, to);
1787 break;
1788 case 4:
1789 csum_replace4(ptr, from, to);
1790 break;
1791 default:
1792 return -EINVAL;
1793 }
1794
91bc4822
AS
1795 return 0;
1796}
1797
577c50aa 1798static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
91bc4822
AS
1799 .func = bpf_l3_csum_replace,
1800 .gpl_only = false,
1801 .ret_type = RET_INTEGER,
1802 .arg1_type = ARG_PTR_TO_CTX,
1803 .arg2_type = ARG_ANYTHING,
1804 .arg3_type = ARG_ANYTHING,
1805 .arg4_type = ARG_ANYTHING,
1806 .arg5_type = ARG_ANYTHING,
1807};
1808
f3694e00
DB
1809BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1810 u64, from, u64, to, u64, flags)
91bc4822 1811{
781c53bc 1812 bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
2f72959a 1813 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
d1b662ad 1814 bool do_mforce = flags & BPF_F_MARK_ENFORCE;
0ed661d5 1815 __sum16 *ptr;
91bc4822 1816
d1b662ad
DB
1817 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1818 BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
781c53bc 1819 return -EINVAL;
0ed661d5 1820 if (unlikely(offset > 0xffff || offset & 1))
91bc4822 1821 return -EFAULT;
0ed661d5 1822 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
91bc4822
AS
1823 return -EFAULT;
1824
0ed661d5 1825 ptr = (__sum16 *)(skb->data + offset);
d1b662ad 1826 if (is_mmzero && !do_mforce && !*ptr)
2f72959a 1827 return 0;
91bc4822 1828
781c53bc 1829 switch (flags & BPF_F_HDR_FIELD_MASK) {
7d672345
DB
1830 case 0:
1831 if (unlikely(from != 0))
1832 return -EINVAL;
1833
1834 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1835 break;
91bc4822
AS
1836 case 2:
1837 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1838 break;
1839 case 4:
1840 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1841 break;
1842 default:
1843 return -EINVAL;
1844 }
1845
2f72959a
DB
1846 if (is_mmzero && !*ptr)
1847 *ptr = CSUM_MANGLED_0;
91bc4822
AS
1848 return 0;
1849}
1850
577c50aa 1851static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
91bc4822
AS
1852 .func = bpf_l4_csum_replace,
1853 .gpl_only = false,
1854 .ret_type = RET_INTEGER,
1855 .arg1_type = ARG_PTR_TO_CTX,
1856 .arg2_type = ARG_ANYTHING,
1857 .arg3_type = ARG_ANYTHING,
1858 .arg4_type = ARG_ANYTHING,
1859 .arg5_type = ARG_ANYTHING,
608cd71a
AS
1860};
1861
f3694e00
DB
1862BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1863 __be32 *, to, u32, to_size, __wsum, seed)
7d672345 1864{
21cafc1d 1865 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
f3694e00 1866 u32 diff_size = from_size + to_size;
7d672345
DB
1867 int i, j = 0;
1868
1869 /* This is quite flexible, some examples:
1870 *
1871 * from_size == 0, to_size > 0, seed := csum --> pushing data
1872 * from_size > 0, to_size == 0, seed := csum --> pulling data
1873 * from_size > 0, to_size > 0, seed := 0 --> diffing data
1874 *
1875 * Even for diffing, from_size and to_size don't need to be equal.
1876 */
1877 if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
1878 diff_size > sizeof(sp->diff)))
1879 return -EINVAL;
1880
1881 for (i = 0; i < from_size / sizeof(__be32); i++, j++)
1882 sp->diff[j] = ~from[i];
1883 for (i = 0; i < to_size / sizeof(__be32); i++, j++)
1884 sp->diff[j] = to[i];
1885
1886 return csum_partial(sp->diff, diff_size, seed);
1887}
1888
577c50aa 1889static const struct bpf_func_proto bpf_csum_diff_proto = {
7d672345
DB
1890 .func = bpf_csum_diff,
1891 .gpl_only = false,
36bbef52 1892 .pkt_access = true,
7d672345 1893 .ret_type = RET_INTEGER,
db1ac496 1894 .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
39f19ebb 1895 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
db1ac496 1896 .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
39f19ebb 1897 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
7d672345
DB
1898 .arg5_type = ARG_ANYTHING,
1899};
1900
36bbef52
DB
1901BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
1902{
1903 /* The interface is to be used in combination with bpf_csum_diff()
1904 * for direct packet writes. csum rotation for alignment as well
1905 * as emulating csum_sub() can be done from the eBPF program.
1906 */
1907 if (skb->ip_summed == CHECKSUM_COMPLETE)
1908 return (skb->csum = csum_add(skb->csum, csum));
1909
1910 return -ENOTSUPP;
1911}
1912
1913static const struct bpf_func_proto bpf_csum_update_proto = {
1914 .func = bpf_csum_update,
1915 .gpl_only = false,
1916 .ret_type = RET_INTEGER,
1917 .arg1_type = ARG_PTR_TO_CTX,
1918 .arg2_type = ARG_ANYTHING,
1919};
1920
a70b506e
DB
1921static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1922{
a70b506e
DB
1923 return dev_forward_skb(dev, skb);
1924}
1925
4e3264d2
MKL
1926static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1927 struct sk_buff *skb)
1928{
1929 int ret = ____dev_forward_skb(dev, skb);
1930
1931 if (likely(!ret)) {
1932 skb->dev = dev;
1933 ret = netif_rx(skb);
1934 }
1935
1936 return ret;
1937}
1938
a70b506e
DB
1939static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1940{
1941 int ret;
1942
1943 if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
1944 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1945 kfree_skb(skb);
1946 return -ENETDOWN;
1947 }
1948
1949 skb->dev = dev;
1950
1951 __this_cpu_inc(xmit_recursion);
1952 ret = dev_queue_xmit(skb);
1953 __this_cpu_dec(xmit_recursion);
1954
1955 return ret;
1956}
1957
4e3264d2
MKL
1958static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1959 u32 flags)
1960{
1961 /* skb->mac_len is not set on normal egress */
1962 unsigned int mlen = skb->network_header - skb->mac_header;
1963
1964 __skb_pull(skb, mlen);
1965
1966 /* At ingress, the mac header has already been pulled once.
1967 * At egress, skb_pospull_rcsum has to be done in case that
1968 * the skb is originated from ingress (i.e. a forwarded skb)
1969 * to ensure that rcsum starts at net header.
1970 */
1971 if (!skb_at_tc_ingress(skb))
1972 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1973 skb_pop_mac_header(skb);
1974 skb_reset_mac_len(skb);
1975 return flags & BPF_F_INGRESS ?
1976 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1977}
1978
1979static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
1980 u32 flags)
1981{
3a0af8fd
TG
1982 /* Verify that a link layer header is carried */
1983 if (unlikely(skb->mac_header >= skb->network_header)) {
1984 kfree_skb(skb);
1985 return -ERANGE;
1986 }
1987
4e3264d2
MKL
1988 bpf_push_mac_rcsum(skb);
1989 return flags & BPF_F_INGRESS ?
1990 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1991}
1992
1993static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
1994 u32 flags)
1995{
c491680f 1996 if (dev_is_mac_header_xmit(dev))
4e3264d2 1997 return __bpf_redirect_common(skb, dev, flags);
c491680f
DB
1998 else
1999 return __bpf_redirect_no_mac(skb, dev, flags);
4e3264d2
MKL
2000}
2001
f3694e00 2002BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
3896d655 2003{
3896d655 2004 struct net_device *dev;
36bbef52
DB
2005 struct sk_buff *clone;
2006 int ret;
3896d655 2007
781c53bc
DB
2008 if (unlikely(flags & ~(BPF_F_INGRESS)))
2009 return -EINVAL;
2010
3896d655
AS
2011 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2012 if (unlikely(!dev))
2013 return -EINVAL;
2014
36bbef52
DB
2015 clone = skb_clone(skb, GFP_ATOMIC);
2016 if (unlikely(!clone))
3896d655
AS
2017 return -ENOMEM;
2018
36bbef52
DB
2019 /* For direct write, we need to keep the invariant that the skbs
2020 * we're dealing with need to be uncloned. Should uncloning fail
2021 * here, we need to free the just generated clone to unclone once
2022 * again.
2023 */
2024 ret = bpf_try_make_head_writable(skb);
2025 if (unlikely(ret)) {
2026 kfree_skb(clone);
2027 return -ENOMEM;
2028 }
2029
4e3264d2 2030 return __bpf_redirect(clone, dev, flags);
3896d655
AS
2031}
2032
577c50aa 2033static const struct bpf_func_proto bpf_clone_redirect_proto = {
3896d655
AS
2034 .func = bpf_clone_redirect,
2035 .gpl_only = false,
2036 .ret_type = RET_INTEGER,
2037 .arg1_type = ARG_PTR_TO_CTX,
2038 .arg2_type = ARG_ANYTHING,
2039 .arg3_type = ARG_ANYTHING,
2040};
2041
27b29f63
AS
2042struct redirect_info {
2043 u32 ifindex;
2044 u32 flags;
97f91a7c 2045 struct bpf_map *map;
11393cc9 2046 struct bpf_map *map_to_flush;
7c300131 2047 unsigned long map_owner;
27b29f63
AS
2048};
2049
2050static DEFINE_PER_CPU(struct redirect_info, redirect_info);
781c53bc 2051
f3694e00 2052BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
27b29f63
AS
2053{
2054 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
2055
781c53bc
DB
2056 if (unlikely(flags & ~(BPF_F_INGRESS)))
2057 return TC_ACT_SHOT;
2058
27b29f63
AS
2059 ri->ifindex = ifindex;
2060 ri->flags = flags;
781c53bc 2061
27b29f63
AS
2062 return TC_ACT_REDIRECT;
2063}
2064
2065int skb_do_redirect(struct sk_buff *skb)
2066{
2067 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
2068 struct net_device *dev;
2069
2070 dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
2071 ri->ifindex = 0;
2072 if (unlikely(!dev)) {
2073 kfree_skb(skb);
2074 return -EINVAL;
2075 }
2076
4e3264d2 2077 return __bpf_redirect(skb, dev, ri->flags);
27b29f63
AS
2078}
2079
577c50aa 2080static const struct bpf_func_proto bpf_redirect_proto = {
27b29f63
AS
2081 .func = bpf_redirect,
2082 .gpl_only = false,
2083 .ret_type = RET_INTEGER,
2084 .arg1_type = ARG_ANYTHING,
2085 .arg2_type = ARG_ANYTHING,
2086};
2087
81110384
JF
2088BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
2089 struct bpf_map *, map, void *, key, u64, flags)
2090{
2091 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2092
2093 /* If user passes invalid input drop the packet. */
2094 if (unlikely(flags & ~(BPF_F_INGRESS)))
2095 return SK_DROP;
2096
2097 tcb->bpf.flags = flags;
2098 tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
2099 if (!tcb->bpf.sk_redir)
2100 return SK_DROP;
2101
2102 return SK_PASS;
2103}
2104
2105static const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
2106 .func = bpf_sk_redirect_hash,
2107 .gpl_only = false,
2108 .ret_type = RET_INTEGER,
2109 .arg1_type = ARG_PTR_TO_CTX,
2110 .arg2_type = ARG_CONST_MAP_PTR,
2111 .arg3_type = ARG_PTR_TO_MAP_KEY,
2112 .arg4_type = ARG_ANYTHING,
2113};
2114
34f79502
JF
2115BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
2116 struct bpf_map *, map, u32, key, u64, flags)
174a79ff 2117{
34f79502 2118 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
174a79ff 2119
bfa64075 2120 /* If user passes invalid input drop the packet. */
fa246693 2121 if (unlikely(flags & ~(BPF_F_INGRESS)))
bfa64075 2122 return SK_DROP;
174a79ff 2123
34f79502 2124 tcb->bpf.flags = flags;
e5cd3abc
JF
2125 tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
2126 if (!tcb->bpf.sk_redir)
2127 return SK_DROP;
174a79ff 2128
bfa64075 2129 return SK_PASS;
174a79ff
JF
2130}
2131
34f79502 2132struct sock *do_sk_redirect_map(struct sk_buff *skb)
174a79ff 2133{
34f79502 2134 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
174a79ff 2135
e5cd3abc 2136 return tcb->bpf.sk_redir;
174a79ff
JF
2137}
2138
2139static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
2140 .func = bpf_sk_redirect_map,
2141 .gpl_only = false,
2142 .ret_type = RET_INTEGER,
34f79502
JF
2143 .arg1_type = ARG_PTR_TO_CTX,
2144 .arg2_type = ARG_CONST_MAP_PTR,
174a79ff 2145 .arg3_type = ARG_ANYTHING,
34f79502 2146 .arg4_type = ARG_ANYTHING,
174a79ff
JF
2147};
2148
81110384
JF
2149BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg_buff *, msg,
2150 struct bpf_map *, map, void *, key, u64, flags)
2151{
2152 /* If user passes invalid input drop the packet. */
2153 if (unlikely(flags & ~(BPF_F_INGRESS)))
2154 return SK_DROP;
2155
2156 msg->flags = flags;
2157 msg->sk_redir = __sock_hash_lookup_elem(map, key);
2158 if (!msg->sk_redir)
2159 return SK_DROP;
2160
2161 return SK_PASS;
2162}
2163
2164static const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
2165 .func = bpf_msg_redirect_hash,
2166 .gpl_only = false,
2167 .ret_type = RET_INTEGER,
2168 .arg1_type = ARG_PTR_TO_CTX,
2169 .arg2_type = ARG_CONST_MAP_PTR,
2170 .arg3_type = ARG_PTR_TO_MAP_KEY,
2171 .arg4_type = ARG_ANYTHING,
2172};
2173
4f738adb
JF
2174BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
2175 struct bpf_map *, map, u32, key, u64, flags)
2176{
2177 /* If user passes invalid input drop the packet. */
8934ce2f 2178 if (unlikely(flags & ~(BPF_F_INGRESS)))
4f738adb
JF
2179 return SK_DROP;
2180
4f738adb 2181 msg->flags = flags;
e5cd3abc
JF
2182 msg->sk_redir = __sock_map_lookup_elem(map, key);
2183 if (!msg->sk_redir)
2184 return SK_DROP;
4f738adb
JF
2185
2186 return SK_PASS;
2187}
2188
2189struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
2190{
e5cd3abc 2191 return msg->sk_redir;
4f738adb
JF
2192}
2193
2194static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
2195 .func = bpf_msg_redirect_map,
2196 .gpl_only = false,
2197 .ret_type = RET_INTEGER,
2198 .arg1_type = ARG_PTR_TO_CTX,
2199 .arg2_type = ARG_CONST_MAP_PTR,
2200 .arg3_type = ARG_ANYTHING,
2201 .arg4_type = ARG_ANYTHING,
2202};
2203
2a100317
JF
2204BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
2205{
2206 msg->apply_bytes = bytes;
2207 return 0;
2208}
2209
2210static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2211 .func = bpf_msg_apply_bytes,
2212 .gpl_only = false,
2213 .ret_type = RET_INTEGER,
2214 .arg1_type = ARG_PTR_TO_CTX,
2215 .arg2_type = ARG_ANYTHING,
2216};
2217
91843d54
JF
2218BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
2219{
2220 msg->cork_bytes = bytes;
2221 return 0;
2222}
2223
2224static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2225 .func = bpf_msg_cork_bytes,
2226 .gpl_only = false,
2227 .ret_type = RET_INTEGER,
2228 .arg1_type = ARG_PTR_TO_CTX,
2229 .arg2_type = ARG_ANYTHING,
2230};
2231
015632bb
JF
2232BPF_CALL_4(bpf_msg_pull_data,
2233 struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
2234{
2235 unsigned int len = 0, offset = 0, copy = 0;
2236 struct scatterlist *sg = msg->sg_data;
2237 int first_sg, last_sg, i, shift;
2238 unsigned char *p, *to, *from;
2239 int bytes = end - start;
2240 struct page *page;
2241
2242 if (unlikely(flags || end <= start))
2243 return -EINVAL;
2244
2245 /* First find the starting scatterlist element */
2246 i = msg->sg_start;
2247 do {
2248 len = sg[i].length;
2249 offset += len;
2250 if (start < offset + len)
2251 break;
2252 i++;
2253 if (i == MAX_SKB_FRAGS)
2254 i = 0;
2255 } while (i != msg->sg_end);
2256
2257 if (unlikely(start >= offset + len))
2258 return -EINVAL;
2259
2260 if (!msg->sg_copy[i] && bytes <= len)
2261 goto out;
2262
2263 first_sg = i;
2264
2265 /* At this point we need to linearize multiple scatterlist
2266 * elements or a single shared page. Either way we need to
2267 * copy into a linear buffer exclusively owned by BPF. Then
2268 * place the buffer in the scatterlist and fixup the original
2269 * entries by removing the entries now in the linear buffer
2270 * and shifting the remaining entries. For now we do not try
2271 * to copy partial entries to avoid complexity of running out
2272 * of sg_entry slots. The downside is reading a single byte
2273 * will copy the entire sg entry.
2274 */
2275 do {
2276 copy += sg[i].length;
2277 i++;
2278 if (i == MAX_SKB_FRAGS)
2279 i = 0;
2280 if (bytes < copy)
2281 break;
2282 } while (i != msg->sg_end);
2283 last_sg = i;
2284
2285 if (unlikely(copy < end - start))
2286 return -EINVAL;
2287
2288 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
2289 if (unlikely(!page))
2290 return -ENOMEM;
2291 p = page_address(page);
2292 offset = 0;
2293
2294 i = first_sg;
2295 do {
2296 from = sg_virt(&sg[i]);
2297 len = sg[i].length;
2298 to = p + offset;
2299
2300 memcpy(to, from, len);
2301 offset += len;
2302 sg[i].length = 0;
2303 put_page(sg_page(&sg[i]));
2304
2305 i++;
2306 if (i == MAX_SKB_FRAGS)
2307 i = 0;
2308 } while (i != last_sg);
2309
2310 sg[first_sg].length = copy;
2311 sg_set_page(&sg[first_sg], page, copy, 0);
2312
2313 /* To repair sg ring we need to shift entries. If we only
2314 * had a single entry though we can just replace it and
2315 * be done. Otherwise walk the ring and shift the entries.
2316 */
2317 shift = last_sg - first_sg - 1;
2318 if (!shift)
2319 goto out;
2320
2321 i = first_sg + 1;
2322 do {
2323 int move_from;
2324
2325 if (i + shift >= MAX_SKB_FRAGS)
2326 move_from = i + shift - MAX_SKB_FRAGS;
2327 else
2328 move_from = i + shift;
2329
2330 if (move_from == msg->sg_end)
2331 break;
2332
2333 sg[i] = sg[move_from];
2334 sg[move_from].length = 0;
2335 sg[move_from].page_link = 0;
2336 sg[move_from].offset = 0;
2337
2338 i++;
2339 if (i == MAX_SKB_FRAGS)
2340 i = 0;
2341 } while (1);
2342 msg->sg_end -= shift;
2343 if (msg->sg_end < 0)
2344 msg->sg_end += MAX_SKB_FRAGS;
2345out:
2346 msg->data = sg_virt(&sg[i]) + start - offset;
2347 msg->data_end = msg->data + bytes;
2348
2349 return 0;
2350}
2351
2352static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2353 .func = bpf_msg_pull_data,
2354 .gpl_only = false,
2355 .ret_type = RET_INTEGER,
2356 .arg1_type = ARG_PTR_TO_CTX,
2357 .arg2_type = ARG_ANYTHING,
2358 .arg3_type = ARG_ANYTHING,
2359 .arg4_type = ARG_ANYTHING,
2360};
2361
f3694e00 2362BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
8d20aabe 2363{
f3694e00 2364 return task_get_classid(skb);
8d20aabe
DB
2365}
2366
2367static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2368 .func = bpf_get_cgroup_classid,
2369 .gpl_only = false,
2370 .ret_type = RET_INTEGER,
2371 .arg1_type = ARG_PTR_TO_CTX,
2372};
2373
f3694e00 2374BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
c46646d0 2375{
f3694e00 2376 return dst_tclassid(skb);
c46646d0
DB
2377}
2378
2379static const struct bpf_func_proto bpf_get_route_realm_proto = {
2380 .func = bpf_get_route_realm,
2381 .gpl_only = false,
2382 .ret_type = RET_INTEGER,
2383 .arg1_type = ARG_PTR_TO_CTX,
2384};
2385
f3694e00 2386BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
13c5c240
DB
2387{
2388 /* If skb_clear_hash() was called due to mangling, we can
2389 * trigger SW recalculation here. Later access to hash
2390 * can then use the inline skb->hash via context directly
2391 * instead of calling this helper again.
2392 */
f3694e00 2393 return skb_get_hash(skb);
13c5c240
DB
2394}
2395
2396static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2397 .func = bpf_get_hash_recalc,
2398 .gpl_only = false,
2399 .ret_type = RET_INTEGER,
2400 .arg1_type = ARG_PTR_TO_CTX,
2401};
2402
7a4b28c6
DB
2403BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2404{
2405 /* After all direct packet write, this can be used once for
2406 * triggering a lazy recalc on next skb_get_hash() invocation.
2407 */
2408 skb_clear_hash(skb);
2409 return 0;
2410}
2411
2412static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2413 .func = bpf_set_hash_invalid,
2414 .gpl_only = false,
2415 .ret_type = RET_INTEGER,
2416 .arg1_type = ARG_PTR_TO_CTX,
2417};
2418
ded092cd
DB
2419BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2420{
2421 /* Set user specified hash as L4(+), so that it gets returned
2422 * on skb_get_hash() call unless BPF prog later on triggers a
2423 * skb_clear_hash().
2424 */
2425 __skb_set_sw_hash(skb, hash, true);
2426 return 0;
2427}
2428
2429static const struct bpf_func_proto bpf_set_hash_proto = {
2430 .func = bpf_set_hash,
2431 .gpl_only = false,
2432 .ret_type = RET_INTEGER,
2433 .arg1_type = ARG_PTR_TO_CTX,
2434 .arg2_type = ARG_ANYTHING,
2435};
2436
f3694e00
DB
2437BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2438 u16, vlan_tci)
4e10df9a 2439{
db58ba45 2440 int ret;
4e10df9a
AS
2441
2442 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2443 vlan_proto != htons(ETH_P_8021AD)))
2444 vlan_proto = htons(ETH_P_8021Q);
2445
8065694e 2446 bpf_push_mac_rcsum(skb);
db58ba45 2447 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
8065694e
DB
2448 bpf_pull_mac_rcsum(skb);
2449
6aaae2b6 2450 bpf_compute_data_pointers(skb);
db58ba45 2451 return ret;
4e10df9a
AS
2452}
2453
93731ef0 2454static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
4e10df9a
AS
2455 .func = bpf_skb_vlan_push,
2456 .gpl_only = false,
2457 .ret_type = RET_INTEGER,
2458 .arg1_type = ARG_PTR_TO_CTX,
2459 .arg2_type = ARG_ANYTHING,
2460 .arg3_type = ARG_ANYTHING,
2461};
2462
f3694e00 2463BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
4e10df9a 2464{
db58ba45 2465 int ret;
4e10df9a 2466
8065694e 2467 bpf_push_mac_rcsum(skb);
db58ba45 2468 ret = skb_vlan_pop(skb);
8065694e
DB
2469 bpf_pull_mac_rcsum(skb);
2470
6aaae2b6 2471 bpf_compute_data_pointers(skb);
db58ba45 2472 return ret;
4e10df9a
AS
2473}
2474
93731ef0 2475static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
4e10df9a
AS
2476 .func = bpf_skb_vlan_pop,
2477 .gpl_only = false,
2478 .ret_type = RET_INTEGER,
2479 .arg1_type = ARG_PTR_TO_CTX,
2480};
2481
6578171a
DB
2482static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2483{
2484 /* Caller already did skb_cow() with len as headroom,
2485 * so no need to do it here.
2486 */
2487 skb_push(skb, len);
2488 memmove(skb->data, skb->data + len, off);
2489 memset(skb->data + off, 0, len);
2490
2491 /* No skb_postpush_rcsum(skb, skb->data + off, len)
2492 * needed here as it does not change the skb->csum
2493 * result for checksum complete when summing over
2494 * zeroed blocks.
2495 */
2496 return 0;
2497}
2498
2499static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2500{
2501 /* skb_ensure_writable() is not needed here, as we're
2502 * already working on an uncloned skb.
2503 */
2504 if (unlikely(!pskb_may_pull(skb, off + len)))
2505 return -ENOMEM;
2506
2507 skb_postpull_rcsum(skb, skb->data + off, len);
2508 memmove(skb->data + len, skb->data, off);
2509 __skb_pull(skb, len);
2510
2511 return 0;
2512}
2513
2514static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2515{
2516 bool trans_same = skb->transport_header == skb->network_header;
2517 int ret;
2518
2519 /* There's no need for __skb_push()/__skb_pull() pair to
2520 * get to the start of the mac header as we're guaranteed
2521 * to always start from here under eBPF.
2522 */
2523 ret = bpf_skb_generic_push(skb, off, len);
2524 if (likely(!ret)) {
2525 skb->mac_header -= len;
2526 skb->network_header -= len;
2527 if (trans_same)
2528 skb->transport_header = skb->network_header;
2529 }
2530
2531 return ret;
2532}
2533
2534static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2535{
2536 bool trans_same = skb->transport_header == skb->network_header;
2537 int ret;
2538
2539 /* Same here, __skb_push()/__skb_pull() pair not needed. */
2540 ret = bpf_skb_generic_pop(skb, off, len);
2541 if (likely(!ret)) {
2542 skb->mac_header += len;
2543 skb->network_header += len;
2544 if (trans_same)
2545 skb->transport_header = skb->network_header;
2546 }
2547
2548 return ret;
2549}
2550
2551static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2552{
2553 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
0daf4349 2554 u32 off = skb_mac_header_len(skb);
6578171a
DB
2555 int ret;
2556
d02f51cb
DA
2557 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2558 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2559 return -ENOTSUPP;
2560
6578171a
DB
2561 ret = skb_cow(skb, len_diff);
2562 if (unlikely(ret < 0))
2563 return ret;
2564
2565 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2566 if (unlikely(ret < 0))
2567 return ret;
2568
2569 if (skb_is_gso(skb)) {
d02f51cb
DA
2570 struct skb_shared_info *shinfo = skb_shinfo(skb);
2571
880388aa
DM
2572 /* SKB_GSO_TCPV4 needs to be changed into
2573 * SKB_GSO_TCPV6.
6578171a 2574 */
d02f51cb
DA
2575 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2576 shinfo->gso_type &= ~SKB_GSO_TCPV4;
2577 shinfo->gso_type |= SKB_GSO_TCPV6;
6578171a
DB
2578 }
2579
2580 /* Due to IPv6 header, MSS needs to be downgraded. */
d02f51cb 2581 skb_decrease_gso_size(shinfo, len_diff);
6578171a 2582 /* Header must be checked, and gso_segs recomputed. */
d02f51cb
DA
2583 shinfo->gso_type |= SKB_GSO_DODGY;
2584 shinfo->gso_segs = 0;
6578171a
DB
2585 }
2586
2587 skb->protocol = htons(ETH_P_IPV6);
2588 skb_clear_hash(skb);
2589
2590 return 0;
2591}
2592
2593static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2594{
2595 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
0daf4349 2596 u32 off = skb_mac_header_len(skb);
6578171a
DB
2597 int ret;
2598
d02f51cb
DA
2599 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2600 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2601 return -ENOTSUPP;
2602
6578171a
DB
2603 ret = skb_unclone(skb, GFP_ATOMIC);
2604 if (unlikely(ret < 0))
2605 return ret;
2606
2607 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2608 if (unlikely(ret < 0))
2609 return ret;
2610
2611 if (skb_is_gso(skb)) {
d02f51cb
DA
2612 struct skb_shared_info *shinfo = skb_shinfo(skb);
2613
880388aa
DM
2614 /* SKB_GSO_TCPV6 needs to be changed into
2615 * SKB_GSO_TCPV4.
6578171a 2616 */
d02f51cb
DA
2617 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2618 shinfo->gso_type &= ~SKB_GSO_TCPV6;
2619 shinfo->gso_type |= SKB_GSO_TCPV4;
6578171a
DB
2620 }
2621
2622 /* Due to IPv4 header, MSS can be upgraded. */
d02f51cb 2623 skb_increase_gso_size(shinfo, len_diff);
6578171a 2624 /* Header must be checked, and gso_segs recomputed. */
d02f51cb
DA
2625 shinfo->gso_type |= SKB_GSO_DODGY;
2626 shinfo->gso_segs = 0;
6578171a
DB
2627 }
2628
2629 skb->protocol = htons(ETH_P_IP);
2630 skb_clear_hash(skb);
2631
2632 return 0;
2633}
2634
2635static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2636{
2637 __be16 from_proto = skb->protocol;
2638
2639 if (from_proto == htons(ETH_P_IP) &&
2640 to_proto == htons(ETH_P_IPV6))
2641 return bpf_skb_proto_4_to_6(skb);
2642
2643 if (from_proto == htons(ETH_P_IPV6) &&
2644 to_proto == htons(ETH_P_IP))
2645 return bpf_skb_proto_6_to_4(skb);
2646
2647 return -ENOTSUPP;
2648}
2649
f3694e00
DB
2650BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2651 u64, flags)
6578171a 2652{
6578171a
DB
2653 int ret;
2654
2655 if (unlikely(flags))
2656 return -EINVAL;
2657
2658 /* General idea is that this helper does the basic groundwork
2659 * needed for changing the protocol, and eBPF program fills the
2660 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2661 * and other helpers, rather than passing a raw buffer here.
2662 *
2663 * The rationale is to keep this minimal and without a need to
2664 * deal with raw packet data. F.e. even if we would pass buffers
2665 * here, the program still needs to call the bpf_lX_csum_replace()
2666 * helpers anyway. Plus, this way we keep also separation of
2667 * concerns, since f.e. bpf_skb_store_bytes() should only take
2668 * care of stores.
2669 *
2670 * Currently, additional options and extension header space are
2671 * not supported, but flags register is reserved so we can adapt
2672 * that. For offloads, we mark packet as dodgy, so that headers
2673 * need to be verified first.
2674 */
2675 ret = bpf_skb_proto_xlat(skb, proto);
6aaae2b6 2676 bpf_compute_data_pointers(skb);
6578171a
DB
2677 return ret;
2678}
2679
2680static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2681 .func = bpf_skb_change_proto,
2682 .gpl_only = false,
2683 .ret_type = RET_INTEGER,
2684 .arg1_type = ARG_PTR_TO_CTX,
2685 .arg2_type = ARG_ANYTHING,
2686 .arg3_type = ARG_ANYTHING,
2687};
2688
f3694e00 2689BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
d2485c42 2690{
d2485c42 2691 /* We only allow a restricted subset to be changed for now. */
45c7fffa
DB
2692 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2693 !skb_pkt_type_ok(pkt_type)))
d2485c42
DB
2694 return -EINVAL;
2695
2696 skb->pkt_type = pkt_type;
2697 return 0;
2698}
2699
2700static const struct bpf_func_proto bpf_skb_change_type_proto = {
2701 .func = bpf_skb_change_type,
2702 .gpl_only = false,
2703 .ret_type = RET_INTEGER,
2704 .arg1_type = ARG_PTR_TO_CTX,
2705 .arg2_type = ARG_ANYTHING,
2706};
2707
2be7e212
DB
2708static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2709{
2710 switch (skb->protocol) {
2711 case htons(ETH_P_IP):
2712 return sizeof(struct iphdr);
2713 case htons(ETH_P_IPV6):
2714 return sizeof(struct ipv6hdr);
2715 default:
2716 return ~0U;
2717 }
2718}
2719
2720static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2721{
2722 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2723 int ret;
2724
d02f51cb
DA
2725 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2726 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2727 return -ENOTSUPP;
2728
2be7e212
DB
2729 ret = skb_cow(skb, len_diff);
2730 if (unlikely(ret < 0))
2731 return ret;
2732
2733 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2734 if (unlikely(ret < 0))
2735 return ret;
2736
2737 if (skb_is_gso(skb)) {
d02f51cb
DA
2738 struct skb_shared_info *shinfo = skb_shinfo(skb);
2739
2be7e212 2740 /* Due to header grow, MSS needs to be downgraded. */
d02f51cb 2741 skb_decrease_gso_size(shinfo, len_diff);
2be7e212 2742 /* Header must be checked, and gso_segs recomputed. */
d02f51cb
DA
2743 shinfo->gso_type |= SKB_GSO_DODGY;
2744 shinfo->gso_segs = 0;
2be7e212
DB
2745 }
2746
2747 return 0;
2748}
2749
2750static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2751{
2752 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2753 int ret;
2754
d02f51cb
DA
2755 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2756 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2757 return -ENOTSUPP;
2758
2be7e212
DB
2759 ret = skb_unclone(skb, GFP_ATOMIC);
2760 if (unlikely(ret < 0))
2761 return ret;
2762
2763 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2764 if (unlikely(ret < 0))
2765 return ret;
2766
2767 if (skb_is_gso(skb)) {
d02f51cb
DA
2768 struct skb_shared_info *shinfo = skb_shinfo(skb);
2769
2be7e212 2770 /* Due to header shrink, MSS can be upgraded. */
d02f51cb 2771 skb_increase_gso_size(shinfo, len_diff);
2be7e212 2772 /* Header must be checked, and gso_segs recomputed. */
d02f51cb
DA
2773 shinfo->gso_type |= SKB_GSO_DODGY;
2774 shinfo->gso_segs = 0;
2be7e212
DB
2775 }
2776
2777 return 0;
2778}
2779
2780static u32 __bpf_skb_max_len(const struct sk_buff *skb)
2781{
2782 return skb->dev->mtu + skb->dev->hard_header_len;
2783}
2784
2785static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
2786{
2787 bool trans_same = skb->transport_header == skb->network_header;
2788 u32 len_cur, len_diff_abs = abs(len_diff);
2789 u32 len_min = bpf_skb_net_base_len(skb);
2790 u32 len_max = __bpf_skb_max_len(skb);
2791 __be16 proto = skb->protocol;
2792 bool shrink = len_diff < 0;
2793 int ret;
2794
2795 if (unlikely(len_diff_abs > 0xfffU))
2796 return -EFAULT;
2797 if (unlikely(proto != htons(ETH_P_IP) &&
2798 proto != htons(ETH_P_IPV6)))
2799 return -ENOTSUPP;
2800
2801 len_cur = skb->len - skb_network_offset(skb);
2802 if (skb_transport_header_was_set(skb) && !trans_same)
2803 len_cur = skb_network_header_len(skb);
2804 if ((shrink && (len_diff_abs >= len_cur ||
2805 len_cur - len_diff_abs < len_min)) ||
2806 (!shrink && (skb->len + len_diff_abs > len_max &&
2807 !skb_is_gso(skb))))
2808 return -ENOTSUPP;
2809
2810 ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
2811 bpf_skb_net_grow(skb, len_diff_abs);
2812
6aaae2b6 2813 bpf_compute_data_pointers(skb);
e4a6a342 2814 return ret;
2be7e212
DB
2815}
2816
2817BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
2818 u32, mode, u64, flags)
2819{
2820 if (unlikely(flags))
2821 return -EINVAL;
2822 if (likely(mode == BPF_ADJ_ROOM_NET))
2823 return bpf_skb_adjust_net(skb, len_diff);
2824
2825 return -ENOTSUPP;
2826}
2827
2828static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
2829 .func = bpf_skb_adjust_room,
2830 .gpl_only = false,
2831 .ret_type = RET_INTEGER,
2832 .arg1_type = ARG_PTR_TO_CTX,
2833 .arg2_type = ARG_ANYTHING,
2834 .arg3_type = ARG_ANYTHING,
2835 .arg4_type = ARG_ANYTHING,
2836};
2837
5293efe6
DB
2838static u32 __bpf_skb_min_len(const struct sk_buff *skb)
2839{
2840 u32 min_len = skb_network_offset(skb);
2841
2842 if (skb_transport_header_was_set(skb))
2843 min_len = skb_transport_offset(skb);
2844 if (skb->ip_summed == CHECKSUM_PARTIAL)
2845 min_len = skb_checksum_start_offset(skb) +
2846 skb->csum_offset + sizeof(__sum16);
2847 return min_len;
2848}
2849
5293efe6
DB
2850static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
2851{
2852 unsigned int old_len = skb->len;
2853 int ret;
2854
2855 ret = __skb_grow_rcsum(skb, new_len);
2856 if (!ret)
2857 memset(skb->data + old_len, 0, new_len - old_len);
2858 return ret;
2859}
2860
2861static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
2862{
2863 return __skb_trim_rcsum(skb, new_len);
2864}
2865
f3694e00
DB
2866BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2867 u64, flags)
5293efe6 2868{
5293efe6
DB
2869 u32 max_len = __bpf_skb_max_len(skb);
2870 u32 min_len = __bpf_skb_min_len(skb);
5293efe6
DB
2871 int ret;
2872
2873 if (unlikely(flags || new_len > max_len || new_len < min_len))
2874 return -EINVAL;
2875 if (skb->encapsulation)
2876 return -ENOTSUPP;
2877
2878 /* The basic idea of this helper is that it's performing the
2879 * needed work to either grow or trim an skb, and eBPF program
2880 * rewrites the rest via helpers like bpf_skb_store_bytes(),
2881 * bpf_lX_csum_replace() and others rather than passing a raw
2882 * buffer here. This one is a slow path helper and intended
2883 * for replies with control messages.
2884 *
2885 * Like in bpf_skb_change_proto(), we want to keep this rather
2886 * minimal and without protocol specifics so that we are able
2887 * to separate concerns as in bpf_skb_store_bytes() should only
2888 * be the one responsible for writing buffers.
2889 *
2890 * It's really expected to be a slow path operation here for
2891 * control message replies, so we're implicitly linearizing,
2892 * uncloning and drop offloads from the skb by this.
2893 */
2894 ret = __bpf_try_make_writable(skb, skb->len);
2895 if (!ret) {
2896 if (new_len > skb->len)
2897 ret = bpf_skb_grow_rcsum(skb, new_len);
2898 else if (new_len < skb->len)
2899 ret = bpf_skb_trim_rcsum(skb, new_len);
2900 if (!ret && skb_is_gso(skb))
2901 skb_gso_reset(skb);
2902 }
2903
6aaae2b6 2904 bpf_compute_data_pointers(skb);
5293efe6
DB
2905 return ret;
2906}
2907
2908static const struct bpf_func_proto bpf_skb_change_tail_proto = {
2909 .func = bpf_skb_change_tail,
2910 .gpl_only = false,
2911 .ret_type = RET_INTEGER,
2912 .arg1_type = ARG_PTR_TO_CTX,
2913 .arg2_type = ARG_ANYTHING,
2914 .arg3_type = ARG_ANYTHING,
2915};
2916
3a0af8fd
TG
2917BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
2918 u64, flags)
2919{
2920 u32 max_len = __bpf_skb_max_len(skb);
2921 u32 new_len = skb->len + head_room;
2922 int ret;
2923
2924 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
2925 new_len < skb->len))
2926 return -EINVAL;
2927
2928 ret = skb_cow(skb, head_room);
2929 if (likely(!ret)) {
2930 /* Idea for this helper is that we currently only
2931 * allow to expand on mac header. This means that
2932 * skb->protocol network header, etc, stay as is.
2933 * Compared to bpf_skb_change_tail(), we're more
2934 * flexible due to not needing to linearize or
2935 * reset GSO. Intention for this helper is to be
2936 * used by an L3 skb that needs to push mac header
2937 * for redirection into L2 device.
2938 */
2939 __skb_push(skb, head_room);
2940 memset(skb->data, 0, head_room);
2941 skb_reset_mac_header(skb);
2942 }
2943
6aaae2b6 2944 bpf_compute_data_pointers(skb);
3a0af8fd
TG
2945 return 0;
2946}
2947
2948static const struct bpf_func_proto bpf_skb_change_head_proto = {
2949 .func = bpf_skb_change_head,
2950 .gpl_only = false,
2951 .ret_type = RET_INTEGER,
2952 .arg1_type = ARG_PTR_TO_CTX,
2953 .arg2_type = ARG_ANYTHING,
2954 .arg3_type = ARG_ANYTHING,
2955};
2956
de8f3a83
DB
2957static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
2958{
2959 return xdp_data_meta_unsupported(xdp) ? 0 :
2960 xdp->data - xdp->data_meta;
2961}
2962
17bedab2
MKL
2963BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
2964{
6dfb970d 2965 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
de8f3a83 2966 unsigned long metalen = xdp_get_metalen(xdp);
97e19cce 2967 void *data_start = xdp_frame_end + metalen;
17bedab2
MKL
2968 void *data = xdp->data + offset;
2969
de8f3a83 2970 if (unlikely(data < data_start ||
17bedab2
MKL
2971 data > xdp->data_end - ETH_HLEN))
2972 return -EINVAL;
2973
de8f3a83
DB
2974 if (metalen)
2975 memmove(xdp->data_meta + offset,
2976 xdp->data_meta, metalen);
2977 xdp->data_meta += offset;
17bedab2
MKL
2978 xdp->data = data;
2979
2980 return 0;
2981}
2982
2983static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
2984 .func = bpf_xdp_adjust_head,
2985 .gpl_only = false,
2986 .ret_type = RET_INTEGER,
2987 .arg1_type = ARG_PTR_TO_CTX,
2988 .arg2_type = ARG_ANYTHING,
2989};
2990
b32cc5b9
NS
2991BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
2992{
2993 void *data_end = xdp->data_end + offset;
2994
2995 /* only shrinking is allowed for now. */
2996 if (unlikely(offset >= 0))
2997 return -EINVAL;
2998
2999 if (unlikely(data_end < xdp->data + ETH_HLEN))
3000 return -EINVAL;
3001
3002 xdp->data_end = data_end;
3003
3004 return 0;
3005}
3006
3007static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3008 .func = bpf_xdp_adjust_tail,
3009 .gpl_only = false,
3010 .ret_type = RET_INTEGER,
3011 .arg1_type = ARG_PTR_TO_CTX,
3012 .arg2_type = ARG_ANYTHING,
3013};
3014
de8f3a83
DB
3015BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3016{
97e19cce 3017 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
de8f3a83
DB
3018 void *meta = xdp->data_meta + offset;
3019 unsigned long metalen = xdp->data - meta;
3020
3021 if (xdp_data_meta_unsupported(xdp))
3022 return -ENOTSUPP;
97e19cce 3023 if (unlikely(meta < xdp_frame_end ||
de8f3a83
DB
3024 meta > xdp->data))
3025 return -EINVAL;
3026 if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3027 (metalen > 32)))
3028 return -EACCES;
3029
3030 xdp->data_meta = meta;
3031
3032 return 0;
3033}
3034
3035static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3036 .func = bpf_xdp_adjust_meta,
3037 .gpl_only = false,
3038 .ret_type = RET_INTEGER,
3039 .arg1_type = ARG_PTR_TO_CTX,
3040 .arg2_type = ARG_ANYTHING,
3041};
3042
11393cc9
JF
3043static int __bpf_tx_xdp(struct net_device *dev,
3044 struct bpf_map *map,
3045 struct xdp_buff *xdp,
3046 u32 index)
814abfab 3047{
44fa2dbd 3048 struct xdp_frame *xdpf;
735fc405 3049 int sent;
11393cc9
JF
3050
3051 if (!dev->netdev_ops->ndo_xdp_xmit) {
11393cc9 3052 return -EOPNOTSUPP;
814abfab 3053 }
11393cc9 3054
44fa2dbd
JDB
3055 xdpf = convert_to_xdp_frame(xdp);
3056 if (unlikely(!xdpf))
3057 return -EOVERFLOW;
3058
1e67575a 3059 sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
735fc405
JDB
3060 if (sent <= 0)
3061 return sent;
9c270af3
JDB
3062 return 0;
3063}
3064
3065static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3066 struct bpf_map *map,
3067 struct xdp_buff *xdp,
3068 u32 index)
3069{
3070 int err;
3071
1b1a251c
BT
3072 switch (map->map_type) {
3073 case BPF_MAP_TYPE_DEVMAP: {
67f29e07 3074 struct bpf_dtab_netdev *dst = fwd;
9c270af3 3075
38edddb8 3076 err = dev_map_enqueue(dst, xdp, dev_rx);
9c270af3
JDB
3077 if (err)
3078 return err;
11393cc9 3079 __dev_map_insert_ctx(map, index);
1b1a251c
BT
3080 break;
3081 }
3082 case BPF_MAP_TYPE_CPUMAP: {
9c270af3
JDB
3083 struct bpf_cpu_map_entry *rcpu = fwd;
3084
3085 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
3086 if (err)
3087 return err;
3088 __cpu_map_insert_ctx(map, index);
1b1a251c
BT
3089 break;
3090 }
3091 case BPF_MAP_TYPE_XSKMAP: {
3092 struct xdp_sock *xs = fwd;
3093
3094 err = __xsk_map_redirect(map, xdp, xs);
3095 return err;
3096 }
3097 default:
3098 break;
9c270af3 3099 }
e4a8e817 3100 return 0;
814abfab
JF
3101}
3102
11393cc9
JF
3103void xdp_do_flush_map(void)
3104{
3105 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3106 struct bpf_map *map = ri->map_to_flush;
3107
11393cc9 3108 ri->map_to_flush = NULL;
9c270af3
JDB
3109 if (map) {
3110 switch (map->map_type) {
3111 case BPF_MAP_TYPE_DEVMAP:
3112 __dev_map_flush(map);
3113 break;
3114 case BPF_MAP_TYPE_CPUMAP:
3115 __cpu_map_flush(map);
3116 break;
1b1a251c
BT
3117 case BPF_MAP_TYPE_XSKMAP:
3118 __xsk_map_flush(map);
3119 break;
9c270af3
JDB
3120 default:
3121 break;
3122 }
3123 }
11393cc9
JF
3124}
3125EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3126
9c270af3
JDB
3127static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3128{
3129 switch (map->map_type) {
3130 case BPF_MAP_TYPE_DEVMAP:
3131 return __dev_map_lookup_elem(map, index);
3132 case BPF_MAP_TYPE_CPUMAP:
3133 return __cpu_map_lookup_elem(map, index);
1b1a251c
BT
3134 case BPF_MAP_TYPE_XSKMAP:
3135 return __xsk_map_lookup_elem(map, index);
9c270af3
JDB
3136 default:
3137 return NULL;
3138 }
3139}
3140
7c300131
DB
3141static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
3142 unsigned long aux)
3143{
3144 return (unsigned long)xdp_prog->aux != aux;
3145}
3146
e4a8e817
DB
3147static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
3148 struct bpf_prog *xdp_prog)
97f91a7c
JF
3149{
3150 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
7c300131 3151 unsigned long map_owner = ri->map_owner;
97f91a7c 3152 struct bpf_map *map = ri->map;
11393cc9 3153 u32 index = ri->ifindex;
9c270af3 3154 void *fwd = NULL;
4c03bdd7 3155 int err;
97f91a7c
JF
3156
3157 ri->ifindex = 0;
3158 ri->map = NULL;
7c300131 3159 ri->map_owner = 0;
109980b8 3160
7c300131 3161 if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
96c5508e
JDB
3162 err = -EFAULT;
3163 map = NULL;
3164 goto err;
3165 }
97f91a7c 3166
9c270af3 3167 fwd = __xdp_map_lookup_elem(map, index);
4c03bdd7
JDB
3168 if (!fwd) {
3169 err = -EINVAL;
f5836ca5 3170 goto err;
4c03bdd7 3171 }
e4a8e817 3172 if (ri->map_to_flush && ri->map_to_flush != map)
11393cc9
JF
3173 xdp_do_flush_map();
3174
9c270af3 3175 err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
f5836ca5
JDB
3176 if (unlikely(err))
3177 goto err;
3178
3179 ri->map_to_flush = map;
59a30896 3180 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
f5836ca5
JDB
3181 return 0;
3182err:
59a30896 3183 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
97f91a7c
JF
3184 return err;
3185}
3186
5acaee0a
JF
3187int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3188 struct bpf_prog *xdp_prog)
814abfab
JF
3189{
3190 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
5acaee0a 3191 struct net_device *fwd;
eb48d682 3192 u32 index = ri->ifindex;
4c03bdd7 3193 int err;
814abfab 3194
97f91a7c
JF
3195 if (ri->map)
3196 return xdp_do_redirect_map(dev, xdp, xdp_prog);
3197
eb48d682 3198 fwd = dev_get_by_index_rcu(dev_net(dev), index);
814abfab 3199 ri->ifindex = 0;
5acaee0a 3200 if (unlikely(!fwd)) {
4c03bdd7 3201 err = -EINVAL;
f5836ca5 3202 goto err;
814abfab
JF
3203 }
3204
4c03bdd7 3205 err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
f5836ca5
JDB
3206 if (unlikely(err))
3207 goto err;
3208
3209 _trace_xdp_redirect(dev, xdp_prog, index);
3210 return 0;
3211err:
3212 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
4c03bdd7 3213 return err;
814abfab
JF
3214}
3215EXPORT_SYMBOL_GPL(xdp_do_redirect);
3216
c060bc61
XS
3217static int xdp_do_generic_redirect_map(struct net_device *dev,
3218 struct sk_buff *skb,
02671e23 3219 struct xdp_buff *xdp,
c060bc61 3220 struct bpf_prog *xdp_prog)
6103aa96
JF
3221{
3222 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
7c300131 3223 unsigned long map_owner = ri->map_owner;
96c5508e 3224 struct bpf_map *map = ri->map;
eb48d682 3225 u32 index = ri->ifindex;
02671e23 3226 void *fwd = NULL;
2facaad6 3227 int err = 0;
6103aa96 3228
6103aa96 3229 ri->ifindex = 0;
96c5508e 3230 ri->map = NULL;
7c300131 3231 ri->map_owner = 0;
96c5508e 3232
9c270af3
JDB
3233 if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
3234 err = -EFAULT;
3235 map = NULL;
3236 goto err;
96c5508e 3237 }
9c270af3 3238 fwd = __xdp_map_lookup_elem(map, index);
2facaad6
JDB
3239 if (unlikely(!fwd)) {
3240 err = -EINVAL;
f5836ca5 3241 goto err;
6103aa96
JF
3242 }
3243
9c270af3 3244 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
6d5fc195
TM
3245 struct bpf_dtab_netdev *dst = fwd;
3246
3247 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3248 if (unlikely(err))
9c270af3 3249 goto err;
02671e23
BT
3250 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3251 struct xdp_sock *xs = fwd;
3252
3253 err = xsk_generic_rcv(xs, xdp);
3254 if (err)
3255 goto err;
3256 consume_skb(skb);
9c270af3
JDB
3257 } else {
3258 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3259 err = -EBADRQC;
f5836ca5 3260 goto err;
2facaad6 3261 }
6103aa96 3262
9c270af3
JDB
3263 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3264 return 0;
3265err:
3266 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3267 return err;
3268}
3269
3270int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
02671e23 3271 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
9c270af3
JDB
3272{
3273 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3274 u32 index = ri->ifindex;
3275 struct net_device *fwd;
3276 int err = 0;
3277
3278 if (ri->map)
02671e23 3279 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
9c270af3
JDB
3280
3281 ri->ifindex = 0;
3282 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3283 if (unlikely(!fwd)) {
3284 err = -EINVAL;
f5836ca5 3285 goto err;
2facaad6
JDB
3286 }
3287
9c270af3
JDB
3288 if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
3289 goto err;
3290
2facaad6 3291 skb->dev = fwd;
9c270af3 3292 _trace_xdp_redirect(dev, xdp_prog, index);
02671e23 3293 generic_xdp_tx(skb, xdp_prog);
f5836ca5
JDB
3294 return 0;
3295err:
9c270af3 3296 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
2facaad6 3297 return err;
6103aa96
JF
3298}
3299EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3300
814abfab
JF
3301BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3302{
3303 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3304
3305 if (unlikely(flags))
3306 return XDP_ABORTED;
3307
3308 ri->ifindex = ifindex;
3309 ri->flags = flags;
109980b8 3310 ri->map = NULL;
7c300131 3311 ri->map_owner = 0;
e4a8e817 3312
814abfab
JF
3313 return XDP_REDIRECT;
3314}
3315
3316static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3317 .func = bpf_xdp_redirect,
3318 .gpl_only = false,
3319 .ret_type = RET_INTEGER,
3320 .arg1_type = ARG_ANYTHING,
3321 .arg2_type = ARG_ANYTHING,
3322};
3323
109980b8 3324BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
7c300131 3325 unsigned long, map_owner)
e4a8e817
DB
3326{
3327 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3328
3329 if (unlikely(flags))
3330 return XDP_ABORTED;
3331
3332 ri->ifindex = ifindex;
3333 ri->flags = flags;
3334 ri->map = map;
109980b8 3335 ri->map_owner = map_owner;
e4a8e817
DB
3336
3337 return XDP_REDIRECT;
3338}
3339
109980b8
DB
3340/* Note, arg4 is hidden from users and populated by the verifier
3341 * with the right pointer.
3342 */
e4a8e817
DB
3343static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3344 .func = bpf_xdp_redirect_map,
3345 .gpl_only = false,
3346 .ret_type = RET_INTEGER,
3347 .arg1_type = ARG_CONST_MAP_PTR,
3348 .arg2_type = ARG_ANYTHING,
3349 .arg3_type = ARG_ANYTHING,
3350};
3351
555c8a86 3352static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
aa7145c1 3353 unsigned long off, unsigned long len)
555c8a86 3354{
aa7145c1 3355 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
555c8a86
DB
3356
3357 if (unlikely(!ptr))
3358 return len;
3359 if (ptr != dst_buff)
3360 memcpy(dst_buff, ptr, len);
3361
3362 return 0;
3363}
3364
f3694e00
DB
3365BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3366 u64, flags, void *, meta, u64, meta_size)
555c8a86 3367{
555c8a86 3368 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
555c8a86
DB
3369
3370 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3371 return -EINVAL;
3372 if (unlikely(skb_size > skb->len))
3373 return -EFAULT;
3374
3375 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3376 bpf_skb_copy);
3377}
3378
3379static const struct bpf_func_proto bpf_skb_event_output_proto = {
3380 .func = bpf_skb_event_output,
3381 .gpl_only = true,
3382 .ret_type = RET_INTEGER,
3383 .arg1_type = ARG_PTR_TO_CTX,
3384 .arg2_type = ARG_CONST_MAP_PTR,
3385 .arg3_type = ARG_ANYTHING,
39f19ebb 3386 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 3387 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
555c8a86
DB
3388};
3389
c6c33454
DB
3390static unsigned short bpf_tunnel_key_af(u64 flags)
3391{
3392 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3393}
3394
f3694e00
DB
3395BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3396 u32, size, u64, flags)
d3aa45ce 3397{
c6c33454
DB
3398 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3399 u8 compat[sizeof(struct bpf_tunnel_key)];
074f528e
DB
3400 void *to_orig = to;
3401 int err;
d3aa45ce 3402
074f528e
DB
3403 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3404 err = -EINVAL;
3405 goto err_clear;
3406 }
3407 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3408 err = -EPROTO;
3409 goto err_clear;
3410 }
c6c33454 3411 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
074f528e 3412 err = -EINVAL;
c6c33454 3413 switch (size) {
4018ab18 3414 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3415 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4018ab18 3416 goto set_compat;
c6c33454
DB
3417 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3418 /* Fixup deprecated structure layouts here, so we have
3419 * a common path later on.
3420 */
3421 if (ip_tunnel_info_af(info) != AF_INET)
074f528e 3422 goto err_clear;
4018ab18 3423set_compat:
c6c33454
DB
3424 to = (struct bpf_tunnel_key *)compat;
3425 break;
3426 default:
074f528e 3427 goto err_clear;
c6c33454
DB
3428 }
3429 }
d3aa45ce
AS
3430
3431 to->tunnel_id = be64_to_cpu(info->key.tun_id);
c6c33454
DB
3432 to->tunnel_tos = info->key.tos;
3433 to->tunnel_ttl = info->key.ttl;
1fbc2e0c 3434 to->tunnel_ext = 0;
c6c33454 3435
4018ab18 3436 if (flags & BPF_F_TUNINFO_IPV6) {
c6c33454
DB
3437 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3438 sizeof(to->remote_ipv6));
4018ab18
DB
3439 to->tunnel_label = be32_to_cpu(info->key.label);
3440 } else {
c6c33454 3441 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
1fbc2e0c
DB
3442 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3443 to->tunnel_label = 0;
4018ab18 3444 }
c6c33454
DB
3445
3446 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
074f528e 3447 memcpy(to_orig, to, size);
d3aa45ce
AS
3448
3449 return 0;
074f528e
DB
3450err_clear:
3451 memset(to_orig, 0, size);
3452 return err;
d3aa45ce
AS
3453}
3454
577c50aa 3455static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
d3aa45ce
AS
3456 .func = bpf_skb_get_tunnel_key,
3457 .gpl_only = false,
3458 .ret_type = RET_INTEGER,
3459 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3460 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3461 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3462 .arg4_type = ARG_ANYTHING,
3463};
3464
f3694e00 3465BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
14ca0751 3466{
14ca0751 3467 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
074f528e 3468 int err;
14ca0751
DB
3469
3470 if (unlikely(!info ||
074f528e
DB
3471 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3472 err = -ENOENT;
3473 goto err_clear;
3474 }
3475 if (unlikely(size < info->options_len)) {
3476 err = -ENOMEM;
3477 goto err_clear;
3478 }
14ca0751
DB
3479
3480 ip_tunnel_info_opts_get(to, info);
074f528e
DB
3481 if (size > info->options_len)
3482 memset(to + info->options_len, 0, size - info->options_len);
14ca0751
DB
3483
3484 return info->options_len;
074f528e
DB
3485err_clear:
3486 memset(to, 0, size);
3487 return err;
14ca0751
DB
3488}
3489
3490static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3491 .func = bpf_skb_get_tunnel_opt,
3492 .gpl_only = false,
3493 .ret_type = RET_INTEGER,
3494 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3495 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3496 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3497};
3498
d3aa45ce
AS
3499static struct metadata_dst __percpu *md_dst;
3500
f3694e00
DB
3501BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3502 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
d3aa45ce 3503{
d3aa45ce 3504 struct metadata_dst *md = this_cpu_ptr(md_dst);
c6c33454 3505 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce
AS
3506 struct ip_tunnel_info *info;
3507
22080870 3508 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
77a5196a 3509 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
d3aa45ce 3510 return -EINVAL;
c6c33454
DB
3511 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3512 switch (size) {
4018ab18 3513 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3514 case offsetof(struct bpf_tunnel_key, tunnel_ext):
c6c33454
DB
3515 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3516 /* Fixup deprecated structure layouts here, so we have
3517 * a common path later on.
3518 */
3519 memcpy(compat, from, size);
3520 memset(compat + size, 0, sizeof(compat) - size);
f3694e00 3521 from = (const struct bpf_tunnel_key *) compat;
c6c33454
DB
3522 break;
3523 default:
3524 return -EINVAL;
3525 }
3526 }
c0e760c9
DB
3527 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3528 from->tunnel_ext))
4018ab18 3529 return -EINVAL;
d3aa45ce
AS
3530
3531 skb_dst_drop(skb);
3532 dst_hold((struct dst_entry *) md);
3533 skb_dst_set(skb, (struct dst_entry *) md);
3534
3535 info = &md->u.tun_info;
5540fbf4 3536 memset(info, 0, sizeof(*info));
d3aa45ce 3537 info->mode = IP_TUNNEL_INFO_TX;
c6c33454 3538
db3c6139 3539 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
22080870
DB
3540 if (flags & BPF_F_DONT_FRAGMENT)
3541 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
792f3dd6
WT
3542 if (flags & BPF_F_ZERO_CSUM_TX)
3543 info->key.tun_flags &= ~TUNNEL_CSUM;
77a5196a
WT
3544 if (flags & BPF_F_SEQ_NUMBER)
3545 info->key.tun_flags |= TUNNEL_SEQ;
22080870 3546
d3aa45ce 3547 info->key.tun_id = cpu_to_be64(from->tunnel_id);
c6c33454
DB
3548 info->key.tos = from->tunnel_tos;
3549 info->key.ttl = from->tunnel_ttl;
3550
3551 if (flags & BPF_F_TUNINFO_IPV6) {
3552 info->mode |= IP_TUNNEL_INFO_IPV6;
3553 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3554 sizeof(from->remote_ipv6));
4018ab18
DB
3555 info->key.label = cpu_to_be32(from->tunnel_label) &
3556 IPV6_FLOWLABEL_MASK;
c6c33454
DB
3557 } else {
3558 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3559 }
d3aa45ce
AS
3560
3561 return 0;
3562}
3563
577c50aa 3564static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
d3aa45ce
AS
3565 .func = bpf_skb_set_tunnel_key,
3566 .gpl_only = false,
3567 .ret_type = RET_INTEGER,
3568 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3569 .arg2_type = ARG_PTR_TO_MEM,
3570 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3571 .arg4_type = ARG_ANYTHING,
3572};
3573
f3694e00
DB
3574BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
3575 const u8 *, from, u32, size)
14ca0751 3576{
14ca0751
DB
3577 struct ip_tunnel_info *info = skb_tunnel_info(skb);
3578 const struct metadata_dst *md = this_cpu_ptr(md_dst);
3579
3580 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
3581 return -EINVAL;
fca5fdf6 3582 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
14ca0751
DB
3583 return -ENOMEM;
3584
3585 ip_tunnel_info_opts_set(info, from, size);
3586
3587 return 0;
3588}
3589
3590static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
3591 .func = bpf_skb_set_tunnel_opt,
3592 .gpl_only = false,
3593 .ret_type = RET_INTEGER,
3594 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3595 .arg2_type = ARG_PTR_TO_MEM,
3596 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3597};
3598
3599static const struct bpf_func_proto *
3600bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
d3aa45ce
AS
3601{
3602 if (!md_dst) {
d66f2b91
JK
3603 struct metadata_dst __percpu *tmp;
3604
3605 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
3606 METADATA_IP_TUNNEL,
3607 GFP_KERNEL);
3608 if (!tmp)
d3aa45ce 3609 return NULL;
d66f2b91
JK
3610 if (cmpxchg(&md_dst, NULL, tmp))
3611 metadata_dst_free_percpu(tmp);
d3aa45ce 3612 }
14ca0751
DB
3613
3614 switch (which) {
3615 case BPF_FUNC_skb_set_tunnel_key:
3616 return &bpf_skb_set_tunnel_key_proto;
3617 case BPF_FUNC_skb_set_tunnel_opt:
3618 return &bpf_skb_set_tunnel_opt_proto;
3619 default:
3620 return NULL;
3621 }
d3aa45ce
AS
3622}
3623
f3694e00
DB
3624BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
3625 u32, idx)
4a482f34 3626{
4a482f34
MKL
3627 struct bpf_array *array = container_of(map, struct bpf_array, map);
3628 struct cgroup *cgrp;
3629 struct sock *sk;
4a482f34 3630
2d48c5f9 3631 sk = skb_to_full_sk(skb);
4a482f34
MKL
3632 if (!sk || !sk_fullsock(sk))
3633 return -ENOENT;
f3694e00 3634 if (unlikely(idx >= array->map.max_entries))
4a482f34
MKL
3635 return -E2BIG;
3636
f3694e00 3637 cgrp = READ_ONCE(array->ptrs[idx]);
4a482f34
MKL
3638 if (unlikely(!cgrp))
3639 return -EAGAIN;
3640
54fd9c2d 3641 return sk_under_cgroup_hierarchy(sk, cgrp);
4a482f34
MKL
3642}
3643
747ea55e
DB
3644static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
3645 .func = bpf_skb_under_cgroup,
4a482f34
MKL
3646 .gpl_only = false,
3647 .ret_type = RET_INTEGER,
3648 .arg1_type = ARG_PTR_TO_CTX,
3649 .arg2_type = ARG_CONST_MAP_PTR,
3650 .arg3_type = ARG_ANYTHING,
3651};
4a482f34 3652
cb20b08e
DB
3653#ifdef CONFIG_SOCK_CGROUP_DATA
3654BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
3655{
3656 struct sock *sk = skb_to_full_sk(skb);
3657 struct cgroup *cgrp;
3658
3659 if (!sk || !sk_fullsock(sk))
3660 return 0;
3661
3662 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
3663 return cgrp->kn->id.id;
3664}
3665
3666static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
3667 .func = bpf_skb_cgroup_id,
3668 .gpl_only = false,
3669 .ret_type = RET_INTEGER,
3670 .arg1_type = ARG_PTR_TO_CTX,
3671};
3672#endif
3673
4de16969
DB
3674static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
3675 unsigned long off, unsigned long len)
3676{
3677 memcpy(dst_buff, src_buff + off, len);
3678 return 0;
3679}
3680
f3694e00
DB
3681BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
3682 u64, flags, void *, meta, u64, meta_size)
4de16969 3683{
4de16969 3684 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4de16969
DB
3685
3686 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3687 return -EINVAL;
3688 if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
3689 return -EFAULT;
3690
9c471370
MKL
3691 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
3692 xdp_size, bpf_xdp_copy);
4de16969
DB
3693}
3694
3695static const struct bpf_func_proto bpf_xdp_event_output_proto = {
3696 .func = bpf_xdp_event_output,
3697 .gpl_only = true,
3698 .ret_type = RET_INTEGER,
3699 .arg1_type = ARG_PTR_TO_CTX,
3700 .arg2_type = ARG_CONST_MAP_PTR,
3701 .arg3_type = ARG_ANYTHING,
39f19ebb 3702 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 3703 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4de16969
DB
3704};
3705
91b8270f
CF
3706BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
3707{
3708 return skb->sk ? sock_gen_cookie(skb->sk) : 0;
3709}
3710
3711static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
3712 .func = bpf_get_socket_cookie,
3713 .gpl_only = false,
3714 .ret_type = RET_INTEGER,
3715 .arg1_type = ARG_PTR_TO_CTX,
3716};
3717
6acc5c29
CF
3718BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
3719{
3720 struct sock *sk = sk_to_full_sk(skb->sk);
3721 kuid_t kuid;
3722
3723 if (!sk || !sk_fullsock(sk))
3724 return overflowuid;
3725 kuid = sock_net_uid(sock_net(sk), sk);
3726 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
3727}
3728
3729static const struct bpf_func_proto bpf_get_socket_uid_proto = {
3730 .func = bpf_get_socket_uid,
3731 .gpl_only = false,
3732 .ret_type = RET_INTEGER,
3733 .arg1_type = ARG_PTR_TO_CTX,
3734};
3735
8c4b4c7e
LB
3736BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3737 int, level, int, optname, char *, optval, int, optlen)
3738{
3739 struct sock *sk = bpf_sock->sk;
3740 int ret = 0;
3741 int val;
3742
3743 if (!sk_fullsock(sk))
3744 return -EINVAL;
3745
3746 if (level == SOL_SOCKET) {
3747 if (optlen != sizeof(int))
3748 return -EINVAL;
3749 val = *((int *)optval);
3750
3751 /* Only some socketops are supported */
3752 switch (optname) {
3753 case SO_RCVBUF:
3754 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
3755 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
3756 break;
3757 case SO_SNDBUF:
3758 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
3759 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
3760 break;
3761 case SO_MAX_PACING_RATE:
3762 sk->sk_max_pacing_rate = val;
3763 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
3764 sk->sk_max_pacing_rate);
3765 break;
3766 case SO_PRIORITY:
3767 sk->sk_priority = val;
3768 break;
3769 case SO_RCVLOWAT:
3770 if (val < 0)
3771 val = INT_MAX;
3772 sk->sk_rcvlowat = val ? : 1;
3773 break;
3774 case SO_MARK:
3775 sk->sk_mark = val;
3776 break;
3777 default:
3778 ret = -EINVAL;
3779 }
a5192c52 3780#ifdef CONFIG_INET
6f5c39fa
NS
3781 } else if (level == SOL_IP) {
3782 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3783 return -EINVAL;
3784
3785 val = *((int *)optval);
3786 /* Only some options are supported */
3787 switch (optname) {
3788 case IP_TOS:
3789 if (val < -1 || val > 0xff) {
3790 ret = -EINVAL;
3791 } else {
3792 struct inet_sock *inet = inet_sk(sk);
3793
3794 if (val == -1)
3795 val = 0;
3796 inet->tos = val;
3797 }
3798 break;
3799 default:
3800 ret = -EINVAL;
3801 }
6f9bd3d7
LB
3802#if IS_ENABLED(CONFIG_IPV6)
3803 } else if (level == SOL_IPV6) {
3804 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3805 return -EINVAL;
3806
3807 val = *((int *)optval);
3808 /* Only some options are supported */
3809 switch (optname) {
3810 case IPV6_TCLASS:
3811 if (val < -1 || val > 0xff) {
3812 ret = -EINVAL;
3813 } else {
3814 struct ipv6_pinfo *np = inet6_sk(sk);
3815
3816 if (val == -1)
3817 val = 0;
3818 np->tclass = val;
3819 }
3820 break;
3821 default:
3822 ret = -EINVAL;
3823 }
3824#endif
8c4b4c7e
LB
3825 } else if (level == SOL_TCP &&
3826 sk->sk_prot->setsockopt == tcp_setsockopt) {
91b5b21c
LB
3827 if (optname == TCP_CONGESTION) {
3828 char name[TCP_CA_NAME_MAX];
ebfa00c5 3829 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
91b5b21c
LB
3830
3831 strncpy(name, optval, min_t(long, optlen,
3832 TCP_CA_NAME_MAX-1));
3833 name[TCP_CA_NAME_MAX-1] = 0;
6f9bd3d7
LB
3834 ret = tcp_set_congestion_control(sk, name, false,
3835 reinit);
91b5b21c 3836 } else {
fc747810
LB
3837 struct tcp_sock *tp = tcp_sk(sk);
3838
3839 if (optlen != sizeof(int))
3840 return -EINVAL;
3841
3842 val = *((int *)optval);
3843 /* Only some options are supported */
3844 switch (optname) {
3845 case TCP_BPF_IW:
3846 if (val <= 0 || tp->data_segs_out > 0)
3847 ret = -EINVAL;
3848 else
3849 tp->snd_cwnd = val;
3850 break;
13bf9641
LB
3851 case TCP_BPF_SNDCWND_CLAMP:
3852 if (val <= 0) {
3853 ret = -EINVAL;
3854 } else {
3855 tp->snd_cwnd_clamp = val;
3856 tp->snd_ssthresh = val;
3857 }
6d3f06a0 3858 break;
fc747810
LB
3859 default:
3860 ret = -EINVAL;
3861 }
91b5b21c 3862 }
91b5b21c 3863#endif
8c4b4c7e
LB
3864 } else {
3865 ret = -EINVAL;
3866 }
3867 return ret;
3868}
3869
3870static const struct bpf_func_proto bpf_setsockopt_proto = {
3871 .func = bpf_setsockopt,
cd86d1fd 3872 .gpl_only = false,
8c4b4c7e
LB
3873 .ret_type = RET_INTEGER,
3874 .arg1_type = ARG_PTR_TO_CTX,
3875 .arg2_type = ARG_ANYTHING,
3876 .arg3_type = ARG_ANYTHING,
3877 .arg4_type = ARG_PTR_TO_MEM,
3878 .arg5_type = ARG_CONST_SIZE,
3879};
3880
cd86d1fd
LB
3881BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3882 int, level, int, optname, char *, optval, int, optlen)
3883{
3884 struct sock *sk = bpf_sock->sk;
cd86d1fd
LB
3885
3886 if (!sk_fullsock(sk))
3887 goto err_clear;
3888
3889#ifdef CONFIG_INET
3890 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
3891 if (optname == TCP_CONGESTION) {
3892 struct inet_connection_sock *icsk = inet_csk(sk);
3893
3894 if (!icsk->icsk_ca_ops || optlen <= 1)
3895 goto err_clear;
3896 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
3897 optval[optlen - 1] = 0;
3898 } else {
3899 goto err_clear;
3900 }
6f5c39fa
NS
3901 } else if (level == SOL_IP) {
3902 struct inet_sock *inet = inet_sk(sk);
3903
3904 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3905 goto err_clear;
3906
3907 /* Only some options are supported */
3908 switch (optname) {
3909 case IP_TOS:
3910 *((int *)optval) = (int)inet->tos;
3911 break;
3912 default:
3913 goto err_clear;
3914 }
6f9bd3d7
LB
3915#if IS_ENABLED(CONFIG_IPV6)
3916 } else if (level == SOL_IPV6) {
3917 struct ipv6_pinfo *np = inet6_sk(sk);
3918
3919 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3920 goto err_clear;
3921
3922 /* Only some options are supported */
3923 switch (optname) {
3924 case IPV6_TCLASS:
3925 *((int *)optval) = (int)np->tclass;
3926 break;
3927 default:
3928 goto err_clear;
3929 }
3930#endif
cd86d1fd
LB
3931 } else {
3932 goto err_clear;
3933 }
aa2bc739 3934 return 0;
cd86d1fd
LB
3935#endif
3936err_clear:
3937 memset(optval, 0, optlen);
3938 return -EINVAL;
3939}
3940
3941static const struct bpf_func_proto bpf_getsockopt_proto = {
3942 .func = bpf_getsockopt,
3943 .gpl_only = false,
3944 .ret_type = RET_INTEGER,
3945 .arg1_type = ARG_PTR_TO_CTX,
3946 .arg2_type = ARG_ANYTHING,
3947 .arg3_type = ARG_ANYTHING,
3948 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
3949 .arg5_type = ARG_CONST_SIZE,
3950};
3951
b13d8807
LB
3952BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
3953 int, argval)
3954{
3955 struct sock *sk = bpf_sock->sk;
3956 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
3957
a7dcdf6e 3958 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
b13d8807
LB
3959 return -EINVAL;
3960
b13d8807
LB
3961 if (val)
3962 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
3963
3964 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
b13d8807
LB
3965}
3966
3967static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
3968 .func = bpf_sock_ops_cb_flags_set,
3969 .gpl_only = false,
3970 .ret_type = RET_INTEGER,
3971 .arg1_type = ARG_PTR_TO_CTX,
3972 .arg2_type = ARG_ANYTHING,
3973};
3974
d74bad4e
AI
3975const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
3976EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
3977
3978BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
3979 int, addr_len)
3980{
3981#ifdef CONFIG_INET
3982 struct sock *sk = ctx->sk;
3983 int err;
3984
3985 /* Binding to port can be expensive so it's prohibited in the helper.
3986 * Only binding to IP is supported.
3987 */
3988 err = -EINVAL;
3989 if (addr->sa_family == AF_INET) {
3990 if (addr_len < sizeof(struct sockaddr_in))
3991 return err;
3992 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3993 return err;
3994 return __inet_bind(sk, addr, addr_len, true, false);
3995#if IS_ENABLED(CONFIG_IPV6)
3996 } else if (addr->sa_family == AF_INET6) {
3997 if (addr_len < SIN6_LEN_RFC2133)
3998 return err;
3999 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4000 return err;
4001 /* ipv6_bpf_stub cannot be NULL, since it's called from
4002 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
4003 */
4004 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4005#endif /* CONFIG_IPV6 */
4006 }
4007#endif /* CONFIG_INET */
4008
4009 return -EAFNOSUPPORT;
4010}
4011
4012static const struct bpf_func_proto bpf_bind_proto = {
4013 .func = bpf_bind,
4014 .gpl_only = false,
4015 .ret_type = RET_INTEGER,
4016 .arg1_type = ARG_PTR_TO_CTX,
4017 .arg2_type = ARG_PTR_TO_MEM,
4018 .arg3_type = ARG_CONST_SIZE,
4019};
4020
12bed760
EB
4021#ifdef CONFIG_XFRM
4022BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4023 struct bpf_xfrm_state *, to, u32, size, u64, flags)
4024{
4025 const struct sec_path *sp = skb_sec_path(skb);
4026 const struct xfrm_state *x;
4027
4028 if (!sp || unlikely(index >= sp->len || flags))
4029 goto err_clear;
4030
4031 x = sp->xvec[index];
4032
4033 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4034 goto err_clear;
4035
4036 to->reqid = x->props.reqid;
4037 to->spi = x->id.spi;
4038 to->family = x->props.family;
1fbc2e0c
DB
4039 to->ext = 0;
4040
12bed760
EB
4041 if (to->family == AF_INET6) {
4042 memcpy(to->remote_ipv6, x->props.saddr.a6,
4043 sizeof(to->remote_ipv6));
4044 } else {
4045 to->remote_ipv4 = x->props.saddr.a4;
1fbc2e0c 4046 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
12bed760
EB
4047 }
4048
4049 return 0;
4050err_clear:
4051 memset(to, 0, size);
4052 return -EINVAL;
4053}
4054
4055static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4056 .func = bpf_skb_get_xfrm_state,
4057 .gpl_only = false,
4058 .ret_type = RET_INTEGER,
4059 .arg1_type = ARG_PTR_TO_CTX,
4060 .arg2_type = ARG_ANYTHING,
4061 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
4062 .arg4_type = ARG_CONST_SIZE,
4063 .arg5_type = ARG_ANYTHING,
4064};
4065#endif
4066
87f5fc7e
DA
4067#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4068static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4069 const struct neighbour *neigh,
4070 const struct net_device *dev)
4071{
4072 memcpy(params->dmac, neigh->ha, ETH_ALEN);
4073 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4074 params->h_vlan_TCI = 0;
4075 params->h_vlan_proto = 0;
4076
4077 return dev->ifindex;
4078}
4079#endif
4080
4081#if IS_ENABLED(CONFIG_INET)
4082static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4083 u32 flags, bool check_mtu)
87f5fc7e
DA
4084{
4085 struct in_device *in_dev;
4086 struct neighbour *neigh;
4087 struct net_device *dev;
4088 struct fib_result res;
4089 struct fib_nh *nh;
4090 struct flowi4 fl4;
4091 int err;
4f74fede 4092 u32 mtu;
87f5fc7e
DA
4093
4094 dev = dev_get_by_index_rcu(net, params->ifindex);
4095 if (unlikely(!dev))
4096 return -ENODEV;
4097
4098 /* verify forwarding is enabled on this interface */
4099 in_dev = __in_dev_get_rcu(dev);
4100 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4101 return 0;
4102
4103 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4104 fl4.flowi4_iif = 1;
4105 fl4.flowi4_oif = params->ifindex;
4106 } else {
4107 fl4.flowi4_iif = params->ifindex;
4108 fl4.flowi4_oif = 0;
4109 }
4110 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4111 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4112 fl4.flowi4_flags = 0;
4113
4114 fl4.flowi4_proto = params->l4_protocol;
4115 fl4.daddr = params->ipv4_dst;
4116 fl4.saddr = params->ipv4_src;
4117 fl4.fl4_sport = params->sport;
4118 fl4.fl4_dport = params->dport;
4119
4120 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4121 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4122 struct fib_table *tb;
4123
4124 tb = fib_get_table(net, tbid);
4125 if (unlikely(!tb))
4126 return 0;
4127
4128 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4129 } else {
4130 fl4.flowi4_mark = 0;
4131 fl4.flowi4_secid = 0;
4132 fl4.flowi4_tun_key.tun_id = 0;
4133 fl4.flowi4_uid = sock_net_uid(net, NULL);
4134
4135 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4136 }
4137
4138 if (err || res.type != RTN_UNICAST)
4139 return 0;
4140
4141 if (res.fi->fib_nhs > 1)
4142 fib_select_path(net, &res, &fl4, NULL);
4143
4f74fede
DA
4144 if (check_mtu) {
4145 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4146 if (params->tot_len > mtu)
4147 return 0;
4148 }
4149
87f5fc7e
DA
4150 nh = &res.fi->fib_nh[res.nh_sel];
4151
4152 /* do not handle lwt encaps right now */
4153 if (nh->nh_lwtstate)
4154 return 0;
4155
4156 dev = nh->nh_dev;
4157 if (unlikely(!dev))
4158 return 0;
4159
4160 if (nh->nh_gw)
4161 params->ipv4_dst = nh->nh_gw;
4162
4163 params->rt_metric = res.fi->fib_priority;
4164
4165 /* xdp and cls_bpf programs are run in RCU-bh so
4166 * rcu_read_lock_bh is not needed here
4167 */
4168 neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
4169 if (neigh)
4170 return bpf_fib_set_fwd_params(params, neigh, dev);
4171
4172 return 0;
4173}
4174#endif
4175
4176#if IS_ENABLED(CONFIG_IPV6)
4177static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4178 u32 flags, bool check_mtu)
87f5fc7e
DA
4179{
4180 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4181 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4182 struct neighbour *neigh;
4183 struct net_device *dev;
4184 struct inet6_dev *idev;
4185 struct fib6_info *f6i;
4186 struct flowi6 fl6;
4187 int strict = 0;
4188 int oif;
4f74fede 4189 u32 mtu;
87f5fc7e
DA
4190
4191 /* link local addresses are never forwarded */
4192 if (rt6_need_strict(dst) || rt6_need_strict(src))
4193 return 0;
4194
4195 dev = dev_get_by_index_rcu(net, params->ifindex);
4196 if (unlikely(!dev))
4197 return -ENODEV;
4198
4199 idev = __in6_dev_get_safely(dev);
4200 if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
4201 return 0;
4202
4203 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4204 fl6.flowi6_iif = 1;
4205 oif = fl6.flowi6_oif = params->ifindex;
4206 } else {
4207 oif = fl6.flowi6_iif = params->ifindex;
4208 fl6.flowi6_oif = 0;
4209 strict = RT6_LOOKUP_F_HAS_SADDR;
4210 }
bd3a08aa 4211 fl6.flowlabel = params->flowinfo;
87f5fc7e
DA
4212 fl6.flowi6_scope = 0;
4213 fl6.flowi6_flags = 0;
4214 fl6.mp_hash = 0;
4215
4216 fl6.flowi6_proto = params->l4_protocol;
4217 fl6.daddr = *dst;
4218 fl6.saddr = *src;
4219 fl6.fl6_sport = params->sport;
4220 fl6.fl6_dport = params->dport;
4221
4222 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4223 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4224 struct fib6_table *tb;
4225
4226 tb = ipv6_stub->fib6_get_table(net, tbid);
4227 if (unlikely(!tb))
4228 return 0;
4229
4230 f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
4231 } else {
4232 fl6.flowi6_mark = 0;
4233 fl6.flowi6_secid = 0;
4234 fl6.flowi6_tun_key.tun_id = 0;
4235 fl6.flowi6_uid = sock_net_uid(net, NULL);
4236
4237 f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
4238 }
4239
4240 if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
4241 return 0;
4242
4243 if (unlikely(f6i->fib6_flags & RTF_REJECT ||
4244 f6i->fib6_type != RTN_UNICAST))
4245 return 0;
4246
4247 if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
4248 f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
4249 fl6.flowi6_oif, NULL,
4250 strict);
4251
4f74fede
DA
4252 if (check_mtu) {
4253 mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
4254 if (params->tot_len > mtu)
4255 return 0;
4256 }
4257
87f5fc7e
DA
4258 if (f6i->fib6_nh.nh_lwtstate)
4259 return 0;
4260
4261 if (f6i->fib6_flags & RTF_GATEWAY)
4262 *dst = f6i->fib6_nh.nh_gw;
4263
4264 dev = f6i->fib6_nh.nh_dev;
4265 params->rt_metric = f6i->fib6_metric;
4266
4267 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
4268 * not needed here. Can not use __ipv6_neigh_lookup_noref here
4269 * because we need to get nd_tbl via the stub
4270 */
4271 neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
4272 ndisc_hashfn, dst, dev);
4273 if (neigh)
4274 return bpf_fib_set_fwd_params(params, neigh, dev);
4275
4276 return 0;
4277}
4278#endif
4279
4280BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4281 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4282{
4283 if (plen < sizeof(*params))
4284 return -EINVAL;
4285
9ce64f19
DA
4286 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4287 return -EINVAL;
4288
87f5fc7e
DA
4289 switch (params->family) {
4290#if IS_ENABLED(CONFIG_INET)
4291 case AF_INET:
4292 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4293 flags, true);
87f5fc7e
DA
4294#endif
4295#if IS_ENABLED(CONFIG_IPV6)
4296 case AF_INET6:
4297 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4298 flags, true);
87f5fc7e
DA
4299#endif
4300 }
bcece5dc 4301 return -EAFNOSUPPORT;
87f5fc7e
DA
4302}
4303
4304static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4305 .func = bpf_xdp_fib_lookup,
4306 .gpl_only = true,
4307 .ret_type = RET_INTEGER,
4308 .arg1_type = ARG_PTR_TO_CTX,
4309 .arg2_type = ARG_PTR_TO_MEM,
4310 .arg3_type = ARG_CONST_SIZE,
4311 .arg4_type = ARG_ANYTHING,
4312};
4313
4314BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4315 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4316{
4f74fede 4317 struct net *net = dev_net(skb->dev);
bcece5dc 4318 int index = -EAFNOSUPPORT;
4f74fede 4319
87f5fc7e
DA
4320 if (plen < sizeof(*params))
4321 return -EINVAL;
4322
9ce64f19
DA
4323 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4324 return -EINVAL;
4325
87f5fc7e
DA
4326 switch (params->family) {
4327#if IS_ENABLED(CONFIG_INET)
4328 case AF_INET:
4f74fede
DA
4329 index = bpf_ipv4_fib_lookup(net, params, flags, false);
4330 break;
87f5fc7e
DA
4331#endif
4332#if IS_ENABLED(CONFIG_IPV6)
4333 case AF_INET6:
4f74fede
DA
4334 index = bpf_ipv6_fib_lookup(net, params, flags, false);
4335 break;
87f5fc7e
DA
4336#endif
4337 }
4f74fede
DA
4338
4339 if (index > 0) {
4340 struct net_device *dev;
4341
4342 dev = dev_get_by_index_rcu(net, index);
4343 if (!is_skb_forwardable(dev, skb))
4344 index = 0;
4345 }
4346
4347 return index;
87f5fc7e
DA
4348}
4349
4350static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4351 .func = bpf_skb_fib_lookup,
4352 .gpl_only = true,
4353 .ret_type = RET_INTEGER,
4354 .arg1_type = ARG_PTR_TO_CTX,
4355 .arg2_type = ARG_PTR_TO_MEM,
4356 .arg3_type = ARG_CONST_SIZE,
4357 .arg4_type = ARG_ANYTHING,
4358};
4359
fe94cc29
MX
4360#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4361static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4362{
4363 int err;
4364 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4365
4366 if (!seg6_validate_srh(srh, len))
4367 return -EINVAL;
4368
4369 switch (type) {
4370 case BPF_LWT_ENCAP_SEG6_INLINE:
4371 if (skb->protocol != htons(ETH_P_IPV6))
4372 return -EBADMSG;
4373
4374 err = seg6_do_srh_inline(skb, srh);
4375 break;
4376 case BPF_LWT_ENCAP_SEG6:
4377 skb_reset_inner_headers(skb);
4378 skb->encapsulation = 1;
4379 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4380 break;
4381 default:
4382 return -EINVAL;
4383 }
4384
4385 bpf_compute_data_pointers(skb);
4386 if (err)
4387 return err;
4388
4389 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4390 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4391
4392 return seg6_lookup_nexthop(skb, NULL, 0);
4393}
4394#endif /* CONFIG_IPV6_SEG6_BPF */
4395
4396BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
4397 u32, len)
4398{
4399 switch (type) {
4400#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4401 case BPF_LWT_ENCAP_SEG6:
4402 case BPF_LWT_ENCAP_SEG6_INLINE:
4403 return bpf_push_seg6_encap(skb, type, hdr, len);
4404#endif
4405 default:
4406 return -EINVAL;
4407 }
4408}
4409
4410static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
4411 .func = bpf_lwt_push_encap,
4412 .gpl_only = false,
4413 .ret_type = RET_INTEGER,
4414 .arg1_type = ARG_PTR_TO_CTX,
4415 .arg2_type = ARG_ANYTHING,
4416 .arg3_type = ARG_PTR_TO_MEM,
4417 .arg4_type = ARG_CONST_SIZE
4418};
4419
4420BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
4421 const void *, from, u32, len)
4422{
4423#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4424 struct seg6_bpf_srh_state *srh_state =
4425 this_cpu_ptr(&seg6_bpf_srh_states);
4426 void *srh_tlvs, *srh_end, *ptr;
4427 struct ipv6_sr_hdr *srh;
4428 int srhoff = 0;
4429
4430 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4431 return -EINVAL;
4432
4433 srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4434 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
4435 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
4436
4437 ptr = skb->data + offset;
4438 if (ptr >= srh_tlvs && ptr + len <= srh_end)
4439 srh_state->valid = 0;
4440 else if (ptr < (void *)&srh->flags ||
4441 ptr + len > (void *)&srh->segments)
4442 return -EFAULT;
4443
4444 if (unlikely(bpf_try_make_writable(skb, offset + len)))
4445 return -EFAULT;
4446
4447 memcpy(skb->data + offset, from, len);
4448 return 0;
4449#else /* CONFIG_IPV6_SEG6_BPF */
4450 return -EOPNOTSUPP;
4451#endif
4452}
4453
4454static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
4455 .func = bpf_lwt_seg6_store_bytes,
4456 .gpl_only = false,
4457 .ret_type = RET_INTEGER,
4458 .arg1_type = ARG_PTR_TO_CTX,
4459 .arg2_type = ARG_ANYTHING,
4460 .arg3_type = ARG_PTR_TO_MEM,
4461 .arg4_type = ARG_CONST_SIZE
4462};
4463
4464BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
4465 u32, action, void *, param, u32, param_len)
4466{
4467#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4468 struct seg6_bpf_srh_state *srh_state =
4469 this_cpu_ptr(&seg6_bpf_srh_states);
4470 struct ipv6_sr_hdr *srh;
4471 int srhoff = 0;
4472 int err;
4473
4474 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4475 return -EINVAL;
4476 srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4477
4478 if (!srh_state->valid) {
4479 if (unlikely((srh_state->hdrlen & 7) != 0))
4480 return -EBADMSG;
4481
4482 srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
4483 if (unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3)))
4484 return -EBADMSG;
4485
4486 srh_state->valid = 1;
4487 }
4488
4489 switch (action) {
4490 case SEG6_LOCAL_ACTION_END_X:
4491 if (param_len != sizeof(struct in6_addr))
4492 return -EINVAL;
4493 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
4494 case SEG6_LOCAL_ACTION_END_T:
4495 if (param_len != sizeof(int))
4496 return -EINVAL;
4497 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
4498 case SEG6_LOCAL_ACTION_END_B6:
4499 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
4500 param, param_len);
4501 if (!err)
4502 srh_state->hdrlen =
4503 ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
4504 return err;
4505 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
4506 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
4507 param, param_len);
4508 if (!err)
4509 srh_state->hdrlen =
4510 ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
4511 return err;
4512 default:
4513 return -EINVAL;
4514 }
4515#else /* CONFIG_IPV6_SEG6_BPF */
4516 return -EOPNOTSUPP;
4517#endif
4518}
4519
4520static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
4521 .func = bpf_lwt_seg6_action,
4522 .gpl_only = false,
4523 .ret_type = RET_INTEGER,
4524 .arg1_type = ARG_PTR_TO_CTX,
4525 .arg2_type = ARG_ANYTHING,
4526 .arg3_type = ARG_PTR_TO_MEM,
4527 .arg4_type = ARG_CONST_SIZE
4528};
4529
4530BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
4531 s32, len)
4532{
4533#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4534 struct seg6_bpf_srh_state *srh_state =
4535 this_cpu_ptr(&seg6_bpf_srh_states);
4536 void *srh_end, *srh_tlvs, *ptr;
4537 struct ipv6_sr_hdr *srh;
4538 struct ipv6hdr *hdr;
4539 int srhoff = 0;
4540 int ret;
4541
4542 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4543 return -EINVAL;
4544 srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4545
4546 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
4547 ((srh->first_segment + 1) << 4));
4548 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
4549 srh_state->hdrlen);
4550 ptr = skb->data + offset;
4551
4552 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
4553 return -EFAULT;
4554 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
4555 return -EFAULT;
4556
4557 if (len > 0) {
4558 ret = skb_cow_head(skb, len);
4559 if (unlikely(ret < 0))
4560 return ret;
4561
4562 ret = bpf_skb_net_hdr_push(skb, offset, len);
4563 } else {
4564 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
4565 }
4566
4567 bpf_compute_data_pointers(skb);
4568 if (unlikely(ret < 0))
4569 return ret;
4570
4571 hdr = (struct ipv6hdr *)skb->data;
4572 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4573
4574 srh_state->hdrlen += len;
4575 srh_state->valid = 0;
4576 return 0;
4577#else /* CONFIG_IPV6_SEG6_BPF */
4578 return -EOPNOTSUPP;
4579#endif
4580}
4581
4582static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
4583 .func = bpf_lwt_seg6_adjust_srh,
4584 .gpl_only = false,
4585 .ret_type = RET_INTEGER,
4586 .arg1_type = ARG_PTR_TO_CTX,
4587 .arg2_type = ARG_ANYTHING,
4588 .arg3_type = ARG_ANYTHING,
4589};
4590
4591bool bpf_helper_changes_pkt_data(void *func)
4592{
4593 if (func == bpf_skb_vlan_push ||
4594 func == bpf_skb_vlan_pop ||
4595 func == bpf_skb_store_bytes ||
4596 func == bpf_skb_change_proto ||
4597 func == bpf_skb_change_head ||
4598 func == bpf_skb_change_tail ||
4599 func == bpf_skb_adjust_room ||
4600 func == bpf_skb_pull_data ||
4601 func == bpf_clone_redirect ||
4602 func == bpf_l3_csum_replace ||
4603 func == bpf_l4_csum_replace ||
4604 func == bpf_xdp_adjust_head ||
4605 func == bpf_xdp_adjust_meta ||
4606 func == bpf_msg_pull_data ||
4607 func == bpf_xdp_adjust_tail ||
4608 func == bpf_lwt_push_encap ||
4609 func == bpf_lwt_seg6_store_bytes ||
4610 func == bpf_lwt_seg6_adjust_srh ||
4611 func == bpf_lwt_seg6_action
4612 )
4613 return true;
4614
4615 return false;
4616}
4617
d4052c4a 4618static const struct bpf_func_proto *
2492d3b8 4619bpf_base_func_proto(enum bpf_func_id func_id)
89aa0758
AS
4620{
4621 switch (func_id) {
4622 case BPF_FUNC_map_lookup_elem:
4623 return &bpf_map_lookup_elem_proto;
4624 case BPF_FUNC_map_update_elem:
4625 return &bpf_map_update_elem_proto;
4626 case BPF_FUNC_map_delete_elem:
4627 return &bpf_map_delete_elem_proto;
03e69b50
DB
4628 case BPF_FUNC_get_prandom_u32:
4629 return &bpf_get_prandom_u32_proto;
c04167ce 4630 case BPF_FUNC_get_smp_processor_id:
80b48c44 4631 return &bpf_get_raw_smp_processor_id_proto;
2d0e30c3
DB
4632 case BPF_FUNC_get_numa_node_id:
4633 return &bpf_get_numa_node_id_proto;
04fd61ab
AS
4634 case BPF_FUNC_tail_call:
4635 return &bpf_tail_call_proto;
17ca8cbf
DB
4636 case BPF_FUNC_ktime_get_ns:
4637 return &bpf_ktime_get_ns_proto;
0756ea3e 4638 case BPF_FUNC_trace_printk:
1be7f75d
AS
4639 if (capable(CAP_SYS_ADMIN))
4640 return bpf_get_trace_printk_proto();
89aa0758
AS
4641 default:
4642 return NULL;
4643 }
4644}
4645
ae2cf1c4 4646static const struct bpf_func_proto *
5e43f899 4647sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
ae2cf1c4
DA
4648{
4649 switch (func_id) {
4650 /* inet and inet6 sockets are created in a process
4651 * context so there is always a valid uid/gid
4652 */
4653 case BPF_FUNC_get_current_uid_gid:
4654 return &bpf_get_current_uid_gid_proto;
4655 default:
4656 return bpf_base_func_proto(func_id);
4657 }
4658}
4659
4fbac77d
AI
4660static const struct bpf_func_proto *
4661sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4662{
4663 switch (func_id) {
4664 /* inet and inet6 sockets are created in a process
4665 * context so there is always a valid uid/gid
4666 */
4667 case BPF_FUNC_get_current_uid_gid:
4668 return &bpf_get_current_uid_gid_proto;
d74bad4e
AI
4669 case BPF_FUNC_bind:
4670 switch (prog->expected_attach_type) {
4671 case BPF_CGROUP_INET4_CONNECT:
4672 case BPF_CGROUP_INET6_CONNECT:
4673 return &bpf_bind_proto;
4674 default:
4675 return NULL;
4676 }
4fbac77d
AI
4677 default:
4678 return bpf_base_func_proto(func_id);
4679 }
4680}
4681
2492d3b8 4682static const struct bpf_func_proto *
5e43f899 4683sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2492d3b8
DB
4684{
4685 switch (func_id) {
4686 case BPF_FUNC_skb_load_bytes:
4687 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
4688 case BPF_FUNC_skb_load_bytes_relative:
4689 return &bpf_skb_load_bytes_relative_proto;
91b8270f
CF
4690 case BPF_FUNC_get_socket_cookie:
4691 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
4692 case BPF_FUNC_get_socket_uid:
4693 return &bpf_get_socket_uid_proto;
2492d3b8
DB
4694 default:
4695 return bpf_base_func_proto(func_id);
4696 }
4697}
4698
608cd71a 4699static const struct bpf_func_proto *
5e43f899 4700tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
608cd71a
AS
4701{
4702 switch (func_id) {
4703 case BPF_FUNC_skb_store_bytes:
4704 return &bpf_skb_store_bytes_proto;
05c74e5e
DB
4705 case BPF_FUNC_skb_load_bytes:
4706 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
4707 case BPF_FUNC_skb_load_bytes_relative:
4708 return &bpf_skb_load_bytes_relative_proto;
36bbef52
DB
4709 case BPF_FUNC_skb_pull_data:
4710 return &bpf_skb_pull_data_proto;
7d672345
DB
4711 case BPF_FUNC_csum_diff:
4712 return &bpf_csum_diff_proto;
36bbef52
DB
4713 case BPF_FUNC_csum_update:
4714 return &bpf_csum_update_proto;
91bc4822
AS
4715 case BPF_FUNC_l3_csum_replace:
4716 return &bpf_l3_csum_replace_proto;
4717 case BPF_FUNC_l4_csum_replace:
4718 return &bpf_l4_csum_replace_proto;
3896d655
AS
4719 case BPF_FUNC_clone_redirect:
4720 return &bpf_clone_redirect_proto;
8d20aabe
DB
4721 case BPF_FUNC_get_cgroup_classid:
4722 return &bpf_get_cgroup_classid_proto;
4e10df9a
AS
4723 case BPF_FUNC_skb_vlan_push:
4724 return &bpf_skb_vlan_push_proto;
4725 case BPF_FUNC_skb_vlan_pop:
4726 return &bpf_skb_vlan_pop_proto;
6578171a
DB
4727 case BPF_FUNC_skb_change_proto:
4728 return &bpf_skb_change_proto_proto;
d2485c42
DB
4729 case BPF_FUNC_skb_change_type:
4730 return &bpf_skb_change_type_proto;
2be7e212
DB
4731 case BPF_FUNC_skb_adjust_room:
4732 return &bpf_skb_adjust_room_proto;
5293efe6
DB
4733 case BPF_FUNC_skb_change_tail:
4734 return &bpf_skb_change_tail_proto;
d3aa45ce
AS
4735 case BPF_FUNC_skb_get_tunnel_key:
4736 return &bpf_skb_get_tunnel_key_proto;
4737 case BPF_FUNC_skb_set_tunnel_key:
14ca0751
DB
4738 return bpf_get_skb_set_tunnel_proto(func_id);
4739 case BPF_FUNC_skb_get_tunnel_opt:
4740 return &bpf_skb_get_tunnel_opt_proto;
4741 case BPF_FUNC_skb_set_tunnel_opt:
4742 return bpf_get_skb_set_tunnel_proto(func_id);
27b29f63
AS
4743 case BPF_FUNC_redirect:
4744 return &bpf_redirect_proto;
c46646d0
DB
4745 case BPF_FUNC_get_route_realm:
4746 return &bpf_get_route_realm_proto;
13c5c240
DB
4747 case BPF_FUNC_get_hash_recalc:
4748 return &bpf_get_hash_recalc_proto;
7a4b28c6
DB
4749 case BPF_FUNC_set_hash_invalid:
4750 return &bpf_set_hash_invalid_proto;
ded092cd
DB
4751 case BPF_FUNC_set_hash:
4752 return &bpf_set_hash_proto;
bd570ff9 4753 case BPF_FUNC_perf_event_output:
555c8a86 4754 return &bpf_skb_event_output_proto;
80b48c44
DB
4755 case BPF_FUNC_get_smp_processor_id:
4756 return &bpf_get_smp_processor_id_proto;
747ea55e
DB
4757 case BPF_FUNC_skb_under_cgroup:
4758 return &bpf_skb_under_cgroup_proto;
91b8270f
CF
4759 case BPF_FUNC_get_socket_cookie:
4760 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
4761 case BPF_FUNC_get_socket_uid:
4762 return &bpf_get_socket_uid_proto;
cb20b08e
DB
4763 case BPF_FUNC_fib_lookup:
4764 return &bpf_skb_fib_lookup_proto;
12bed760
EB
4765#ifdef CONFIG_XFRM
4766 case BPF_FUNC_skb_get_xfrm_state:
4767 return &bpf_skb_get_xfrm_state_proto;
4768#endif
cb20b08e
DB
4769#ifdef CONFIG_SOCK_CGROUP_DATA
4770 case BPF_FUNC_skb_cgroup_id:
4771 return &bpf_skb_cgroup_id_proto;
4772#endif
608cd71a 4773 default:
2492d3b8 4774 return bpf_base_func_proto(func_id);
608cd71a
AS
4775 }
4776}
4777
6a773a15 4778static const struct bpf_func_proto *
5e43f899 4779xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6a773a15 4780{
4de16969
DB
4781 switch (func_id) {
4782 case BPF_FUNC_perf_event_output:
4783 return &bpf_xdp_event_output_proto;
669dc4d7
DB
4784 case BPF_FUNC_get_smp_processor_id:
4785 return &bpf_get_smp_processor_id_proto;
205c3807
DB
4786 case BPF_FUNC_csum_diff:
4787 return &bpf_csum_diff_proto;
17bedab2
MKL
4788 case BPF_FUNC_xdp_adjust_head:
4789 return &bpf_xdp_adjust_head_proto;
de8f3a83
DB
4790 case BPF_FUNC_xdp_adjust_meta:
4791 return &bpf_xdp_adjust_meta_proto;
814abfab
JF
4792 case BPF_FUNC_redirect:
4793 return &bpf_xdp_redirect_proto;
97f91a7c 4794 case BPF_FUNC_redirect_map:
e4a8e817 4795 return &bpf_xdp_redirect_map_proto;
b32cc5b9
NS
4796 case BPF_FUNC_xdp_adjust_tail:
4797 return &bpf_xdp_adjust_tail_proto;
87f5fc7e
DA
4798 case BPF_FUNC_fib_lookup:
4799 return &bpf_xdp_fib_lookup_proto;
4de16969 4800 default:
2492d3b8 4801 return bpf_base_func_proto(func_id);
4de16969 4802 }
6a773a15
BB
4803}
4804
8c4b4c7e 4805static const struct bpf_func_proto *
5e43f899 4806sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
8c4b4c7e
LB
4807{
4808 switch (func_id) {
4809 case BPF_FUNC_setsockopt:
4810 return &bpf_setsockopt_proto;
cd86d1fd
LB
4811 case BPF_FUNC_getsockopt:
4812 return &bpf_getsockopt_proto;
b13d8807
LB
4813 case BPF_FUNC_sock_ops_cb_flags_set:
4814 return &bpf_sock_ops_cb_flags_set_proto;
174a79ff
JF
4815 case BPF_FUNC_sock_map_update:
4816 return &bpf_sock_map_update_proto;
81110384
JF
4817 case BPF_FUNC_sock_hash_update:
4818 return &bpf_sock_hash_update_proto;
8c4b4c7e
LB
4819 default:
4820 return bpf_base_func_proto(func_id);
4821 }
4822}
4823
5e43f899
AI
4824static const struct bpf_func_proto *
4825sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4f738adb
JF
4826{
4827 switch (func_id) {
4828 case BPF_FUNC_msg_redirect_map:
4829 return &bpf_msg_redirect_map_proto;
81110384
JF
4830 case BPF_FUNC_msg_redirect_hash:
4831 return &bpf_msg_redirect_hash_proto;
2a100317
JF
4832 case BPF_FUNC_msg_apply_bytes:
4833 return &bpf_msg_apply_bytes_proto;
91843d54
JF
4834 case BPF_FUNC_msg_cork_bytes:
4835 return &bpf_msg_cork_bytes_proto;
015632bb
JF
4836 case BPF_FUNC_msg_pull_data:
4837 return &bpf_msg_pull_data_proto;
4f738adb
JF
4838 default:
4839 return bpf_base_func_proto(func_id);
4840 }
4841}
4842
5e43f899
AI
4843static const struct bpf_func_proto *
4844sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
b005fd18
JF
4845{
4846 switch (func_id) {
8a31db56
JF
4847 case BPF_FUNC_skb_store_bytes:
4848 return &bpf_skb_store_bytes_proto;
b005fd18
JF
4849 case BPF_FUNC_skb_load_bytes:
4850 return &bpf_skb_load_bytes_proto;
8a31db56
JF
4851 case BPF_FUNC_skb_pull_data:
4852 return &bpf_skb_pull_data_proto;
4853 case BPF_FUNC_skb_change_tail:
4854 return &bpf_skb_change_tail_proto;
4855 case BPF_FUNC_skb_change_head:
4856 return &bpf_skb_change_head_proto;
b005fd18
JF
4857 case BPF_FUNC_get_socket_cookie:
4858 return &bpf_get_socket_cookie_proto;
4859 case BPF_FUNC_get_socket_uid:
4860 return &bpf_get_socket_uid_proto;
174a79ff
JF
4861 case BPF_FUNC_sk_redirect_map:
4862 return &bpf_sk_redirect_map_proto;
81110384
JF
4863 case BPF_FUNC_sk_redirect_hash:
4864 return &bpf_sk_redirect_hash_proto;
b005fd18
JF
4865 default:
4866 return bpf_base_func_proto(func_id);
4867 }
4868}
4869
cd3092c7
MX
4870static const struct bpf_func_proto *
4871lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4872{
4873 switch (func_id) {
4874 case BPF_FUNC_skb_load_bytes:
4875 return &bpf_skb_load_bytes_proto;
4876 case BPF_FUNC_skb_pull_data:
4877 return &bpf_skb_pull_data_proto;
4878 case BPF_FUNC_csum_diff:
4879 return &bpf_csum_diff_proto;
4880 case BPF_FUNC_get_cgroup_classid:
4881 return &bpf_get_cgroup_classid_proto;
4882 case BPF_FUNC_get_route_realm:
4883 return &bpf_get_route_realm_proto;
4884 case BPF_FUNC_get_hash_recalc:
4885 return &bpf_get_hash_recalc_proto;
4886 case BPF_FUNC_perf_event_output:
4887 return &bpf_skb_event_output_proto;
4888 case BPF_FUNC_get_smp_processor_id:
4889 return &bpf_get_smp_processor_id_proto;
4890 case BPF_FUNC_skb_under_cgroup:
4891 return &bpf_skb_under_cgroup_proto;
4892 default:
4893 return bpf_base_func_proto(func_id);
4894 }
4895}
4896
4897static const struct bpf_func_proto *
4898lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4899{
4900 switch (func_id) {
4901 case BPF_FUNC_lwt_push_encap:
4902 return &bpf_lwt_push_encap_proto;
4903 default:
4904 return lwt_out_func_proto(func_id, prog);
4905 }
4906}
4907
3a0af8fd 4908static const struct bpf_func_proto *
5e43f899 4909lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3a0af8fd
TG
4910{
4911 switch (func_id) {
4912 case BPF_FUNC_skb_get_tunnel_key:
4913 return &bpf_skb_get_tunnel_key_proto;
4914 case BPF_FUNC_skb_set_tunnel_key:
4915 return bpf_get_skb_set_tunnel_proto(func_id);
4916 case BPF_FUNC_skb_get_tunnel_opt:
4917 return &bpf_skb_get_tunnel_opt_proto;
4918 case BPF_FUNC_skb_set_tunnel_opt:
4919 return bpf_get_skb_set_tunnel_proto(func_id);
4920 case BPF_FUNC_redirect:
4921 return &bpf_redirect_proto;
4922 case BPF_FUNC_clone_redirect:
4923 return &bpf_clone_redirect_proto;
4924 case BPF_FUNC_skb_change_tail:
4925 return &bpf_skb_change_tail_proto;
4926 case BPF_FUNC_skb_change_head:
4927 return &bpf_skb_change_head_proto;
4928 case BPF_FUNC_skb_store_bytes:
4929 return &bpf_skb_store_bytes_proto;
4930 case BPF_FUNC_csum_update:
4931 return &bpf_csum_update_proto;
4932 case BPF_FUNC_l3_csum_replace:
4933 return &bpf_l3_csum_replace_proto;
4934 case BPF_FUNC_l4_csum_replace:
4935 return &bpf_l4_csum_replace_proto;
4936 case BPF_FUNC_set_hash_invalid:
4937 return &bpf_set_hash_invalid_proto;
4938 default:
cd3092c7 4939 return lwt_out_func_proto(func_id, prog);
3a0af8fd
TG
4940 }
4941}
4942
004d4b27
MX
4943static const struct bpf_func_proto *
4944lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4945{
4946 switch (func_id) {
4947 case BPF_FUNC_lwt_seg6_store_bytes:
4948 return &bpf_lwt_seg6_store_bytes_proto;
4949 case BPF_FUNC_lwt_seg6_action:
4950 return &bpf_lwt_seg6_action_proto;
4951 case BPF_FUNC_lwt_seg6_adjust_srh:
4952 return &bpf_lwt_seg6_adjust_srh_proto;
4953 default:
4954 return lwt_out_func_proto(func_id, prog);
3a0af8fd
TG
4955 }
4956}
4957
f96da094 4958static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 4959 const struct bpf_prog *prog,
f96da094 4960 struct bpf_insn_access_aux *info)
23994631 4961{
f96da094 4962 const int size_default = sizeof(__u32);
23994631 4963
9bac3d6d
AS
4964 if (off < 0 || off >= sizeof(struct __sk_buff))
4965 return false;
62c7989b 4966
4936e352 4967 /* The verifier guarantees that size > 0. */
9bac3d6d
AS
4968 if (off % size != 0)
4969 return false;
62c7989b
DB
4970
4971 switch (off) {
f96da094
DB
4972 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4973 if (off + size > offsetofend(struct __sk_buff, cb[4]))
62c7989b
DB
4974 return false;
4975 break;
8a31db56
JF
4976 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
4977 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
4978 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
4979 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
f96da094 4980 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 4981 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094
DB
4982 case bpf_ctx_range(struct __sk_buff, data_end):
4983 if (size != size_default)
23994631 4984 return false;
31fd8581
YS
4985 break;
4986 default:
f96da094 4987 /* Only narrow read access allowed for now. */
31fd8581 4988 if (type == BPF_WRITE) {
f96da094 4989 if (size != size_default)
31fd8581
YS
4990 return false;
4991 } else {
f96da094
DB
4992 bpf_ctx_record_field_size(info, size_default);
4993 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
23994631 4994 return false;
31fd8581 4995 }
62c7989b 4996 }
9bac3d6d
AS
4997
4998 return true;
4999}
5000
d691f9e8 5001static bool sk_filter_is_valid_access(int off, int size,
19de99f7 5002 enum bpf_access_type type,
5e43f899 5003 const struct bpf_prog *prog,
23994631 5004 struct bpf_insn_access_aux *info)
d691f9e8 5005{
db58ba45 5006 switch (off) {
f96da094
DB
5007 case bpf_ctx_range(struct __sk_buff, tc_classid):
5008 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 5009 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094 5010 case bpf_ctx_range(struct __sk_buff, data_end):
8a31db56 5011 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
045efa82 5012 return false;
db58ba45 5013 }
045efa82 5014
d691f9e8
AS
5015 if (type == BPF_WRITE) {
5016 switch (off) {
f96da094 5017 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
d691f9e8
AS
5018 break;
5019 default:
5020 return false;
5021 }
5022 }
5023
5e43f899 5024 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
5025}
5026
3a0af8fd
TG
5027static bool lwt_is_valid_access(int off, int size,
5028 enum bpf_access_type type,
5e43f899 5029 const struct bpf_prog *prog,
23994631 5030 struct bpf_insn_access_aux *info)
3a0af8fd
TG
5031{
5032 switch (off) {
f96da094 5033 case bpf_ctx_range(struct __sk_buff, tc_classid):
8a31db56 5034 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
de8f3a83 5035 case bpf_ctx_range(struct __sk_buff, data_meta):
3a0af8fd
TG
5036 return false;
5037 }
5038
5039 if (type == BPF_WRITE) {
5040 switch (off) {
f96da094
DB
5041 case bpf_ctx_range(struct __sk_buff, mark):
5042 case bpf_ctx_range(struct __sk_buff, priority):
5043 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
3a0af8fd
TG
5044 break;
5045 default:
5046 return false;
5047 }
5048 }
5049
f96da094
DB
5050 switch (off) {
5051 case bpf_ctx_range(struct __sk_buff, data):
5052 info->reg_type = PTR_TO_PACKET;
5053 break;
5054 case bpf_ctx_range(struct __sk_buff, data_end):
5055 info->reg_type = PTR_TO_PACKET_END;
5056 break;
5057 }
5058
5e43f899 5059 return bpf_skb_is_valid_access(off, size, type, prog, info);
3a0af8fd
TG
5060}
5061
aac3fc32
AI
5062/* Attach type specific accesses */
5063static bool __sock_filter_check_attach_type(int off,
5064 enum bpf_access_type access_type,
5065 enum bpf_attach_type attach_type)
61023658 5066{
aac3fc32
AI
5067 switch (off) {
5068 case offsetof(struct bpf_sock, bound_dev_if):
5069 case offsetof(struct bpf_sock, mark):
5070 case offsetof(struct bpf_sock, priority):
5071 switch (attach_type) {
5072 case BPF_CGROUP_INET_SOCK_CREATE:
5073 goto full_access;
5074 default:
5075 return false;
5076 }
5077 case bpf_ctx_range(struct bpf_sock, src_ip4):
5078 switch (attach_type) {
5079 case BPF_CGROUP_INET4_POST_BIND:
5080 goto read_only;
5081 default:
5082 return false;
5083 }
5084 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5085 switch (attach_type) {
5086 case BPF_CGROUP_INET6_POST_BIND:
5087 goto read_only;
5088 default:
5089 return false;
5090 }
5091 case bpf_ctx_range(struct bpf_sock, src_port):
5092 switch (attach_type) {
5093 case BPF_CGROUP_INET4_POST_BIND:
5094 case BPF_CGROUP_INET6_POST_BIND:
5095 goto read_only;
61023658
DA
5096 default:
5097 return false;
5098 }
5099 }
aac3fc32
AI
5100read_only:
5101 return access_type == BPF_READ;
5102full_access:
5103 return true;
5104}
5105
5106static bool __sock_filter_check_size(int off, int size,
5107 struct bpf_insn_access_aux *info)
5108{
5109 const int size_default = sizeof(__u32);
61023658 5110
aac3fc32
AI
5111 switch (off) {
5112 case bpf_ctx_range(struct bpf_sock, src_ip4):
5113 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5114 bpf_ctx_record_field_size(info, size_default);
5115 return bpf_ctx_narrow_access_ok(off, size, size_default);
5116 }
5117
5118 return size == size_default;
5119}
5120
5121static bool sock_filter_is_valid_access(int off, int size,
5122 enum bpf_access_type type,
5123 const struct bpf_prog *prog,
5124 struct bpf_insn_access_aux *info)
5125{
5126 if (off < 0 || off >= sizeof(struct bpf_sock))
61023658 5127 return false;
61023658
DA
5128 if (off % size != 0)
5129 return false;
aac3fc32
AI
5130 if (!__sock_filter_check_attach_type(off, type,
5131 prog->expected_attach_type))
5132 return false;
5133 if (!__sock_filter_check_size(off, size, info))
61023658 5134 return false;
61023658
DA
5135 return true;
5136}
5137
047b0ecd
DB
5138static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
5139 const struct bpf_prog *prog, int drop_verdict)
36bbef52
DB
5140{
5141 struct bpf_insn *insn = insn_buf;
5142
5143 if (!direct_write)
5144 return 0;
5145
5146 /* if (!skb->cloned)
5147 * goto start;
5148 *
5149 * (Fast-path, otherwise approximation that we might be
5150 * a clone, do the rest in helper.)
5151 */
5152 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
5153 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
5154 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
5155
5156 /* ret = bpf_skb_pull_data(skb, 0); */
5157 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
5158 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
5159 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5160 BPF_FUNC_skb_pull_data);
5161 /* if (!ret)
5162 * goto restore;
5163 * return TC_ACT_SHOT;
5164 */
5165 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
047b0ecd 5166 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
36bbef52
DB
5167 *insn++ = BPF_EXIT_INSN();
5168
5169 /* restore: */
5170 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
5171 /* start: */
5172 *insn++ = prog->insnsi[0];
5173
5174 return insn - insn_buf;
5175}
5176
e0cea7ce
DB
5177static int bpf_gen_ld_abs(const struct bpf_insn *orig,
5178 struct bpf_insn *insn_buf)
5179{
5180 bool indirect = BPF_MODE(orig->code) == BPF_IND;
5181 struct bpf_insn *insn = insn_buf;
5182
5183 /* We're guaranteed here that CTX is in R6. */
5184 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
5185 if (!indirect) {
5186 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
5187 } else {
5188 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
5189 if (orig->imm)
5190 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
5191 }
5192
5193 switch (BPF_SIZE(orig->code)) {
5194 case BPF_B:
5195 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
5196 break;
5197 case BPF_H:
5198 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
5199 break;
5200 case BPF_W:
5201 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
5202 break;
5203 }
5204
5205 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
5206 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
5207 *insn++ = BPF_EXIT_INSN();
5208
5209 return insn - insn_buf;
5210}
5211
047b0ecd
DB
5212static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
5213 const struct bpf_prog *prog)
5214{
5215 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
5216}
5217
d691f9e8 5218static bool tc_cls_act_is_valid_access(int off, int size,
19de99f7 5219 enum bpf_access_type type,
5e43f899 5220 const struct bpf_prog *prog,
23994631 5221 struct bpf_insn_access_aux *info)
d691f9e8
AS
5222{
5223 if (type == BPF_WRITE) {
5224 switch (off) {
f96da094
DB
5225 case bpf_ctx_range(struct __sk_buff, mark):
5226 case bpf_ctx_range(struct __sk_buff, tc_index):
5227 case bpf_ctx_range(struct __sk_buff, priority):
5228 case bpf_ctx_range(struct __sk_buff, tc_classid):
5229 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
d691f9e8
AS
5230 break;
5231 default:
5232 return false;
5233 }
5234 }
19de99f7 5235
f96da094
DB
5236 switch (off) {
5237 case bpf_ctx_range(struct __sk_buff, data):
5238 info->reg_type = PTR_TO_PACKET;
5239 break;
de8f3a83
DB
5240 case bpf_ctx_range(struct __sk_buff, data_meta):
5241 info->reg_type = PTR_TO_PACKET_META;
5242 break;
f96da094
DB
5243 case bpf_ctx_range(struct __sk_buff, data_end):
5244 info->reg_type = PTR_TO_PACKET_END;
5245 break;
8a31db56
JF
5246 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
5247 return false;
f96da094
DB
5248 }
5249
5e43f899 5250 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
5251}
5252
1afaf661 5253static bool __is_valid_xdp_access(int off, int size)
6a773a15
BB
5254{
5255 if (off < 0 || off >= sizeof(struct xdp_md))
5256 return false;
5257 if (off % size != 0)
5258 return false;
6088b582 5259 if (size != sizeof(__u32))
6a773a15
BB
5260 return false;
5261
5262 return true;
5263}
5264
5265static bool xdp_is_valid_access(int off, int size,
5266 enum bpf_access_type type,
5e43f899 5267 const struct bpf_prog *prog,
23994631 5268 struct bpf_insn_access_aux *info)
6a773a15 5269{
0d830032
JK
5270 if (type == BPF_WRITE) {
5271 if (bpf_prog_is_dev_bound(prog->aux)) {
5272 switch (off) {
5273 case offsetof(struct xdp_md, rx_queue_index):
5274 return __is_valid_xdp_access(off, size);
5275 }
5276 }
6a773a15 5277 return false;
0d830032 5278 }
6a773a15
BB
5279
5280 switch (off) {
5281 case offsetof(struct xdp_md, data):
23994631 5282 info->reg_type = PTR_TO_PACKET;
6a773a15 5283 break;
de8f3a83
DB
5284 case offsetof(struct xdp_md, data_meta):
5285 info->reg_type = PTR_TO_PACKET_META;
5286 break;
6a773a15 5287 case offsetof(struct xdp_md, data_end):
23994631 5288 info->reg_type = PTR_TO_PACKET_END;
6a773a15
BB
5289 break;
5290 }
5291
1afaf661 5292 return __is_valid_xdp_access(off, size);
6a773a15
BB
5293}
5294
5295void bpf_warn_invalid_xdp_action(u32 act)
5296{
9beb8bed
DB
5297 const u32 act_max = XDP_REDIRECT;
5298
5299 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
5300 act > act_max ? "Illegal" : "Driver unsupported",
5301 act);
6a773a15
BB
5302}
5303EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
5304
4fbac77d
AI
5305static bool sock_addr_is_valid_access(int off, int size,
5306 enum bpf_access_type type,
5307 const struct bpf_prog *prog,
5308 struct bpf_insn_access_aux *info)
5309{
5310 const int size_default = sizeof(__u32);
5311
5312 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
5313 return false;
5314 if (off % size != 0)
5315 return false;
5316
5317 /* Disallow access to IPv6 fields from IPv4 contex and vise
5318 * versa.
5319 */
5320 switch (off) {
5321 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5322 switch (prog->expected_attach_type) {
5323 case BPF_CGROUP_INET4_BIND:
d74bad4e 5324 case BPF_CGROUP_INET4_CONNECT:
1cedee13 5325 case BPF_CGROUP_UDP4_SENDMSG:
4fbac77d
AI
5326 break;
5327 default:
5328 return false;
5329 }
5330 break;
5331 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5332 switch (prog->expected_attach_type) {
5333 case BPF_CGROUP_INET6_BIND:
d74bad4e 5334 case BPF_CGROUP_INET6_CONNECT:
1cedee13
AI
5335 case BPF_CGROUP_UDP6_SENDMSG:
5336 break;
5337 default:
5338 return false;
5339 }
5340 break;
5341 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
5342 switch (prog->expected_attach_type) {
5343 case BPF_CGROUP_UDP4_SENDMSG:
5344 break;
5345 default:
5346 return false;
5347 }
5348 break;
5349 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
5350 msg_src_ip6[3]):
5351 switch (prog->expected_attach_type) {
5352 case BPF_CGROUP_UDP6_SENDMSG:
4fbac77d
AI
5353 break;
5354 default:
5355 return false;
5356 }
5357 break;
5358 }
5359
5360 switch (off) {
5361 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5362 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
1cedee13
AI
5363 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
5364 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
5365 msg_src_ip6[3]):
4fbac77d
AI
5366 /* Only narrow read access allowed for now. */
5367 if (type == BPF_READ) {
5368 bpf_ctx_record_field_size(info, size_default);
5369 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
5370 return false;
5371 } else {
5372 if (size != size_default)
5373 return false;
5374 }
5375 break;
5376 case bpf_ctx_range(struct bpf_sock_addr, user_port):
5377 if (size != size_default)
5378 return false;
5379 break;
5380 default:
5381 if (type == BPF_READ) {
5382 if (size != size_default)
5383 return false;
5384 } else {
5385 return false;
5386 }
5387 }
5388
5389 return true;
5390}
5391
44f0e430
LB
5392static bool sock_ops_is_valid_access(int off, int size,
5393 enum bpf_access_type type,
5e43f899 5394 const struct bpf_prog *prog,
44f0e430 5395 struct bpf_insn_access_aux *info)
40304b2a 5396{
44f0e430
LB
5397 const int size_default = sizeof(__u32);
5398
40304b2a
LB
5399 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
5400 return false;
44f0e430 5401
40304b2a
LB
5402 /* The verifier guarantees that size > 0. */
5403 if (off % size != 0)
5404 return false;
40304b2a 5405
40304b2a
LB
5406 if (type == BPF_WRITE) {
5407 switch (off) {
2585cd62 5408 case offsetof(struct bpf_sock_ops, reply):
6f9bd3d7 5409 case offsetof(struct bpf_sock_ops, sk_txhash):
44f0e430
LB
5410 if (size != size_default)
5411 return false;
40304b2a
LB
5412 break;
5413 default:
5414 return false;
5415 }
44f0e430
LB
5416 } else {
5417 switch (off) {
5418 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
5419 bytes_acked):
5420 if (size != sizeof(__u64))
5421 return false;
5422 break;
5423 default:
5424 if (size != size_default)
5425 return false;
5426 break;
5427 }
40304b2a
LB
5428 }
5429
44f0e430 5430 return true;
40304b2a
LB
5431}
5432
8a31db56
JF
5433static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
5434 const struct bpf_prog *prog)
5435{
047b0ecd 5436 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
8a31db56
JF
5437}
5438
b005fd18
JF
5439static bool sk_skb_is_valid_access(int off, int size,
5440 enum bpf_access_type type,
5e43f899 5441 const struct bpf_prog *prog,
b005fd18
JF
5442 struct bpf_insn_access_aux *info)
5443{
de8f3a83
DB
5444 switch (off) {
5445 case bpf_ctx_range(struct __sk_buff, tc_classid):
5446 case bpf_ctx_range(struct __sk_buff, data_meta):
5447 return false;
5448 }
5449
8a31db56
JF
5450 if (type == BPF_WRITE) {
5451 switch (off) {
8a31db56
JF
5452 case bpf_ctx_range(struct __sk_buff, tc_index):
5453 case bpf_ctx_range(struct __sk_buff, priority):
5454 break;
5455 default:
5456 return false;
5457 }
5458 }
5459
b005fd18 5460 switch (off) {
f7e9cb1e 5461 case bpf_ctx_range(struct __sk_buff, mark):
8a31db56 5462 return false;
b005fd18
JF
5463 case bpf_ctx_range(struct __sk_buff, data):
5464 info->reg_type = PTR_TO_PACKET;
5465 break;
5466 case bpf_ctx_range(struct __sk_buff, data_end):
5467 info->reg_type = PTR_TO_PACKET_END;
5468 break;
5469 }
5470
5e43f899 5471 return bpf_skb_is_valid_access(off, size, type, prog, info);
b005fd18
JF
5472}
5473
4f738adb
JF
5474static bool sk_msg_is_valid_access(int off, int size,
5475 enum bpf_access_type type,
5e43f899 5476 const struct bpf_prog *prog,
4f738adb
JF
5477 struct bpf_insn_access_aux *info)
5478{
5479 if (type == BPF_WRITE)
5480 return false;
5481
5482 switch (off) {
5483 case offsetof(struct sk_msg_md, data):
5484 info->reg_type = PTR_TO_PACKET;
303def35
JF
5485 if (size != sizeof(__u64))
5486 return false;
4f738adb
JF
5487 break;
5488 case offsetof(struct sk_msg_md, data_end):
5489 info->reg_type = PTR_TO_PACKET_END;
303def35
JF
5490 if (size != sizeof(__u64))
5491 return false;
4f738adb 5492 break;
303def35
JF
5493 default:
5494 if (size != sizeof(__u32))
5495 return false;
4f738adb
JF
5496 }
5497
5498 if (off < 0 || off >= sizeof(struct sk_msg_md))
5499 return false;
5500 if (off % size != 0)
5501 return false;
4f738adb
JF
5502
5503 return true;
5504}
5505
2492d3b8
DB
5506static u32 bpf_convert_ctx_access(enum bpf_access_type type,
5507 const struct bpf_insn *si,
5508 struct bpf_insn *insn_buf,
f96da094 5509 struct bpf_prog *prog, u32 *target_size)
9bac3d6d
AS
5510{
5511 struct bpf_insn *insn = insn_buf;
6b8cc1d1 5512 int off;
9bac3d6d 5513
6b8cc1d1 5514 switch (si->off) {
9bac3d6d 5515 case offsetof(struct __sk_buff, len):
6b8cc1d1 5516 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5517 bpf_target_off(struct sk_buff, len, 4,
5518 target_size));
9bac3d6d
AS
5519 break;
5520
0b8c707d 5521 case offsetof(struct __sk_buff, protocol):
6b8cc1d1 5522 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5523 bpf_target_off(struct sk_buff, protocol, 2,
5524 target_size));
0b8c707d
DB
5525 break;
5526
27cd5452 5527 case offsetof(struct __sk_buff, vlan_proto):
6b8cc1d1 5528 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5529 bpf_target_off(struct sk_buff, vlan_proto, 2,
5530 target_size));
27cd5452
MS
5531 break;
5532
bcad5718 5533 case offsetof(struct __sk_buff, priority):
754f1e6a 5534 if (type == BPF_WRITE)
6b8cc1d1 5535 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5536 bpf_target_off(struct sk_buff, priority, 4,
5537 target_size));
754f1e6a 5538 else
6b8cc1d1 5539 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5540 bpf_target_off(struct sk_buff, priority, 4,
5541 target_size));
bcad5718
DB
5542 break;
5543
37e82c2f 5544 case offsetof(struct __sk_buff, ingress_ifindex):
6b8cc1d1 5545 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5546 bpf_target_off(struct sk_buff, skb_iif, 4,
5547 target_size));
37e82c2f
AS
5548 break;
5549
5550 case offsetof(struct __sk_buff, ifindex):
f035a515 5551 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
6b8cc1d1 5552 si->dst_reg, si->src_reg,
37e82c2f 5553 offsetof(struct sk_buff, dev));
6b8cc1d1
DB
5554 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
5555 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
f96da094
DB
5556 bpf_target_off(struct net_device, ifindex, 4,
5557 target_size));
37e82c2f
AS
5558 break;
5559
ba7591d8 5560 case offsetof(struct __sk_buff, hash):
6b8cc1d1 5561 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5562 bpf_target_off(struct sk_buff, hash, 4,
5563 target_size));
ba7591d8
DB
5564 break;
5565
9bac3d6d 5566 case offsetof(struct __sk_buff, mark):
d691f9e8 5567 if (type == BPF_WRITE)
6b8cc1d1 5568 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5569 bpf_target_off(struct sk_buff, mark, 4,
5570 target_size));
d691f9e8 5571 else
6b8cc1d1 5572 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5573 bpf_target_off(struct sk_buff, mark, 4,
5574 target_size));
d691f9e8 5575 break;
9bac3d6d
AS
5576
5577 case offsetof(struct __sk_buff, pkt_type):
f96da094
DB
5578 *target_size = 1;
5579 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
5580 PKT_TYPE_OFFSET());
5581 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
5582#ifdef __BIG_ENDIAN_BITFIELD
5583 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
5584#endif
5585 break;
9bac3d6d
AS
5586
5587 case offsetof(struct __sk_buff, queue_mapping):
f96da094
DB
5588 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5589 bpf_target_off(struct sk_buff, queue_mapping, 2,
5590 target_size));
5591 break;
c2497395 5592
c2497395 5593 case offsetof(struct __sk_buff, vlan_present):
c2497395 5594 case offsetof(struct __sk_buff, vlan_tci):
f96da094
DB
5595 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
5596
5597 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5598 bpf_target_off(struct sk_buff, vlan_tci, 2,
5599 target_size));
5600 if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
5601 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
5602 ~VLAN_TAG_PRESENT);
5603 } else {
5604 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
5605 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
5606 }
5607 break;
d691f9e8
AS
5608
5609 case offsetof(struct __sk_buff, cb[0]) ...
f96da094 5610 offsetofend(struct __sk_buff, cb[4]) - 1:
d691f9e8 5611 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
62c7989b
DB
5612 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
5613 offsetof(struct qdisc_skb_cb, data)) %
5614 sizeof(__u64));
d691f9e8 5615
ff936a04 5616 prog->cb_access = 1;
6b8cc1d1
DB
5617 off = si->off;
5618 off -= offsetof(struct __sk_buff, cb[0]);
5619 off += offsetof(struct sk_buff, cb);
5620 off += offsetof(struct qdisc_skb_cb, data);
d691f9e8 5621 if (type == BPF_WRITE)
62c7989b 5622 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 5623 si->src_reg, off);
d691f9e8 5624 else
62c7989b 5625 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 5626 si->src_reg, off);
d691f9e8
AS
5627 break;
5628
045efa82 5629 case offsetof(struct __sk_buff, tc_classid):
6b8cc1d1
DB
5630 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
5631
5632 off = si->off;
5633 off -= offsetof(struct __sk_buff, tc_classid);
5634 off += offsetof(struct sk_buff, cb);
5635 off += offsetof(struct qdisc_skb_cb, tc_classid);
f96da094 5636 *target_size = 2;
09c37a2c 5637 if (type == BPF_WRITE)
6b8cc1d1
DB
5638 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
5639 si->src_reg, off);
09c37a2c 5640 else
6b8cc1d1
DB
5641 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
5642 si->src_reg, off);
045efa82
DB
5643 break;
5644
db58ba45 5645 case offsetof(struct __sk_buff, data):
f035a515 5646 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
6b8cc1d1 5647 si->dst_reg, si->src_reg,
db58ba45
AS
5648 offsetof(struct sk_buff, data));
5649 break;
5650
de8f3a83
DB
5651 case offsetof(struct __sk_buff, data_meta):
5652 off = si->off;
5653 off -= offsetof(struct __sk_buff, data_meta);
5654 off += offsetof(struct sk_buff, cb);
5655 off += offsetof(struct bpf_skb_data_end, data_meta);
5656 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5657 si->src_reg, off);
5658 break;
5659
db58ba45 5660 case offsetof(struct __sk_buff, data_end):
6b8cc1d1
DB
5661 off = si->off;
5662 off -= offsetof(struct __sk_buff, data_end);
5663 off += offsetof(struct sk_buff, cb);
5664 off += offsetof(struct bpf_skb_data_end, data_end);
5665 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5666 si->src_reg, off);
db58ba45
AS
5667 break;
5668
d691f9e8
AS
5669 case offsetof(struct __sk_buff, tc_index):
5670#ifdef CONFIG_NET_SCHED
d691f9e8 5671 if (type == BPF_WRITE)
6b8cc1d1 5672 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5673 bpf_target_off(struct sk_buff, tc_index, 2,
5674 target_size));
d691f9e8 5675 else
6b8cc1d1 5676 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5677 bpf_target_off(struct sk_buff, tc_index, 2,
5678 target_size));
d691f9e8 5679#else
2ed46ce4 5680 *target_size = 2;
d691f9e8 5681 if (type == BPF_WRITE)
6b8cc1d1 5682 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
d691f9e8 5683 else
6b8cc1d1 5684 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
b1d9fc41
DB
5685#endif
5686 break;
5687
5688 case offsetof(struct __sk_buff, napi_id):
5689#if defined(CONFIG_NET_RX_BUSY_POLL)
b1d9fc41 5690 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5691 bpf_target_off(struct sk_buff, napi_id, 4,
5692 target_size));
b1d9fc41
DB
5693 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
5694 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
5695#else
2ed46ce4 5696 *target_size = 4;
b1d9fc41 5697 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
d691f9e8 5698#endif
6b8cc1d1 5699 break;
8a31db56
JF
5700 case offsetof(struct __sk_buff, family):
5701 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
5702
5703 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5704 si->dst_reg, si->src_reg,
5705 offsetof(struct sk_buff, sk));
5706 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5707 bpf_target_off(struct sock_common,
5708 skc_family,
5709 2, target_size));
5710 break;
5711 case offsetof(struct __sk_buff, remote_ip4):
5712 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
5713
5714 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5715 si->dst_reg, si->src_reg,
5716 offsetof(struct sk_buff, sk));
5717 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5718 bpf_target_off(struct sock_common,
5719 skc_daddr,
5720 4, target_size));
5721 break;
5722 case offsetof(struct __sk_buff, local_ip4):
5723 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5724 skc_rcv_saddr) != 4);
5725
5726 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5727 si->dst_reg, si->src_reg,
5728 offsetof(struct sk_buff, sk));
5729 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5730 bpf_target_off(struct sock_common,
5731 skc_rcv_saddr,
5732 4, target_size));
5733 break;
5734 case offsetof(struct __sk_buff, remote_ip6[0]) ...
5735 offsetof(struct __sk_buff, remote_ip6[3]):
5736#if IS_ENABLED(CONFIG_IPV6)
5737 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5738 skc_v6_daddr.s6_addr32[0]) != 4);
5739
5740 off = si->off;
5741 off -= offsetof(struct __sk_buff, remote_ip6[0]);
5742
5743 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5744 si->dst_reg, si->src_reg,
5745 offsetof(struct sk_buff, sk));
5746 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5747 offsetof(struct sock_common,
5748 skc_v6_daddr.s6_addr32[0]) +
5749 off);
5750#else
5751 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5752#endif
5753 break;
5754 case offsetof(struct __sk_buff, local_ip6[0]) ...
5755 offsetof(struct __sk_buff, local_ip6[3]):
5756#if IS_ENABLED(CONFIG_IPV6)
5757 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5758 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
5759
5760 off = si->off;
5761 off -= offsetof(struct __sk_buff, local_ip6[0]);
5762
5763 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5764 si->dst_reg, si->src_reg,
5765 offsetof(struct sk_buff, sk));
5766 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5767 offsetof(struct sock_common,
5768 skc_v6_rcv_saddr.s6_addr32[0]) +
5769 off);
5770#else
5771 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5772#endif
5773 break;
5774
5775 case offsetof(struct __sk_buff, remote_port):
5776 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
5777
5778 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5779 si->dst_reg, si->src_reg,
5780 offsetof(struct sk_buff, sk));
5781 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5782 bpf_target_off(struct sock_common,
5783 skc_dport,
5784 2, target_size));
5785#ifndef __BIG_ENDIAN_BITFIELD
5786 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
5787#endif
5788 break;
5789
5790 case offsetof(struct __sk_buff, local_port):
5791 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
5792
5793 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5794 si->dst_reg, si->src_reg,
5795 offsetof(struct sk_buff, sk));
5796 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5797 bpf_target_off(struct sock_common,
5798 skc_num, 2, target_size));
5799 break;
9bac3d6d
AS
5800 }
5801
5802 return insn - insn_buf;
89aa0758
AS
5803}
5804
61023658 5805static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
6b8cc1d1 5806 const struct bpf_insn *si,
61023658 5807 struct bpf_insn *insn_buf,
f96da094 5808 struct bpf_prog *prog, u32 *target_size)
61023658
DA
5809{
5810 struct bpf_insn *insn = insn_buf;
aac3fc32 5811 int off;
61023658 5812
6b8cc1d1 5813 switch (si->off) {
61023658
DA
5814 case offsetof(struct bpf_sock, bound_dev_if):
5815 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
5816
5817 if (type == BPF_WRITE)
6b8cc1d1 5818 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
61023658
DA
5819 offsetof(struct sock, sk_bound_dev_if));
5820 else
6b8cc1d1 5821 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
61023658
DA
5822 offsetof(struct sock, sk_bound_dev_if));
5823 break;
aa4c1037 5824
482dca93
DA
5825 case offsetof(struct bpf_sock, mark):
5826 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
5827
5828 if (type == BPF_WRITE)
5829 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5830 offsetof(struct sock, sk_mark));
5831 else
5832 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5833 offsetof(struct sock, sk_mark));
5834 break;
5835
5836 case offsetof(struct bpf_sock, priority):
5837 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
5838
5839 if (type == BPF_WRITE)
5840 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5841 offsetof(struct sock, sk_priority));
5842 else
5843 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5844 offsetof(struct sock, sk_priority));
5845 break;
5846
aa4c1037
DA
5847 case offsetof(struct bpf_sock, family):
5848 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
5849
6b8cc1d1 5850 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
aa4c1037
DA
5851 offsetof(struct sock, sk_family));
5852 break;
5853
5854 case offsetof(struct bpf_sock, type):
6b8cc1d1 5855 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
aa4c1037 5856 offsetof(struct sock, __sk_flags_offset));
6b8cc1d1
DB
5857 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
5858 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
aa4c1037
DA
5859 break;
5860
5861 case offsetof(struct bpf_sock, protocol):
6b8cc1d1 5862 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
aa4c1037 5863 offsetof(struct sock, __sk_flags_offset));
6b8cc1d1
DB
5864 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
5865 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
aa4c1037 5866 break;
aac3fc32
AI
5867
5868 case offsetof(struct bpf_sock, src_ip4):
5869 *insn++ = BPF_LDX_MEM(
5870 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
5871 bpf_target_off(struct sock_common, skc_rcv_saddr,
5872 FIELD_SIZEOF(struct sock_common,
5873 skc_rcv_saddr),
5874 target_size));
5875 break;
5876
5877 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5878#if IS_ENABLED(CONFIG_IPV6)
5879 off = si->off;
5880 off -= offsetof(struct bpf_sock, src_ip6[0]);
5881 *insn++ = BPF_LDX_MEM(
5882 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
5883 bpf_target_off(
5884 struct sock_common,
5885 skc_v6_rcv_saddr.s6_addr32[0],
5886 FIELD_SIZEOF(struct sock_common,
5887 skc_v6_rcv_saddr.s6_addr32[0]),
5888 target_size) + off);
5889#else
5890 (void)off;
5891 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5892#endif
5893 break;
5894
5895 case offsetof(struct bpf_sock, src_port):
5896 *insn++ = BPF_LDX_MEM(
5897 BPF_FIELD_SIZEOF(struct sock_common, skc_num),
5898 si->dst_reg, si->src_reg,
5899 bpf_target_off(struct sock_common, skc_num,
5900 FIELD_SIZEOF(struct sock_common,
5901 skc_num),
5902 target_size));
5903 break;
61023658
DA
5904 }
5905
5906 return insn - insn_buf;
5907}
5908
6b8cc1d1
DB
5909static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
5910 const struct bpf_insn *si,
374fb54e 5911 struct bpf_insn *insn_buf,
f96da094 5912 struct bpf_prog *prog, u32 *target_size)
374fb54e
DB
5913{
5914 struct bpf_insn *insn = insn_buf;
5915
6b8cc1d1 5916 switch (si->off) {
374fb54e 5917 case offsetof(struct __sk_buff, ifindex):
374fb54e 5918 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
6b8cc1d1 5919 si->dst_reg, si->src_reg,
374fb54e 5920 offsetof(struct sk_buff, dev));
6b8cc1d1 5921 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
f96da094
DB
5922 bpf_target_off(struct net_device, ifindex, 4,
5923 target_size));
374fb54e
DB
5924 break;
5925 default:
f96da094
DB
5926 return bpf_convert_ctx_access(type, si, insn_buf, prog,
5927 target_size);
374fb54e
DB
5928 }
5929
5930 return insn - insn_buf;
5931}
5932
6b8cc1d1
DB
5933static u32 xdp_convert_ctx_access(enum bpf_access_type type,
5934 const struct bpf_insn *si,
6a773a15 5935 struct bpf_insn *insn_buf,
f96da094 5936 struct bpf_prog *prog, u32 *target_size)
6a773a15
BB
5937{
5938 struct bpf_insn *insn = insn_buf;
5939
6b8cc1d1 5940 switch (si->off) {
6a773a15 5941 case offsetof(struct xdp_md, data):
f035a515 5942 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
6b8cc1d1 5943 si->dst_reg, si->src_reg,
6a773a15
BB
5944 offsetof(struct xdp_buff, data));
5945 break;
de8f3a83
DB
5946 case offsetof(struct xdp_md, data_meta):
5947 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
5948 si->dst_reg, si->src_reg,
5949 offsetof(struct xdp_buff, data_meta));
5950 break;
6a773a15 5951 case offsetof(struct xdp_md, data_end):
f035a515 5952 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
6b8cc1d1 5953 si->dst_reg, si->src_reg,
6a773a15
BB
5954 offsetof(struct xdp_buff, data_end));
5955 break;
02dd3291
JDB
5956 case offsetof(struct xdp_md, ingress_ifindex):
5957 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5958 si->dst_reg, si->src_reg,
5959 offsetof(struct xdp_buff, rxq));
5960 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
5961 si->dst_reg, si->dst_reg,
5962 offsetof(struct xdp_rxq_info, dev));
5963 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
daaf24c6 5964 offsetof(struct net_device, ifindex));
02dd3291
JDB
5965 break;
5966 case offsetof(struct xdp_md, rx_queue_index):
5967 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5968 si->dst_reg, si->src_reg,
5969 offsetof(struct xdp_buff, rxq));
5970 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
daaf24c6
JDB
5971 offsetof(struct xdp_rxq_info,
5972 queue_index));
02dd3291 5973 break;
6a773a15
BB
5974 }
5975
5976 return insn - insn_buf;
5977}
5978
4fbac77d
AI
5979/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
5980 * context Structure, F is Field in context structure that contains a pointer
5981 * to Nested Structure of type NS that has the field NF.
5982 *
5983 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make
5984 * sure that SIZE is not greater than actual size of S.F.NF.
5985 *
5986 * If offset OFF is provided, the load happens from that offset relative to
5987 * offset of NF.
5988 */
5989#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
5990 do { \
5991 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
5992 si->src_reg, offsetof(S, F)); \
5993 *insn++ = BPF_LDX_MEM( \
5994 SIZE, si->dst_reg, si->dst_reg, \
5995 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
5996 target_size) \
5997 + OFF); \
5998 } while (0)
5999
6000#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
6001 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
6002 BPF_FIELD_SIZEOF(NS, NF), 0)
6003
6004/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to
6005 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation.
6006 *
6007 * It doesn't support SIZE argument though since narrow stores are not
6008 * supported for now.
6009 *
6010 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
6011 * "register" since two registers available in convert_ctx_access are not
6012 * enough: we can't override neither SRC, since it contains value to store, nor
6013 * DST since it contains pointer to context that may be used by later
6014 * instructions. But we need a temporary place to save pointer to nested
6015 * structure whose field we want to store to.
6016 */
6017#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \
6018 do { \
6019 int tmp_reg = BPF_REG_9; \
6020 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
6021 --tmp_reg; \
6022 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
6023 --tmp_reg; \
6024 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
6025 offsetof(S, TF)); \
6026 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
6027 si->dst_reg, offsetof(S, F)); \
6028 *insn++ = BPF_STX_MEM( \
6029 BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \
6030 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
6031 target_size) \
6032 + OFF); \
6033 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
6034 offsetof(S, TF)); \
6035 } while (0)
6036
6037#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
6038 TF) \
6039 do { \
6040 if (type == BPF_WRITE) { \
6041 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
6042 TF); \
6043 } else { \
6044 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
6045 S, NS, F, NF, SIZE, OFF); \
6046 } \
6047 } while (0)
6048
6049#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
6050 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
6051 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
6052
6053static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
6054 const struct bpf_insn *si,
6055 struct bpf_insn *insn_buf,
6056 struct bpf_prog *prog, u32 *target_size)
6057{
6058 struct bpf_insn *insn = insn_buf;
6059 int off;
6060
6061 switch (si->off) {
6062 case offsetof(struct bpf_sock_addr, user_family):
6063 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
6064 struct sockaddr, uaddr, sa_family);
6065 break;
6066
6067 case offsetof(struct bpf_sock_addr, user_ip4):
6068 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6069 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
6070 sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
6071 break;
6072
6073 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6074 off = si->off;
6075 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
6076 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6077 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
6078 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
6079 tmp_reg);
6080 break;
6081
6082 case offsetof(struct bpf_sock_addr, user_port):
6083 /* To get port we need to know sa_family first and then treat
6084 * sockaddr as either sockaddr_in or sockaddr_in6.
6085 * Though we can simplify since port field has same offset and
6086 * size in both structures.
6087 * Here we check this invariant and use just one of the
6088 * structures if it's true.
6089 */
6090 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
6091 offsetof(struct sockaddr_in6, sin6_port));
6092 BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
6093 FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
6094 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
6095 struct sockaddr_in6, uaddr,
6096 sin6_port, tmp_reg);
6097 break;
6098
6099 case offsetof(struct bpf_sock_addr, family):
6100 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
6101 struct sock, sk, sk_family);
6102 break;
6103
6104 case offsetof(struct bpf_sock_addr, type):
6105 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
6106 struct bpf_sock_addr_kern, struct sock, sk,
6107 __sk_flags_offset, BPF_W, 0);
6108 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
6109 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
6110 break;
6111
6112 case offsetof(struct bpf_sock_addr, protocol):
6113 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
6114 struct bpf_sock_addr_kern, struct sock, sk,
6115 __sk_flags_offset, BPF_W, 0);
6116 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
6117 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
6118 SK_FL_PROTO_SHIFT);
6119 break;
1cedee13
AI
6120
6121 case offsetof(struct bpf_sock_addr, msg_src_ip4):
6122 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
6123 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6124 struct bpf_sock_addr_kern, struct in_addr, t_ctx,
6125 s_addr, BPF_SIZE(si->code), 0, tmp_reg);
6126 break;
6127
6128 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6129 msg_src_ip6[3]):
6130 off = si->off;
6131 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
6132 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
6133 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6134 struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
6135 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
6136 break;
4fbac77d
AI
6137 }
6138
6139 return insn - insn_buf;
6140}
6141
40304b2a
LB
6142static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
6143 const struct bpf_insn *si,
6144 struct bpf_insn *insn_buf,
f96da094
DB
6145 struct bpf_prog *prog,
6146 u32 *target_size)
40304b2a
LB
6147{
6148 struct bpf_insn *insn = insn_buf;
6149 int off;
6150
6151 switch (si->off) {
6152 case offsetof(struct bpf_sock_ops, op) ...
6153 offsetof(struct bpf_sock_ops, replylong[3]):
6154 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
6155 FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
6156 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
6157 FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
6158 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
6159 FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
6160 off = si->off;
6161 off -= offsetof(struct bpf_sock_ops, op);
6162 off += offsetof(struct bpf_sock_ops_kern, op);
6163 if (type == BPF_WRITE)
6164 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
6165 off);
6166 else
6167 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
6168 off);
6169 break;
6170
6171 case offsetof(struct bpf_sock_ops, family):
6172 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
6173
6174 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6175 struct bpf_sock_ops_kern, sk),
6176 si->dst_reg, si->src_reg,
6177 offsetof(struct bpf_sock_ops_kern, sk));
6178 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6179 offsetof(struct sock_common, skc_family));
6180 break;
6181
6182 case offsetof(struct bpf_sock_ops, remote_ip4):
6183 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
6184
6185 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6186 struct bpf_sock_ops_kern, sk),
6187 si->dst_reg, si->src_reg,
6188 offsetof(struct bpf_sock_ops_kern, sk));
6189 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6190 offsetof(struct sock_common, skc_daddr));
6191 break;
6192
6193 case offsetof(struct bpf_sock_ops, local_ip4):
303def35
JF
6194 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6195 skc_rcv_saddr) != 4);
40304b2a
LB
6196
6197 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6198 struct bpf_sock_ops_kern, sk),
6199 si->dst_reg, si->src_reg,
6200 offsetof(struct bpf_sock_ops_kern, sk));
6201 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6202 offsetof(struct sock_common,
6203 skc_rcv_saddr));
6204 break;
6205
6206 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
6207 offsetof(struct bpf_sock_ops, remote_ip6[3]):
6208#if IS_ENABLED(CONFIG_IPV6)
6209 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6210 skc_v6_daddr.s6_addr32[0]) != 4);
6211
6212 off = si->off;
6213 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
6214 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6215 struct bpf_sock_ops_kern, sk),
6216 si->dst_reg, si->src_reg,
6217 offsetof(struct bpf_sock_ops_kern, sk));
6218 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6219 offsetof(struct sock_common,
6220 skc_v6_daddr.s6_addr32[0]) +
6221 off);
6222#else
6223 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6224#endif
6225 break;
6226
6227 case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
6228 offsetof(struct bpf_sock_ops, local_ip6[3]):
6229#if IS_ENABLED(CONFIG_IPV6)
6230 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6231 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
6232
6233 off = si->off;
6234 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
6235 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6236 struct bpf_sock_ops_kern, sk),
6237 si->dst_reg, si->src_reg,
6238 offsetof(struct bpf_sock_ops_kern, sk));
6239 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6240 offsetof(struct sock_common,
6241 skc_v6_rcv_saddr.s6_addr32[0]) +
6242 off);
6243#else
6244 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6245#endif
6246 break;
6247
6248 case offsetof(struct bpf_sock_ops, remote_port):
6249 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
6250
6251 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6252 struct bpf_sock_ops_kern, sk),
6253 si->dst_reg, si->src_reg,
6254 offsetof(struct bpf_sock_ops_kern, sk));
6255 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6256 offsetof(struct sock_common, skc_dport));
6257#ifndef __BIG_ENDIAN_BITFIELD
6258 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
6259#endif
6260 break;
6261
6262 case offsetof(struct bpf_sock_ops, local_port):
6263 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
6264
6265 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6266 struct bpf_sock_ops_kern, sk),
6267 si->dst_reg, si->src_reg,
6268 offsetof(struct bpf_sock_ops_kern, sk));
6269 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6270 offsetof(struct sock_common, skc_num));
6271 break;
f19397a5
LB
6272
6273 case offsetof(struct bpf_sock_ops, is_fullsock):
6274 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6275 struct bpf_sock_ops_kern,
6276 is_fullsock),
6277 si->dst_reg, si->src_reg,
6278 offsetof(struct bpf_sock_ops_kern,
6279 is_fullsock));
6280 break;
6281
44f0e430
LB
6282 case offsetof(struct bpf_sock_ops, state):
6283 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
6284
6285 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6286 struct bpf_sock_ops_kern, sk),
6287 si->dst_reg, si->src_reg,
6288 offsetof(struct bpf_sock_ops_kern, sk));
6289 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
6290 offsetof(struct sock_common, skc_state));
6291 break;
6292
6293 case offsetof(struct bpf_sock_ops, rtt_min):
6294 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
6295 sizeof(struct minmax));
6296 BUILD_BUG_ON(sizeof(struct minmax) <
6297 sizeof(struct minmax_sample));
6298
6299 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6300 struct bpf_sock_ops_kern, sk),
6301 si->dst_reg, si->src_reg,
6302 offsetof(struct bpf_sock_ops_kern, sk));
6303 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6304 offsetof(struct tcp_sock, rtt_min) +
6305 FIELD_SIZEOF(struct minmax_sample, t));
6306 break;
6307
34d367c5
LB
6308/* Helper macro for adding read access to tcp_sock or sock fields. */
6309#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
f19397a5 6310 do { \
34d367c5
LB
6311 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
6312 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
f19397a5
LB
6313 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6314 struct bpf_sock_ops_kern, \
6315 is_fullsock), \
6316 si->dst_reg, si->src_reg, \
6317 offsetof(struct bpf_sock_ops_kern, \
6318 is_fullsock)); \
6319 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
6320 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6321 struct bpf_sock_ops_kern, sk),\
6322 si->dst_reg, si->src_reg, \
6323 offsetof(struct bpf_sock_ops_kern, sk));\
34d367c5
LB
6324 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
6325 OBJ_FIELD), \
6326 si->dst_reg, si->dst_reg, \
6327 offsetof(OBJ, OBJ_FIELD)); \
f19397a5
LB
6328 } while (0)
6329
b73042b8
LB
6330/* Helper macro for adding write access to tcp_sock or sock fields.
6331 * The macro is called with two registers, dst_reg which contains a pointer
6332 * to ctx (context) and src_reg which contains the value that should be
6333 * stored. However, we need an additional register since we cannot overwrite
6334 * dst_reg because it may be used later in the program.
6335 * Instead we "borrow" one of the other register. We first save its value
6336 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
6337 * it at the end of the macro.
6338 */
6339#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
6340 do { \
6341 int reg = BPF_REG_9; \
6342 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
6343 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
6344 if (si->dst_reg == reg || si->src_reg == reg) \
6345 reg--; \
6346 if (si->dst_reg == reg || si->src_reg == reg) \
6347 reg--; \
6348 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
6349 offsetof(struct bpf_sock_ops_kern, \
6350 temp)); \
6351 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6352 struct bpf_sock_ops_kern, \
6353 is_fullsock), \
6354 reg, si->dst_reg, \
6355 offsetof(struct bpf_sock_ops_kern, \
6356 is_fullsock)); \
6357 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
6358 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6359 struct bpf_sock_ops_kern, sk),\
6360 reg, si->dst_reg, \
6361 offsetof(struct bpf_sock_ops_kern, sk));\
6362 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
6363 reg, si->src_reg, \
6364 offsetof(OBJ, OBJ_FIELD)); \
6365 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
6366 offsetof(struct bpf_sock_ops_kern, \
6367 temp)); \
6368 } while (0)
6369
6370#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
6371 do { \
6372 if (TYPE == BPF_WRITE) \
6373 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
6374 else \
6375 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
6376 } while (0)
6377
f19397a5 6378 case offsetof(struct bpf_sock_ops, snd_cwnd):
34d367c5 6379 SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
f19397a5
LB
6380 break;
6381
6382 case offsetof(struct bpf_sock_ops, srtt_us):
34d367c5 6383 SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
f19397a5 6384 break;
b13d8807
LB
6385
6386 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
6387 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
6388 struct tcp_sock);
6389 break;
44f0e430
LB
6390
6391 case offsetof(struct bpf_sock_ops, snd_ssthresh):
6392 SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
6393 break;
6394
6395 case offsetof(struct bpf_sock_ops, rcv_nxt):
6396 SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
6397 break;
6398
6399 case offsetof(struct bpf_sock_ops, snd_nxt):
6400 SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
6401 break;
6402
6403 case offsetof(struct bpf_sock_ops, snd_una):
6404 SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
6405 break;
6406
6407 case offsetof(struct bpf_sock_ops, mss_cache):
6408 SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
6409 break;
6410
6411 case offsetof(struct bpf_sock_ops, ecn_flags):
6412 SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
6413 break;
6414
6415 case offsetof(struct bpf_sock_ops, rate_delivered):
6416 SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
6417 struct tcp_sock);
6418 break;
6419
6420 case offsetof(struct bpf_sock_ops, rate_interval_us):
6421 SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
6422 struct tcp_sock);
6423 break;
6424
6425 case offsetof(struct bpf_sock_ops, packets_out):
6426 SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
6427 break;
6428
6429 case offsetof(struct bpf_sock_ops, retrans_out):
6430 SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
6431 break;
6432
6433 case offsetof(struct bpf_sock_ops, total_retrans):
6434 SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
6435 struct tcp_sock);
6436 break;
6437
6438 case offsetof(struct bpf_sock_ops, segs_in):
6439 SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
6440 break;
6441
6442 case offsetof(struct bpf_sock_ops, data_segs_in):
6443 SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
6444 break;
6445
6446 case offsetof(struct bpf_sock_ops, segs_out):
6447 SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
6448 break;
6449
6450 case offsetof(struct bpf_sock_ops, data_segs_out):
6451 SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
6452 struct tcp_sock);
6453 break;
6454
6455 case offsetof(struct bpf_sock_ops, lost_out):
6456 SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
6457 break;
6458
6459 case offsetof(struct bpf_sock_ops, sacked_out):
6460 SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
6461 break;
6462
6463 case offsetof(struct bpf_sock_ops, sk_txhash):
6f9bd3d7
LB
6464 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
6465 struct sock, type);
44f0e430
LB
6466 break;
6467
6468 case offsetof(struct bpf_sock_ops, bytes_received):
6469 SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
6470 struct tcp_sock);
6471 break;
6472
6473 case offsetof(struct bpf_sock_ops, bytes_acked):
6474 SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
6475 break;
6f9bd3d7 6476
40304b2a
LB
6477 }
6478 return insn - insn_buf;
6479}
6480
8108a775
JF
6481static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
6482 const struct bpf_insn *si,
6483 struct bpf_insn *insn_buf,
6484 struct bpf_prog *prog, u32 *target_size)
6485{
6486 struct bpf_insn *insn = insn_buf;
6487 int off;
6488
6489 switch (si->off) {
6490 case offsetof(struct __sk_buff, data_end):
6491 off = si->off;
6492 off -= offsetof(struct __sk_buff, data_end);
6493 off += offsetof(struct sk_buff, cb);
6494 off += offsetof(struct tcp_skb_cb, bpf.data_end);
6495 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
6496 si->src_reg, off);
6497 break;
6498 default:
6499 return bpf_convert_ctx_access(type, si, insn_buf, prog,
6500 target_size);
6501 }
6502
6503 return insn - insn_buf;
6504}
6505
4f738adb
JF
6506static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
6507 const struct bpf_insn *si,
6508 struct bpf_insn *insn_buf,
6509 struct bpf_prog *prog, u32 *target_size)
6510{
6511 struct bpf_insn *insn = insn_buf;
720e7f38 6512#if IS_ENABLED(CONFIG_IPV6)
303def35 6513 int off;
720e7f38 6514#endif
4f738adb
JF
6515
6516 switch (si->off) {
6517 case offsetof(struct sk_msg_md, data):
6518 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
6519 si->dst_reg, si->src_reg,
6520 offsetof(struct sk_msg_buff, data));
6521 break;
6522 case offsetof(struct sk_msg_md, data_end):
6523 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
6524 si->dst_reg, si->src_reg,
6525 offsetof(struct sk_msg_buff, data_end));
6526 break;
303def35
JF
6527 case offsetof(struct sk_msg_md, family):
6528 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
6529
6530 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6531 struct sk_msg_buff, sk),
6532 si->dst_reg, si->src_reg,
6533 offsetof(struct sk_msg_buff, sk));
6534 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6535 offsetof(struct sock_common, skc_family));
6536 break;
6537
6538 case offsetof(struct sk_msg_md, remote_ip4):
6539 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
6540
6541 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6542 struct sk_msg_buff, sk),
6543 si->dst_reg, si->src_reg,
6544 offsetof(struct sk_msg_buff, sk));
6545 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6546 offsetof(struct sock_common, skc_daddr));
6547 break;
6548
6549 case offsetof(struct sk_msg_md, local_ip4):
6550 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6551 skc_rcv_saddr) != 4);
6552
6553 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6554 struct sk_msg_buff, sk),
6555 si->dst_reg, si->src_reg,
6556 offsetof(struct sk_msg_buff, sk));
6557 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6558 offsetof(struct sock_common,
6559 skc_rcv_saddr));
6560 break;
6561
6562 case offsetof(struct sk_msg_md, remote_ip6[0]) ...
6563 offsetof(struct sk_msg_md, remote_ip6[3]):
6564#if IS_ENABLED(CONFIG_IPV6)
6565 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6566 skc_v6_daddr.s6_addr32[0]) != 4);
6567
6568 off = si->off;
6569 off -= offsetof(struct sk_msg_md, remote_ip6[0]);
6570 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6571 struct sk_msg_buff, sk),
6572 si->dst_reg, si->src_reg,
6573 offsetof(struct sk_msg_buff, sk));
6574 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6575 offsetof(struct sock_common,
6576 skc_v6_daddr.s6_addr32[0]) +
6577 off);
6578#else
6579 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6580#endif
6581 break;
6582
6583 case offsetof(struct sk_msg_md, local_ip6[0]) ...
6584 offsetof(struct sk_msg_md, local_ip6[3]):
6585#if IS_ENABLED(CONFIG_IPV6)
6586 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6587 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
6588
6589 off = si->off;
6590 off -= offsetof(struct sk_msg_md, local_ip6[0]);
6591 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6592 struct sk_msg_buff, sk),
6593 si->dst_reg, si->src_reg,
6594 offsetof(struct sk_msg_buff, sk));
6595 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6596 offsetof(struct sock_common,
6597 skc_v6_rcv_saddr.s6_addr32[0]) +
6598 off);
6599#else
6600 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6601#endif
6602 break;
6603
6604 case offsetof(struct sk_msg_md, remote_port):
6605 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
6606
6607 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6608 struct sk_msg_buff, sk),
6609 si->dst_reg, si->src_reg,
6610 offsetof(struct sk_msg_buff, sk));
6611 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6612 offsetof(struct sock_common, skc_dport));
6613#ifndef __BIG_ENDIAN_BITFIELD
6614 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
6615#endif
6616 break;
6617
6618 case offsetof(struct sk_msg_md, local_port):
6619 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
6620
6621 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6622 struct sk_msg_buff, sk),
6623 si->dst_reg, si->src_reg,
6624 offsetof(struct sk_msg_buff, sk));
6625 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6626 offsetof(struct sock_common, skc_num));
6627 break;
4f738adb
JF
6628 }
6629
6630 return insn - insn_buf;
6631}
6632
7de16e3a 6633const struct bpf_verifier_ops sk_filter_verifier_ops = {
4936e352
DB
6634 .get_func_proto = sk_filter_func_proto,
6635 .is_valid_access = sk_filter_is_valid_access,
2492d3b8 6636 .convert_ctx_access = bpf_convert_ctx_access,
e0cea7ce 6637 .gen_ld_abs = bpf_gen_ld_abs,
89aa0758
AS
6638};
6639
7de16e3a 6640const struct bpf_prog_ops sk_filter_prog_ops = {
61f3c964 6641 .test_run = bpf_prog_test_run_skb,
7de16e3a
JK
6642};
6643
6644const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
4936e352
DB
6645 .get_func_proto = tc_cls_act_func_proto,
6646 .is_valid_access = tc_cls_act_is_valid_access,
374fb54e 6647 .convert_ctx_access = tc_cls_act_convert_ctx_access,
36bbef52 6648 .gen_prologue = tc_cls_act_prologue,
e0cea7ce 6649 .gen_ld_abs = bpf_gen_ld_abs,
7de16e3a
JK
6650};
6651
6652const struct bpf_prog_ops tc_cls_act_prog_ops = {
1cf1cae9 6653 .test_run = bpf_prog_test_run_skb,
608cd71a
AS
6654};
6655
7de16e3a 6656const struct bpf_verifier_ops xdp_verifier_ops = {
6a773a15
BB
6657 .get_func_proto = xdp_func_proto,
6658 .is_valid_access = xdp_is_valid_access,
6659 .convert_ctx_access = xdp_convert_ctx_access,
7de16e3a
JK
6660};
6661
6662const struct bpf_prog_ops xdp_prog_ops = {
1cf1cae9 6663 .test_run = bpf_prog_test_run_xdp,
6a773a15
BB
6664};
6665
7de16e3a 6666const struct bpf_verifier_ops cg_skb_verifier_ops = {
966789fb 6667 .get_func_proto = sk_filter_func_proto,
0e33661d 6668 .is_valid_access = sk_filter_is_valid_access,
2492d3b8 6669 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
6670};
6671
6672const struct bpf_prog_ops cg_skb_prog_ops = {
1cf1cae9 6673 .test_run = bpf_prog_test_run_skb,
0e33661d
DM
6674};
6675
cd3092c7
MX
6676const struct bpf_verifier_ops lwt_in_verifier_ops = {
6677 .get_func_proto = lwt_in_func_proto,
3a0af8fd 6678 .is_valid_access = lwt_is_valid_access,
2492d3b8 6679 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
6680};
6681
cd3092c7
MX
6682const struct bpf_prog_ops lwt_in_prog_ops = {
6683 .test_run = bpf_prog_test_run_skb,
6684};
6685
6686const struct bpf_verifier_ops lwt_out_verifier_ops = {
6687 .get_func_proto = lwt_out_func_proto,
3a0af8fd 6688 .is_valid_access = lwt_is_valid_access,
2492d3b8 6689 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
6690};
6691
cd3092c7 6692const struct bpf_prog_ops lwt_out_prog_ops = {
1cf1cae9 6693 .test_run = bpf_prog_test_run_skb,
3a0af8fd
TG
6694};
6695
7de16e3a 6696const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
3a0af8fd
TG
6697 .get_func_proto = lwt_xmit_func_proto,
6698 .is_valid_access = lwt_is_valid_access,
2492d3b8 6699 .convert_ctx_access = bpf_convert_ctx_access,
3a0af8fd 6700 .gen_prologue = tc_cls_act_prologue,
7de16e3a
JK
6701};
6702
6703const struct bpf_prog_ops lwt_xmit_prog_ops = {
1cf1cae9 6704 .test_run = bpf_prog_test_run_skb,
3a0af8fd
TG
6705};
6706
004d4b27
MX
6707const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
6708 .get_func_proto = lwt_seg6local_func_proto,
6709 .is_valid_access = lwt_is_valid_access,
6710 .convert_ctx_access = bpf_convert_ctx_access,
6711};
6712
6713const struct bpf_prog_ops lwt_seg6local_prog_ops = {
6714 .test_run = bpf_prog_test_run_skb,
6715};
6716
7de16e3a 6717const struct bpf_verifier_ops cg_sock_verifier_ops = {
ae2cf1c4 6718 .get_func_proto = sock_filter_func_proto,
61023658
DA
6719 .is_valid_access = sock_filter_is_valid_access,
6720 .convert_ctx_access = sock_filter_convert_ctx_access,
6721};
6722
7de16e3a
JK
6723const struct bpf_prog_ops cg_sock_prog_ops = {
6724};
6725
4fbac77d
AI
6726const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
6727 .get_func_proto = sock_addr_func_proto,
6728 .is_valid_access = sock_addr_is_valid_access,
6729 .convert_ctx_access = sock_addr_convert_ctx_access,
6730};
6731
6732const struct bpf_prog_ops cg_sock_addr_prog_ops = {
6733};
6734
7de16e3a 6735const struct bpf_verifier_ops sock_ops_verifier_ops = {
8c4b4c7e 6736 .get_func_proto = sock_ops_func_proto,
40304b2a
LB
6737 .is_valid_access = sock_ops_is_valid_access,
6738 .convert_ctx_access = sock_ops_convert_ctx_access,
6739};
6740
7de16e3a
JK
6741const struct bpf_prog_ops sock_ops_prog_ops = {
6742};
6743
6744const struct bpf_verifier_ops sk_skb_verifier_ops = {
b005fd18
JF
6745 .get_func_proto = sk_skb_func_proto,
6746 .is_valid_access = sk_skb_is_valid_access,
8108a775 6747 .convert_ctx_access = sk_skb_convert_ctx_access,
8a31db56 6748 .gen_prologue = sk_skb_prologue,
b005fd18
JF
6749};
6750
7de16e3a
JK
6751const struct bpf_prog_ops sk_skb_prog_ops = {
6752};
6753
4f738adb
JF
6754const struct bpf_verifier_ops sk_msg_verifier_ops = {
6755 .get_func_proto = sk_msg_func_proto,
6756 .is_valid_access = sk_msg_is_valid_access,
6757 .convert_ctx_access = sk_msg_convert_ctx_access,
6758};
6759
6760const struct bpf_prog_ops sk_msg_prog_ops = {
6761};
6762
8ced425e 6763int sk_detach_filter(struct sock *sk)
55b33325
PE
6764{
6765 int ret = -ENOENT;
6766 struct sk_filter *filter;
6767
d59577b6
VB
6768 if (sock_flag(sk, SOCK_FILTER_LOCKED))
6769 return -EPERM;
6770
8ced425e
HFS
6771 filter = rcu_dereference_protected(sk->sk_filter,
6772 lockdep_sock_is_held(sk));
55b33325 6773 if (filter) {
a9b3cd7f 6774 RCU_INIT_POINTER(sk->sk_filter, NULL);
46bcf14f 6775 sk_filter_uncharge(sk, filter);
55b33325
PE
6776 ret = 0;
6777 }
a3ea269b 6778
55b33325
PE
6779 return ret;
6780}
8ced425e 6781EXPORT_SYMBOL_GPL(sk_detach_filter);
a8fc9277 6782
a3ea269b
DB
6783int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
6784 unsigned int len)
a8fc9277 6785{
a3ea269b 6786 struct sock_fprog_kern *fprog;
a8fc9277 6787 struct sk_filter *filter;
a3ea269b 6788 int ret = 0;
a8fc9277
PE
6789
6790 lock_sock(sk);
6791 filter = rcu_dereference_protected(sk->sk_filter,
8ced425e 6792 lockdep_sock_is_held(sk));
a8fc9277
PE
6793 if (!filter)
6794 goto out;
a3ea269b
DB
6795
6796 /* We're copying the filter that has been originally attached,
93d08b69
DB
6797 * so no conversion/decode needed anymore. eBPF programs that
6798 * have no original program cannot be dumped through this.
a3ea269b 6799 */
93d08b69 6800 ret = -EACCES;
7ae457c1 6801 fprog = filter->prog->orig_prog;
93d08b69
DB
6802 if (!fprog)
6803 goto out;
a3ea269b
DB
6804
6805 ret = fprog->len;
a8fc9277 6806 if (!len)
a3ea269b 6807 /* User space only enquires number of filter blocks. */
a8fc9277 6808 goto out;
a3ea269b 6809
a8fc9277 6810 ret = -EINVAL;
a3ea269b 6811 if (len < fprog->len)
a8fc9277
PE
6812 goto out;
6813
6814 ret = -EFAULT;
009937e7 6815 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
a3ea269b 6816 goto out;
a8fc9277 6817
a3ea269b
DB
6818 /* Instead of bytes, the API requests to return the number
6819 * of filter blocks.
6820 */
6821 ret = fprog->len;
a8fc9277
PE
6822out:
6823 release_sock(sk);
6824 return ret;
6825}