/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);

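/* Illustrative only: a typical receive-path caller checks sk_filter()
 * and drops the skb on a non-zero return, roughly along the lines of
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *
 * The exact call sites and error handling live in the individual
 * protocol handlers.
 */
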
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

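/* For illustration (the classic mnemonic below is not emitted anywhere
 * verbatim): a classic insn such as
 *
 *	ldh [SKF_AD_OFF + SKF_AD_PROTOCOL]
 *
 * is rewritten by the SKF_AD_PROTOCOL case above into the two eBPF insns
 *
 *	BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, protocol));
 *	BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
 *
 * i.e. a direct field load plus an endianness fixup instead of a call
 * into the ancillary load path.
 */
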
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF instruction set to 'bpf_insn'
 * style eBPF. Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) Second pass to remap; internally this may iterate more than once:
 *    earlier iterations find the new jump offsets, the final one does
 *    the remapping:
 *	new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

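/* Worked example of the BPF_EMIT_JMP offset math above (illustrative):
 * if classic insn i jumps to classic insn t = i + jt + 1, and the
 * length-calculation pass recorded addrs[i] and addrs[t] as the
 * new-program positions of those insns, then the eBPF pc-relative
 * offset is
 *
 *	addrs[t] - addrs[i] - 1
 *
 * (the -1 because an eBPF jump is relative to the insn following it),
 * further reduced by (insn - tmp_insns) when the jump is emitted as the
 * 2nd or 3rd insn of an expansion, since the pc has already advanced
 * past the insns emitted before it.
 */
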
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

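/* Worked example (illustrative): the two-insn classic filter
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 *
 * is rejected above with -EINVAL, because bit 3 of memvalid was never
 * set by a preceding BPF_ST/BPF_STX; prepending BPF_STMT(BPF_ST, 3)
 * makes it pass.
 */
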
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

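/* Illustrative rejection cases for bpf_check_classic() above: a filter
 * whose last insn is not BPF_RET falls through the final switch, a
 * BPF_ALU | BPF_DIV | BPF_K insn with k == 0 trips the division check,
 * and a conditional jump whose jt/jf target lies at or past flen trips
 * the bounds check; all return -EINVAL before the program is ever run.
 */
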
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by bpf_prog_realloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, e.g. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

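/* Sketch of in-kernel usage (hedged; see real callers of
 * bpf_prog_create() for the authoritative pattern): build a minimal
 * classic program that accepts every packet and hand it over:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 */
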
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return -ENOMEM;

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return -EFAULT;
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return -ENOMEM;
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	prog = bpf_prepare_filter(prog, NULL);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

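/* From user space this path is reached via setsockopt(); a hedged
 * sketch of the calling convention:
 *
 *	struct sock_filter code[] = { ... };
 *	struct sock_fprog bpf = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */
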
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

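/* The eBPF analogue of the classic attach above: user space loads a
 * program of type BPF_PROG_TYPE_SOCKET_FILTER via the bpf(2) syscall
 * and attaches the returned fd (sketch):
 *
 *	setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
 *		   sizeof(prog_fd));
 */
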
#define BPF_RECOMPUTE_CSUM(flags)	((flags) & 1)
#define BPF_LDST_LEN			16U

static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	void *from = (void *) (long) r3;
	unsigned int len = (unsigned int) r4;
	char buf[BPF_LDST_LEN];
	void *ptr;

	/* bpf verifier guarantees that:
	 * 'from' pointer points to bpf program stack
	 * 'len' bytes of it were initialized
	 * 'len' > 0
	 * 'skb' is a valid pointer to 'struct sk_buff'
	 *
	 * so check for invalid 'offset' and too large 'len'
	 */
	if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, len, buf);
	if (unlikely(!ptr))
		return -EFAULT;

	if (BPF_RECOMPUTE_CSUM(flags))
		skb_postpull_rcsum(skb, ptr, len);

	memcpy(ptr, from, len);

	if (ptr == buf)
		/* skb_store_bits cannot return -EFAULT here */
		skb_store_bits(skb, offset, ptr, len);

	if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
	return 0;
}

const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

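/* From the attached eBPF program's side (hedged sketch; the helper name
 * below comes from the uapi helper interface, not this file), flags
 * bit 0 asks the kernel to keep skb->csum consistent across the write.
 * Rewriting a destination MAC does not touch L3/L4 checksums, so flags
 * can stay 0 here:
 *
 *	__u8 dmac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
 *			    dmac, ETH_ALEN, 0);
 */
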
static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
	int offset = (int) r2;
	void *to = (void *)(unsigned long) r3;
	unsigned int len = (unsigned int) r4;
	void *ptr;

	if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		return -EFAULT;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
}

const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
};

#define BPF_HEADER_FIELD_SIZE(flags)	((flags) & 0x0f)
#define BPF_IS_PSEUDO_HEADER(flags)	((flags) & 0x10)

static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

3896d655 AS |
1436 | #define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1) |
1437 | ||
1438 | static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) | |
1439 | { | |
1440 | struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; | |
1441 | struct net_device *dev; | |
1442 | ||
1443 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); | |
1444 | if (unlikely(!dev)) | |
1445 | return -EINVAL; | |
1446 | ||
3896d655 AS |
1447 | skb2 = skb_clone(skb, GFP_ATOMIC); |
1448 | if (unlikely(!skb2)) | |
1449 | return -ENOMEM; | |
1450 | ||
3896d655 AS |
1451 | if (BPF_IS_REDIRECT_INGRESS(flags)) |
1452 | return dev_forward_skb(dev, skb2); | |
1453 | ||
1454 | skb2->dev = dev; | |
6bf05773 | 1455 | skb_sender_cpu_clear(skb2); |
3896d655 AS |
1456 | return dev_queue_xmit(skb2); |
1457 | } | |
1458 | ||
1459 | const struct bpf_func_proto bpf_clone_redirect_proto = { | |
1460 | .func = bpf_clone_redirect, | |
1461 | .gpl_only = false, | |
1462 | .ret_type = RET_INTEGER, | |
1463 | .arg1_type = ARG_PTR_TO_CTX, | |
1464 | .arg2_type = ARG_ANYTHING, | |
1465 | .arg3_type = ARG_ANYTHING, | |
1466 | }; | |
1467 | ||
27b29f63 AS |
1468 | struct redirect_info { |
1469 | u32 ifindex; | |
1470 | u32 flags; | |
1471 | }; | |
1472 | ||
1473 | static DEFINE_PER_CPU(struct redirect_info, redirect_info); | |
1474 | static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5) | |
1475 | { | |
1476 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | |
1477 | ||
1478 | ri->ifindex = ifindex; | |
1479 | ri->flags = flags; | |
1480 | return TC_ACT_REDIRECT; | |
1481 | } | |
1482 | ||
1483 | int skb_do_redirect(struct sk_buff *skb) | |
1484 | { | |
1485 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | |
1486 | struct net_device *dev; | |
1487 | ||
1488 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); | |
1489 | ri->ifindex = 0; | |
1490 | if (unlikely(!dev)) { | |
1491 | kfree_skb(skb); | |
1492 | return -EINVAL; | |
1493 | } | |
1494 | ||
1495 | if (BPF_IS_REDIRECT_INGRESS(ri->flags)) | |
1496 | return dev_forward_skb(dev, skb); | |
1497 | ||
1498 | skb->dev = dev; | |
cfc81b50 | 1499 | skb_sender_cpu_clear(skb); |
27b29f63 AS |
1500 | return dev_queue_xmit(skb); |
1501 | } | |
1502 | ||
1503 | const struct bpf_func_proto bpf_redirect_proto = { | |
1504 | .func = bpf_redirect, | |
1505 | .gpl_only = false, | |
1506 | .ret_type = RET_INTEGER, | |
1507 | .arg1_type = ARG_ANYTHING, | |
1508 | .arg2_type = ARG_ANYTHING, | |
1509 | }; | |
1510 | ||
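Note the two-stage design: bpf_redirect() merely records ifindex and flags in the per-cpu redirect_info and returns TC_ACT_REDIRECT; the tc layer then calls skb_do_redirect() on the very same skb, so no clone is paid for, unlike bpf_clone_redirect(). A sketch of the program side (ifindex 2 is illustrative):

static int (*bpf_redirect)(int ifindex, int flags) =
	(void *) BPF_FUNC_redirect;

int redirect_all(struct __sk_buff *skb)
{
	/* The return value tells the tc layer to hand this very skb
	 * to skb_do_redirect() once the program has finished.
	 */
	return bpf_redirect(2, 0);
}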
8d20aabe DB |
1511 | static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) |
1512 | { | |
1513 | return task_get_classid((struct sk_buff *) (unsigned long) r1); | |
1514 | } | |
1515 | ||
1516 | static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { | |
1517 | .func = bpf_get_cgroup_classid, | |
1518 | .gpl_only = false, | |
1519 | .ret_type = RET_INTEGER, | |
1520 | .arg1_type = ARG_PTR_TO_CTX, | |
1521 | }; | |
1522 | ||
c46646d0 DB |
1523 | static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) |
1524 | { | |
1525 | #ifdef CONFIG_IP_ROUTE_CLASSID | |
1526 | const struct dst_entry *dst; | |
1527 | ||
1528 | dst = skb_dst((struct sk_buff *) (unsigned long) r1); | |
1529 | if (dst) | |
1530 | return dst->tclassid; | |
1531 | #endif | |
1532 | return 0; | |
1533 | } | |
1534 | ||
1535 | static const struct bpf_func_proto bpf_get_route_realm_proto = { | |
1536 | .func = bpf_get_route_realm, | |
1537 | .gpl_only = false, | |
1538 | .ret_type = RET_INTEGER, | |
1539 | .arg1_type = ARG_PTR_TO_CTX, | |
1540 | }; | |
1541 | ||
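Both read-only helpers are natural inputs for a classification verdict; a sketch, where the classid and realm values being matched are purely illustrative:

static __u32 (*bpf_get_cgroup_classid)(void *ctx) =
	(void *) BPF_FUNC_get_cgroup_classid;
static __u32 (*bpf_get_route_realm)(void *ctx) =
	(void *) BPF_FUNC_get_route_realm;

int classify(struct __sk_buff *skb)
{
	/* Both helpers return 0 when no classid/realm is set. */
	if (bpf_get_cgroup_classid(skb) == 0x100001 ||
	    bpf_get_route_realm(skb) == 7)
		return TC_ACT_OK;
	return TC_ACT_SHOT;
}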
4e10df9a AS |
1542 | static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) |
1543 | { | |
1544 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | |
1545 | __be16 vlan_proto = (__force __be16) r2; | |
1546 | ||
1547 | if (unlikely(vlan_proto != htons(ETH_P_8021Q) && | |
1548 | vlan_proto != htons(ETH_P_8021AD))) | |
1549 | vlan_proto = htons(ETH_P_8021Q); | |
1550 | ||
1551 | return skb_vlan_push(skb, vlan_proto, vlan_tci); | |
1552 | } | |
1553 | ||
1554 | const struct bpf_func_proto bpf_skb_vlan_push_proto = { | |
1555 | .func = bpf_skb_vlan_push, | |
1556 | .gpl_only = false, | |
1557 | .ret_type = RET_INTEGER, | |
1558 | .arg1_type = ARG_PTR_TO_CTX, | |
1559 | .arg2_type = ARG_ANYTHING, | |
1560 | .arg3_type = ARG_ANYTHING, | |
1561 | }; | |
4d9c5c53 | 1562 | EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto); |
4e10df9a AS |
1563 | |
1564 | static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |
1565 | { | |
1566 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | |
1567 | ||
1568 | return skb_vlan_pop(skb); | |
1569 | } | |
1570 | ||
1571 | const struct bpf_func_proto bpf_skb_vlan_pop_proto = { | |
1572 | .func = bpf_skb_vlan_pop, | |
1573 | .gpl_only = false, | |
1574 | .ret_type = RET_INTEGER, | |
1575 | .arg1_type = ARG_PTR_TO_CTX, | |
1576 | }; | |
4d9c5c53 | 1577 | EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto); |
4e10df9a AS |
1578 | |
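A sketch of re-tagging with this pair; the protocol is passed in network byte order (anything else is coerced to 802.1Q above), and VLAN 100 is an illustrative value. Both helpers may reallocate the skb header, which is exactly why bpf_helper_changes_skb_data() below reports them: any cached packet pointer must be treated as stale after the call.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_vlan_push)(void *ctx, __u16 proto, __u16 tci) =
	(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
	(void *) BPF_FUNC_skb_vlan_pop;

int retag(struct __sk_buff *skb)
{
	/* Strip any existing outer tag, then push VLAN 100. */
	bpf_skb_vlan_pop(skb);
	if (bpf_skb_vlan_push(skb, __constant_htons(ETH_P_8021Q), 100))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}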
1579 | bool bpf_helper_changes_skb_data(void *func) | |
1580 | { | |
1581 | if (func == bpf_skb_vlan_push) | |
1582 | return true; | |
1583 | if (func == bpf_skb_vlan_pop) | |
1584 | return true; | |
1585 | return false; | |
1586 | } | |
1587 | ||
d3aa45ce AS |
1588 | static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) |
1589 | { | |
1590 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | |
1591 | struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2; | |
61adedf3 | 1592 | struct ip_tunnel_info *info = skb_tunnel_info(skb); |
d3aa45ce AS |
1593 | |
1594 | if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info)) | |
1595 | return -EINVAL; | |
7f9562a1 JB |
1596 | if (ip_tunnel_info_af(info) != AF_INET) |
1597 | return -EINVAL; | |
d3aa45ce AS |
1598 | |
1599 | to->tunnel_id = be64_to_cpu(info->key.tun_id); | |
c1ea5d67 | 1600 | to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); |
d3aa45ce AS |
1601 | |
1602 | return 0; | |
1603 | } | |
1604 | ||
1605 | const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { | |
1606 | .func = bpf_skb_get_tunnel_key, | |
1607 | .gpl_only = false, | |
1608 | .ret_type = RET_INTEGER, | |
1609 | .arg1_type = ARG_PTR_TO_CTX, | |
1610 | .arg2_type = ARG_PTR_TO_STACK, | |
1611 | .arg3_type = ARG_CONST_STACK_SIZE, | |
1612 | .arg4_type = ARG_ANYTHING, | |
1613 | }; | |
1614 | ||
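On the ingress side of a collect_md tunnel device, a program can then inspect the outer encapsulation; a sketch, where tunnel id 42 is an illustrative policy value:

static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size,
				     int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

int tunnel_ingress(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	/* size must be exactly sizeof(struct bpf_tunnel_key) and flags
	 * must be 0, otherwise the helper fails with -EINVAL as above.
	 */
	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;

	return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
}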
1615 | static struct metadata_dst __percpu *md_dst; | |
1616 | ||
1617 | static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |
1618 | { | |
1619 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | |
1620 | struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2; | |
1621 | struct metadata_dst *md = this_cpu_ptr(md_dst); | |
1622 | struct ip_tunnel_info *info; | |
1623 | ||
1624 | if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags)) | |
1625 | return -EINVAL; | |
1626 | ||
1627 | skb_dst_drop(skb); | |
1628 | dst_hold((struct dst_entry *) md); | |
1629 | skb_dst_set(skb, (struct dst_entry *) md); | |
1630 | ||
1631 | info = &md->u.tun_info; | |
1632 | info->mode = IP_TUNNEL_INFO_TX; | |
1dd34b5a | 1633 | info->key.tun_flags = TUNNEL_KEY; |
d3aa45ce | 1634 | info->key.tun_id = cpu_to_be64(from->tunnel_id); |
c1ea5d67 | 1635 | info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); |
d3aa45ce AS |
1636 | |
1637 | return 0; | |
1638 | } | |
1639 | ||
1640 | const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { | |
1641 | .func = bpf_skb_set_tunnel_key, | |
1642 | .gpl_only = false, | |
1643 | .ret_type = RET_INTEGER, | |
1644 | .arg1_type = ARG_PTR_TO_CTX, | |
1645 | .arg2_type = ARG_PTR_TO_STACK, | |
1646 | .arg3_type = ARG_CONST_STACK_SIZE, | |
1647 | .arg4_type = ARG_ANYTHING, | |
1648 | }; | |
1649 | ||
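The egress counterpart, run before a collect_md tunnel device transmits. Note that remote_ipv4 is given in host byte order, since the helper applies cpu_to_be32() itself; the id and address below are illustrative:

static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size,
				     int flags) =
	(void *) BPF_FUNC_skb_set_tunnel_key;

int tunnel_egress(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id   = 42,
		.remote_ipv4 = 0xac100164,	/* 172.16.1.100 */
	};

	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
	return TC_ACT_OK;
}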
1650 | static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void) | |
1651 | { | |
1652 | if (!md_dst) { | |
1653 | /* A race is not possible here, since this is called from | 
1654 | * the verifier, which holds the verifier mutex. | 
1655 | */ | |
1656 | md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL); | |
1657 | if (!md_dst) | |
1658 | return NULL; | |
1659 | } | |
1660 | return &bpf_skb_set_tunnel_key_proto; | |
1661 | } | |
1662 | ||
d4052c4a DB |
1663 | static const struct bpf_func_proto * |
1664 | sk_filter_func_proto(enum bpf_func_id func_id) | |
89aa0758 AS |
1665 | { |
1666 | switch (func_id) { | |
1667 | case BPF_FUNC_map_lookup_elem: | |
1668 | return &bpf_map_lookup_elem_proto; | |
1669 | case BPF_FUNC_map_update_elem: | |
1670 | return &bpf_map_update_elem_proto; | |
1671 | case BPF_FUNC_map_delete_elem: | |
1672 | return &bpf_map_delete_elem_proto; | |
03e69b50 DB |
1673 | case BPF_FUNC_get_prandom_u32: |
1674 | return &bpf_get_prandom_u32_proto; | |
c04167ce DB |
1675 | case BPF_FUNC_get_smp_processor_id: |
1676 | return &bpf_get_smp_processor_id_proto; | |
04fd61ab AS |
1677 | case BPF_FUNC_tail_call: |
1678 | return &bpf_tail_call_proto; | |
17ca8cbf DB |
1679 | case BPF_FUNC_ktime_get_ns: |
1680 | return &bpf_ktime_get_ns_proto; | |
0756ea3e | 1681 | case BPF_FUNC_trace_printk: |
1be7f75d AS |
1682 | if (capable(CAP_SYS_ADMIN)) |
1683 | return bpf_get_trace_printk_proto(); | |
89aa0758 AS |
1684 | default: |
1685 | return NULL; | |
1686 | } | |
1687 | } | |
1688 | ||
608cd71a AS |
1689 | static const struct bpf_func_proto * |
1690 | tc_cls_act_func_proto(enum bpf_func_id func_id) | |
1691 | { | |
1692 | switch (func_id) { | |
1693 | case BPF_FUNC_skb_store_bytes: | |
1694 | return &bpf_skb_store_bytes_proto; | |
05c74e5e DB |
1695 | case BPF_FUNC_skb_load_bytes: |
1696 | return &bpf_skb_load_bytes_proto; | |
91bc4822 AS |
1697 | case BPF_FUNC_l3_csum_replace: |
1698 | return &bpf_l3_csum_replace_proto; | |
1699 | case BPF_FUNC_l4_csum_replace: | |
1700 | return &bpf_l4_csum_replace_proto; | |
3896d655 AS |
1701 | case BPF_FUNC_clone_redirect: |
1702 | return &bpf_clone_redirect_proto; | |
8d20aabe DB |
1703 | case BPF_FUNC_get_cgroup_classid: |
1704 | return &bpf_get_cgroup_classid_proto; | |
4e10df9a AS |
1705 | case BPF_FUNC_skb_vlan_push: |
1706 | return &bpf_skb_vlan_push_proto; | |
1707 | case BPF_FUNC_skb_vlan_pop: | |
1708 | return &bpf_skb_vlan_pop_proto; | |
d3aa45ce AS |
1709 | case BPF_FUNC_skb_get_tunnel_key: |
1710 | return &bpf_skb_get_tunnel_key_proto; | |
1711 | case BPF_FUNC_skb_set_tunnel_key: | |
1712 | return bpf_get_skb_set_tunnel_key_proto(); | |
27b29f63 AS |
1713 | case BPF_FUNC_redirect: |
1714 | return &bpf_redirect_proto; | |
c46646d0 DB |
1715 | case BPF_FUNC_get_route_realm: |
1716 | return &bpf_get_route_realm_proto; | |
608cd71a AS |
1717 | default: |
1718 | return sk_filter_func_proto(func_id); | |
1719 | } | |
1720 | } | |
1721 | ||
d691f9e8 | 1722 | static bool __is_valid_access(int off, int size, enum bpf_access_type type) |
89aa0758 | 1723 | { |
9bac3d6d AS |
1724 | /* check bounds */ |
1725 | if (off < 0 || off >= sizeof(struct __sk_buff)) | |
1726 | return false; | |
1727 | ||
1728 | /* disallow misaligned access */ | |
1729 | if (off % size != 0) | |
1730 | return false; | |
1731 | ||
1732 | /* all __sk_buff fields are __u32 */ | |
1733 | if (size != 4) | |
1734 | return false; | |
1735 | ||
1736 | return true; | |
1737 | } | |
1738 | ||
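In BPF C terms, the three checks above play out as follows (a sketch; all __sk_buff fields are __u32, so only aligned 4-byte accesses pass):

__u32 ok   = skb->len;				/* accepted */
__u16 bad1 = *(__u16 *)&skb->len;		/* rejected: size != 4 */
__u32 bad2 = *(__u32 *)((void *)skb + 3);	/* rejected: misaligned */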
d691f9e8 AS |
1739 | static bool sk_filter_is_valid_access(int off, int size, |
1740 | enum bpf_access_type type) | |
1741 | { | |
045efa82 DB |
1742 | if (off == offsetof(struct __sk_buff, tc_classid)) |
1743 | return false; | |
1744 | ||
d691f9e8 AS |
1745 | if (type == BPF_WRITE) { |
1746 | switch (off) { | |
1747 | case offsetof(struct __sk_buff, cb[0]) ... | |
1748 | offsetof(struct __sk_buff, cb[4]): | |
1749 | break; | |
1750 | default: | |
1751 | return false; | |
1752 | } | |
1753 | } | |
1754 | ||
1755 | return __is_valid_access(off, size, type); | |
1756 | } | |
1757 | ||
1758 | static bool tc_cls_act_is_valid_access(int off, int size, | |
1759 | enum bpf_access_type type) | |
1760 | { | |
045efa82 DB |
1761 | if (off == offsetof(struct __sk_buff, tc_classid)) |
1762 | return type == BPF_WRITE; | 
1763 | ||
d691f9e8 AS |
1764 | if (type == BPF_WRITE) { |
1765 | switch (off) { | |
1766 | case offsetof(struct __sk_buff, mark): | |
1767 | case offsetof(struct __sk_buff, tc_index): | |
754f1e6a | 1768 | case offsetof(struct __sk_buff, priority): |
d691f9e8 AS |
1769 | case offsetof(struct __sk_buff, cb[0]) ... |
1770 | offsetof(struct __sk_buff, cb[4]): | |
1771 | break; | |
1772 | default: | |
1773 | return false; | |
1774 | } | |
1775 | } | |
1776 | return __is_valid_access(off, size, type); | |
1777 | } | |
1778 | ||
1779 | static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, | |
1780 | int src_reg, int ctx_off, | |
ff936a04 AS |
1781 | struct bpf_insn *insn_buf, |
1782 | struct bpf_prog *prog) | |
9bac3d6d AS |
1783 | { |
1784 | struct bpf_insn *insn = insn_buf; | |
1785 | ||
1786 | switch (ctx_off) { | |
1787 | case offsetof(struct __sk_buff, len): | |
1788 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); | |
1789 | ||
1790 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | |
1791 | offsetof(struct sk_buff, len)); | |
1792 | break; | |
1793 | ||
0b8c707d DB |
1794 | case offsetof(struct __sk_buff, protocol): |
1795 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); | |
1796 | ||
1797 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, | |
1798 | offsetof(struct sk_buff, protocol)); | |
1799 | break; | |
1800 | ||
27cd5452 MS |
1801 | case offsetof(struct __sk_buff, vlan_proto): |
1802 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); | |
1803 | ||
1804 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, | |
1805 | offsetof(struct sk_buff, vlan_proto)); | |
1806 | break; | |
1807 | ||
bcad5718 DB |
1808 | case offsetof(struct __sk_buff, priority): |
1809 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4); | |
1810 | ||
754f1e6a DB |
1811 | if (type == BPF_WRITE) |
1812 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, | |
1813 | offsetof(struct sk_buff, priority)); | |
1814 | else | |
1815 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | |
1816 | offsetof(struct sk_buff, priority)); | |
bcad5718 DB |
1817 | break; |
1818 | ||
37e82c2f AS |
1819 | case offsetof(struct __sk_buff, ingress_ifindex): |
1820 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); | |
1821 | ||
1822 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | |
1823 | offsetof(struct sk_buff, skb_iif)); | |
1824 | break; | |
1825 | ||
1826 | case offsetof(struct __sk_buff, ifindex): | |
1827 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); | |
1828 | ||
1829 | *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), | |
1830 | dst_reg, src_reg, | |
1831 | offsetof(struct sk_buff, dev)); | |
1832 | *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); | |
1833 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, | |
1834 | offsetof(struct net_device, ifindex)); | |
1835 | break; | |
1836 | ||
ba7591d8 DB |
1837 | case offsetof(struct __sk_buff, hash): |
1838 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); | |
1839 | ||
1840 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | |
1841 | offsetof(struct sk_buff, hash)); | |
1842 | break; | |
1843 | ||
9bac3d6d | 1844 | case offsetof(struct __sk_buff, mark): |
d691f9e8 AS |
1845 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); |
1846 | ||
1847 | if (type == BPF_WRITE) | |
1848 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, | |
1849 | offsetof(struct sk_buff, mark)); | |
1850 | else | |
1851 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | |
1852 | offsetof(struct sk_buff, mark)); | |
1853 | break; | |
9bac3d6d AS |
1854 | |
1855 | case offsetof(struct __sk_buff, pkt_type): | |
1856 | return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); | |
1857 | ||
1858 | case offsetof(struct __sk_buff, queue_mapping): | |
1859 | return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn); | |
c2497395 | 1860 | |
c2497395 AS |
1861 | case offsetof(struct __sk_buff, vlan_present): |
1862 | return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, | |
1863 | dst_reg, src_reg, insn); | |
1864 | ||
1865 | case offsetof(struct __sk_buff, vlan_tci): | |
1866 | return convert_skb_access(SKF_AD_VLAN_TAG, | |
1867 | dst_reg, src_reg, insn); | |
d691f9e8 AS |
1868 | |
1869 | case offsetof(struct __sk_buff, cb[0]) ... | |
1870 | offsetof(struct __sk_buff, cb[4]): | |
1871 | BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); | |
1872 | ||
ff936a04 | 1873 | prog->cb_access = 1; |
d691f9e8 AS |
1874 | ctx_off -= offsetof(struct __sk_buff, cb[0]); |
1875 | ctx_off += offsetof(struct sk_buff, cb); | |
1876 | ctx_off += offsetof(struct qdisc_skb_cb, data); | |
1877 | if (type == BPF_WRITE) | |
1878 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); | |
1879 | else | |
1880 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); | |
1881 | break; | |
1882 | ||
045efa82 DB |
1883 | case offsetof(struct __sk_buff, tc_classid): |
1884 | ctx_off -= offsetof(struct __sk_buff, tc_classid); | |
1885 | ctx_off += offsetof(struct sk_buff, cb); | |
1886 | ctx_off += offsetof(struct qdisc_skb_cb, tc_classid); | |
1887 | WARN_ON(type != BPF_WRITE); | |
1888 | *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off); | |
1889 | break; | |
1890 | ||
d691f9e8 AS |
1891 | case offsetof(struct __sk_buff, tc_index): |
1892 | #ifdef CONFIG_NET_SCHED | |
1893 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); | |
1894 | ||
1895 | if (type == BPF_WRITE) | |
1896 | *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, | |
1897 | offsetof(struct sk_buff, tc_index)); | |
1898 | else | |
1899 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, | |
1900 | offsetof(struct sk_buff, tc_index)); | |
1901 | break; | |
1902 | #else | |
1903 | if (type == BPF_WRITE) | |
1904 | *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); | |
1905 | else | |
1906 | *insn++ = BPF_MOV64_IMM(dst_reg, 0); | |
1907 | break; | |
1908 | #endif | |
9bac3d6d AS |
1909 | } |
1910 | ||
1911 | return insn - insn_buf; | |
89aa0758 AS |
1912 | } |
1913 | ||
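To make the conversion concrete: a program's load of skb->len against the __sk_buff view is rewritten into a load from the real struct sk_buff (a sketch; the register choice is illustrative):

/* as written by the program, against the stable __sk_buff ABI: */
struct bpf_insn before = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				     offsetof(struct __sk_buff, len));

/* as emitted by the converter above, against the real sk_buff: */
struct bpf_insn after = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct sk_buff, len));

This indirection keeps __sk_buff a stable user-visible layout while struct sk_buff remains free to change between kernel versions.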
d4052c4a DB |
1914 | static const struct bpf_verifier_ops sk_filter_ops = { |
1915 | .get_func_proto = sk_filter_func_proto, | |
1916 | .is_valid_access = sk_filter_is_valid_access, | |
d691f9e8 | 1917 | .convert_ctx_access = bpf_net_convert_ctx_access, |
89aa0758 AS |
1918 | }; |
1919 | ||
608cd71a AS |
1920 | static const struct bpf_verifier_ops tc_cls_act_ops = { |
1921 | .get_func_proto = tc_cls_act_func_proto, | |
d691f9e8 AS |
1922 | .is_valid_access = tc_cls_act_is_valid_access, |
1923 | .convert_ctx_access = bpf_net_convert_ctx_access, | |
608cd71a AS |
1924 | }; |
1925 | ||
d4052c4a DB |
1926 | static struct bpf_prog_type_list sk_filter_type __read_mostly = { |
1927 | .ops = &sk_filter_ops, | |
89aa0758 AS |
1928 | .type = BPF_PROG_TYPE_SOCKET_FILTER, |
1929 | }; | |
1930 | ||
96be4325 | 1931 | static struct bpf_prog_type_list sched_cls_type __read_mostly = { |
608cd71a | 1932 | .ops = &tc_cls_act_ops, |
96be4325 DB |
1933 | .type = BPF_PROG_TYPE_SCHED_CLS, |
1934 | }; | |
1935 | ||
94caee8c | 1936 | static struct bpf_prog_type_list sched_act_type __read_mostly = { |
608cd71a | 1937 | .ops = &tc_cls_act_ops, |
94caee8c DB |
1938 | .type = BPF_PROG_TYPE_SCHED_ACT, |
1939 | }; | |
1940 | ||
d4052c4a | 1941 | static int __init register_sk_filter_ops(void) |
89aa0758 | 1942 | { |
d4052c4a | 1943 | bpf_register_prog_type(&sk_filter_type); |
96be4325 | 1944 | bpf_register_prog_type(&sched_cls_type); |
94caee8c | 1945 | bpf_register_prog_type(&sched_act_type); |
96be4325 | 1946 | |
89aa0758 AS |
1947 | return 0; |
1948 | } | |
d4052c4a DB |
1949 | late_initcall(register_sk_filter_ops); |
1950 | ||
55b33325 PE |
1951 | int sk_detach_filter(struct sock *sk) |
1952 | { | |
1953 | int ret = -ENOENT; | |
1954 | struct sk_filter *filter; | |
1955 | ||
d59577b6 VB |
1956 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
1957 | return -EPERM; | |
1958 | ||
f91ff5b9 ED |
1959 | filter = rcu_dereference_protected(sk->sk_filter, |
1960 | sock_owned_by_user(sk)); | |
55b33325 | 1961 | if (filter) { |
a9b3cd7f | 1962 | RCU_INIT_POINTER(sk->sk_filter, NULL); |
46bcf14f | 1963 | sk_filter_uncharge(sk, filter); |
55b33325 PE |
1964 | ret = 0; |
1965 | } | |
a3ea269b | 1966 | |
55b33325 PE |
1967 | return ret; |
1968 | } | |
5ff3f073 | 1969 | EXPORT_SYMBOL_GPL(sk_detach_filter); |
a8fc9277 | 1970 | |
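From user space this is reached through setsockopt(); a minimal sketch (the option value is required but ignored, so any int will do):

#include <sys/socket.h>

static int detach_filter(int fd)
{
	int dummy = 0;

	return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
			  &dummy, sizeof(dummy));
}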
a3ea269b DB |
1971 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, |
1972 | unsigned int len) | |
a8fc9277 | 1973 | { |
a3ea269b | 1974 | struct sock_fprog_kern *fprog; |
a8fc9277 | 1975 | struct sk_filter *filter; |
a3ea269b | 1976 | int ret = 0; |
a8fc9277 PE |
1977 | |
1978 | lock_sock(sk); | |
1979 | filter = rcu_dereference_protected(sk->sk_filter, | |
a3ea269b | 1980 | sock_owned_by_user(sk)); |
a8fc9277 PE |
1981 | if (!filter) |
1982 | goto out; | |
a3ea269b DB |
1983 | |
1984 | /* We're copying the filter that was originally attached, | 
93d08b69 DB | 
1985 | * so no conversion/decoding is needed anymore. eBPF programs | 
1986 | * that have no original program cannot be dumped through this. | 
a3ea269b | 1987 | */ |
93d08b69 | 1988 | ret = -EACCES; |
7ae457c1 | 1989 | fprog = filter->prog->orig_prog; |
93d08b69 DB |
1990 | if (!fprog) |
1991 | goto out; | |
a3ea269b DB |
1992 | |
1993 | ret = fprog->len; | |
a8fc9277 | 1994 | if (!len) |
a3ea269b | 1995 | /* User space only enquires number of filter blocks. */ |
a8fc9277 | 1996 | goto out; |
a3ea269b | 1997 | |
a8fc9277 | 1998 | ret = -EINVAL; |
a3ea269b | 1999 | if (len < fprog->len) |
a8fc9277 PE |
2000 | goto out; |
2001 | ||
2002 | ret = -EFAULT; | |
009937e7 | 2003 | if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) |
a3ea269b | 2004 | goto out; |
a8fc9277 | 2005 | |
a3ea269b DB |
2006 | /* The API requires us to return the number of filter | 
2007 | * blocks rather than the number of bytes. | 
2008 | */ | |
2009 | ret = fprog->len; | |
a8fc9277 PE |
2010 | out: |
2011 | release_sock(sk); | |
2012 | return ret; | |
2013 | } |
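The matching user-space side calls getsockopt(SO_GET_FILTER) twice: once with a zero length to learn the block count (the early if (!len) exit above), then again with a suitably sized buffer. A sketch with minimal error handling:

#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_GET_FILTER
#define SO_GET_FILTER SO_ATTACH_FILTER	/* same value, read direction */
#endif

static int dump_filter(int fd, struct sock_filter **blocks)
{
	socklen_t len = 0;

	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return -1;		/* len now holds the block count */

	*blocks = calloc(len, sizeof(**blocks));
	if (!*blocks)
		return -1;

	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, *blocks, &len) < 0)
		return -1;

	return (int)len;		/* number of filter blocks */
}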