// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

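/* Allocation layout: a plain BPF_MAP_TYPE_ARRAY is one bpf_map_area_alloc()
 * region holding struct bpf_array immediately followed by the elements. With
 * BPF_F_MMAPABLE the value area is page-aligned (and the struct placed just
 * before it) so it can be remapped into user space. Per-cpu arrays store
 * per-cpu pointers in pptrs[] instead of inline values.
 */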
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

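	/* Calling convention of the inlined helper: on entry r1 (map_ptr)
	 * holds the map pointer and r2 (index) points at the u32 key; the
	 * result is produced in r0 (ret).
	 */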
	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

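/* Copy one element's per-cpu values into a user-supplied buffer laid out as
 * num_possible_cpus() consecutive slots of round_up(value_size, 8) bytes.
 */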
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
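/* A NULL key restarts iteration: it (like any out-of-range index) yields
 * index 0 as the next key.
 */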
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that will be
	 * copied into the per-cpu area. BPF programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned, or zeros which were zero-filled by percpu_alloc, so no
	 * kernel data leak is possible.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

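/* For BPF_F_MMAPABLE arrays the struct bpf_array header sits part-way into
 * the vmalloc area so that array->value starts page-aligned; round down to
 * recover the address that bpf_map_area_mmapable_alloc() returned.
 */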
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding programs to
	 * complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

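/* User space mmap()s only the value area: vm_pgoff is shifted past the
 * page-aligned struct bpf_array header that precedes it in the vmalloc
 * region, and the requested range must not extend beyond the elements.
 */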
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

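/* fd-based array maps (prog_array, perf_event_array, cgroup_array,
 * array_of_maps) store kernel object pointers resolved from file
 * descriptors; their elements cannot be read directly by BPF programs.
 */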
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

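/* Tail-call poke tracking: each prog_poke_elem remembers a program whose
 * JITed image may contain direct jumps into slots of this prog_array and
 * therefore must be patched whenever a slot is updated or cleared.
 */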
struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of an RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are also fine since we're protected
			 *    under poke_mutex and untrack the programs before the
			 *    JIT buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}

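/* Clearing the prog_array patches tail-call sites under poke_mutex, so the
 * clear triggered by map_release_uref is deferred to a workqueue; the extra
 * map reference taken in prog_array_map_clear() keeps the map alive until
 * the work has run.
 */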
static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

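/* perf_event_array glue: each element pins the perf event's struct file so
 * the event cannot go away while it is referenced from the map; entries are
 * freed via call_rcu() since BPF programs may still be dereferencing them.
 */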
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

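/* Array-of-maps: element values are pointers to inner maps. The inner map
 * attributes are snapshotted via bpf_map_meta_alloc() so the verifier can
 * type-check lookups against them.
 */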
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

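/* Inline lookup for array-of-maps: same sequence as array_map_gen_lookup()
 * plus one extra load to dereference the stored inner-map pointer.
 */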
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};