// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
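	/* Worked example: attr->max_entries == 5 gives fls_long(4) == 3,
	 * so mask64 == 0b111 and an unprivileged map is rounded up to
	 * 8 entries; "index & index_mask" then cannot even speculatively
	 * address memory past the allocation.
	 */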

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
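
/* The two helpers above back direct value access: for single-entry arrays
 * (the .bss/.data/.rodata maps built from global data), the verifier can
 * rewrite loads to use the element's address as an immediate instead of
 * going through a map lookup.
 */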

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
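
/* The sequence emitted above inlines the lookup, roughly:
 *
 *	if (index >= max_entries)
 *		return NULL;
 *	index &= index_mask;	// unprivileged maps only
 *	return &array->value[0] + index * elem_size;
 *
 * avoiding a helper call on the fast path.
 */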

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
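
/* Note the iteration convention above: a NULL key maps to U32_MAX, which is
 * always >= max_entries, so the first get_next_key call yields index 0 and
 * repeated calls walk the array until -ENOENT at the last slot.
 */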

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned or zeros which were zero-filled by
	 * percpu_alloc, so no kernel data leaks are possible.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}
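
/* For BPF_F_MMAPABLE maps, "array" does not point at the start of the
 * vmalloc'ed region: array_map_alloc() shifted it so that array->value
 * lands exactly on a page boundary. Rounding the pointer down to the
 * enclosing page recovers the original allocation for freeing/remapping.
 */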

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
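
/* The pgoff adjustment above skips the struct bpf_array header pages of the
 * vmalloc region, so offset 0 from user space maps straight to the first
 * element, while the size check keeps the mapping within the value area.
 */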

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
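
/* Update and delete above share the same pattern: the slot is swapped with
 * xchg() so concurrent lookups always see either the old or the new pointer,
 * and the displaced object is only released via map_fd_put_ptr() after the
 * swap, with the final free deferred (e.g. behind RCU) by the respective
 * put_ptr implementation so in-flight readers stay safe.
 */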

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
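
/* Passing &i as the key works because fd_array_map_delete_elem() only reads
 * the u32 index from it; any -ENOENT for already-empty slots is ignored.
 */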

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also programs reaching a refcount of zero while patching
			 *    is in progress are okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest, which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}
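
/* Clearing is deferred to a workqueue because emptying the map goes through
 * fd_array_map_delete_elem(), which takes poke_mutex (a sleeping lock) and
 * patches program text, while the uref release path may run in a context
 * that cannot sleep. The extra bpf_map_inc() keeps the map alive until the
 * deferred work has run.
 */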

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
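
/* The perf file reference is dropped in an RCU callback rather than
 * immediately: a BPF program may still dereference the entry under
 * rcu_read_lock(), so the fput()/kfree() must wait for a grace period.
 */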

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
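
/* READ_ONCE() matters here: the inner map slot can be swapped by a
 * concurrent update, so the pointer must be fetched exactly once before
 * it is dereferenced under RCU.
 */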

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
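
/* Compared to array_map_gen_lookup(), the inlined sequence above adds a
 * BPF_LDX_MEM(BPF_DW, ...) to load the stored inner-map pointer from the
 * slot (mirroring the READ_ONCE() in array_of_map_lookup_elem()) plus a
 * NULL check, which is why the JGE skip offsets grow from 3/4 to 5/6.
 */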

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};