// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
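
	/* Worked example (illustrative): attr->max_entries == 5 gives
	 * fls_long(4) == 3, so mask64 ends up as 0b111 and index_mask == 7;
	 * with the Spectre v1 mitigation active, max_entries is rounded up
	 * to 8 so that the later "index & index_mask" can never address
	 * past the allocation.
	 */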

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
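
/* Rough C equivalent of the sequence emitted above (a sketch, assuming the
 * !bypass_spec_v1 path):
 *
 *	u32 index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;
 *	return array->value + (u64)elem_size * index;
 */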

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

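	/* pptrs[] holds one per-cpu pointer per element, so scale the
	 * (masked) index by sizeof(void *), i.e. shift left by 3, before
	 * adding it to the pptrs base computed into R1 above.
	 */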
	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
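
/* Illustrative layout: with value_size == 12 and 4 possible CPUs, user space
 * must supply round_up(12, 8) * 4 == 64 bytes; the loop above packs one
 * 16-byte, 8-byte-aligned slot per possible CPU into that buffer.
 */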

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}
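
/* Minimal user-space sketch (illustrative, assuming libbpf): all slots are
 * pre-allocated, so BPF_NOEXIST can never succeed:
 *
 *	__u32 key = 3, val = 42;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);	// ok
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);	// fails: EEXIST
 *	bpf_map_lookup_elem(map_fd, &key, &val);		// reads slot 3
 */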

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_putc(m, '\n');
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
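
/* Minimal user-space sketch (illustrative, assuming a map created with
 * BPF_F_MMAPABLE and __u64 values): one page at offset 0 is always within
 * the PAGE_ALIGN()ed value area, and elements can then be read and written
 * without syscalls:
 *
 *	size_t len = sysconf(_SC_PAGESIZE);
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   map_fd, 0);
 *	vals[0] = 42;		// direct update of element 0
 */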

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu *pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = (void __percpu *)(uintptr_t)v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	cant_migrate();

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	return num_elems;
}
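
/* Minimal BPF-side sketch (illustrative) of driving the loop above through
 * the bpf_for_each_map_elem() helper:
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		*(__u64 *)ctx += *val;
 *		return 0;	// 0 - continue, 1 - stop
 *	}
 *
 *	__u64 sum = 0;
 *	bpf_for_each_map_elem(&my_array, sum_cb, &sum, 0);
 */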

static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}
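
/* Worked example (illustrative): a plain BPF_MAP_TYPE_ARRAY with
 * max_entries == 1000 and value_size == 12 (elem_size == 16) reports
 * sizeof(struct bpf_array) + 1000 * 16 bytes; the per-cpu variant instead
 * adds 1000 * sizeof(void *) plus 1000 * 16 per possible CPU.
 */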

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}
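
/* Typical flow (illustrative): user space stores a program fd into a
 * BPF_MAP_TYPE_PROG_ARRAY slot, and a BPF program jumps there via
 * bpf_tail_call(), which never returns on success:
 *
 *	__u32 key = 0;
 *	bpf_map_update_elem(prog_array_fd, &key, &prog_fd, BPF_ANY);
 *
 *	// in the BPF program:
 *	bpf_tail_call(ctx, &jmp_table, 0);
 */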

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);
	bool is_extended;

	if (IS_ERR(prog))
		return prog;

	if (prog->type == BPF_PROG_TYPE_EXT ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&prog->aux->ext_mutex);
	is_extended = prog->aux->is_extended;
	if (!is_extended)
		prog->aux->prog_array_member_cnt++;
	mutex_unlock(&prog->aux->ext_mutex);
	if (is_extended) {
		/* Extended prog can not be tail callee. It's to prevent a
		 * potential infinite loop like:
		 * tail callee prog entry -> tail callee prog subprog ->
		 * freplace prog entry --tailcall-> tail callee prog entry.
		 */
		bpf_prog_put(prog);
		return ERR_PTR(-EBUSY);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	struct bpf_prog *prog = ptr;

	mutex_lock(&prog->aux->ext_mutex);
	prog->aux->prog_array_member_cnt--;
	mutex_unlock(&prog->aux->ext_mutex);
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(prog);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_putc(m, '\n');
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			bpf_arch_poke_desc_update(poke, new, old);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map, true);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_perf_event is freed after one RCU grace period */
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			__fd_array_map_delete_elem(map, &i, true);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* cgroup_put free cgrp after a rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

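/* Minimal BPF-side sketch (illustrative) for BPF_MAP_TYPE_ARRAY_OF_MAPS:
 * the outer lookup yields an inner map pointer, which is then queried like
 * any other map:
 *
 *	struct bpf_map *inner = bpf_map_lookup_elem(&outer_array, &idx);
 *	if (inner)
 *		val = bpf_map_lookup_elem(inner, &key);
 */
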
static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = array->elem_size;
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};