void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
- void *value) {
+ void *value, u64 flags) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}
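For reference, the diff assumes BPF_F_CPU and BPF_F_ALL_CPUS are plain low-order bits in the same uapi enum as the existing element flags; the (u32) mask check further down only works if they fit in the lower 32 bits. A sketch of the presumed definitions (the two new numeric values here are an assumption, not taken from this diff):

	enum {
		BPF_ANY		= 0,	/* create new element or update existing */
		BPF_NOEXIST	= 1,	/* create new element if it didn't exist */
		BPF_EXIST	= 2,	/* update existing element */
		BPF_F_LOCK	= 4,	/* spin_lock-ed map_lookup/map_update */
		BPF_F_CPU	= 8,	/* assumed: single-CPU lookup/update */
		BPF_F_ALL_CPUS	= 16,	/* assumed: update every CPU's slot */
	};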
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
- void *value)
+ void *value, u64 map_flags)
{
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
struct bpf_cgroup_storage *storage;
/* per_cpu areas are zero-filled and bpf programs can only
* access 'value_size' of them, so copying rounded areas
* will not leak any kernel data
*/
+ if (map_flags & BPF_F_CPU) {
+ /* the target CPU is carried in the upper 32 bits of the flags */
+ cpu = map_flags >> 32;
+ copy_map_value(_map, value, per_cpu_ptr(storage->percpu_buf, cpu));
+ goto unlock;
+ }
size = round_up(_map->value_size, 8);
for_each_possible_cpu(cpu) {
copy_map_value_long(_map, value + off, per_cpu_ptr(storage->percpu_buf, cpu));
off += size;
}
+unlock:
rcu_read_unlock();
return 0;
}
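With this hunk a lookup no longer has to dump every CPU's slot: when BPF_F_CPU is set, the target CPU rides in the upper 32 bits of the flags word and only that CPU's value is copied out, so the caller needs a value_size buffer rather than one slot per possible CPU. A minimal userspace sketch, assuming the new flag is accepted through libbpf's existing bpf_map_lookup_elem_flags() wrapper (map_fd, key and out are placeholder names):

	__u32 cpu = 2;					/* CPU whose slot we want */
	__u64 flags = BPF_F_CPU | ((__u64)cpu << 32);	/* cpu in bits 32-63 */
	char out[64];					/* >= value_size bytes */
	int err = bpf_map_lookup_elem_flags(map_fd, &key, out, flags);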
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
struct bpf_cgroup_storage *storage;
- int cpu, off = 0;
+ void *val;
u32 size;
+ int cpu;
- if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+ /* only the lower 32 bits hold flags proper; the upper 32 bits
+ * carry the target CPU for BPF_F_CPU
+ */
+ if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
return -EINVAL;
rcu_read_lock();
/* the user space will provide round_up(value_size, 8) bytes that
* will be copied into per-cpu area. bpf programs can only access
* value_size of it. During lookup the same extra bytes will be
* returned or zeros which were zero-filled by percpu_alloc,
* so no kernel data leaks possible
*/
+ if (map_flags & BPF_F_CPU) {
+ /* update only the slot of the CPU encoded in the upper 32 bits */
+ cpu = map_flags >> 32;
+ copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), value);
+ goto unlock;
+ }
size = round_up(_map->value_size, 8);
for_each_possible_cpu(cpu) {
- copy_map_value_long(_map, per_cpu_ptr(storage->percpu_buf, cpu), value + off);
- off += size;
+ /* BPF_F_ALL_CPUS replicates a single value to every possible CPU */
+ val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
+ copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), val);
}
+unlock:
rcu_read_unlock();
return 0;
}
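The update path gains the symmetric behaviour: BPF_F_CPU writes the buffer into a single CPU's slot, while BPF_F_ALL_CPUS replicates one value_size-byte buffer into every possible CPU's slot, so userspace no longer has to stage round_up(value_size, 8) * num_possible_cpus() bytes just to set a uniform value. A hedged sketch of both cases through libbpf's bpf_map_update_elem() (map_fd, key and val are placeholders):

	/* write val into CPU 2's slot only */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_CPU | ((__u64)2 << 32));

	/* replicate a single copy of val to every possible CPU */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);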
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
- err = bpf_percpu_cgroup_storage_copy(map, key, value);
+ err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
err = bpf_stackmap_extract(map, key, value, false);
} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {