bpf: Factor out htab_elem_value() helper
author     Hou Tao <houtao1@huawei.com>
           Tue, 1 Apr 2025 06:22:45 +0000 (14:22 +0800)
committer  Alexei Starovoitov <ast@kernel.org>
           Thu, 10 Apr 2025 03:12:53 +0000 (20:12 -0700)
All hash maps store the map key and map value together. The map value is
placed at offset round_up(key_size, 8) from the start of the map key.
Factor out a common helper, htab_elem_value(), to calculate the address
of the map value instead of duplicating the logic.
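For illustration only, here is a minimal userspace sketch of the layout the
helper encodes (this is not the kernel's real struct htab_elem, whose other
members are elided to a single placeholder): a key of key_size bytes is
padded up to the next 8-byte boundary, and the value starts right after
the padding.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for the kernel's round_up(); y must be a power of two. */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

/* Toy stand-in for struct htab_elem: one placeholder member, then the
 * key bytes, then the 8-byte-aligned value. */
struct htab_elem {
	uint32_t hash;	/* placeholder for the real struct's members */
	char key[];	/* key_size key bytes, padding, then the value */
};

/* The factored-out helper: the value lives round_up(key_size, 8) bytes
 * past the start of the key. */
static inline void *htab_elem_value(struct htab_elem *l, uint32_t key_size)
{
	return l->key + round_up(key_size, 8);
}

int main(void)
{
	uint32_t key_size = 5;	/* rounds up to 8 */
	struct htab_elem *l = malloc(sizeof(*l) + round_up(key_size, 8) + 16);

	memcpy(l->key, "hello", key_size);
	strcpy(htab_elem_value(l, key_size), "world");

	/* Prints: value offset within key area: 8, value: world */
	printf("value offset within key area: %u, value: %s\n",
	       (unsigned)round_up(key_size, 8),
	       (char *)htab_elem_value(l, key_size));
	free(l);
	return 0;
}

The same offset computation previously appeared open-coded at every call
site below as l->key + round_up(key_size, 8) (or roundup()); the patch
replaces each occurrence with the helper.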

Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250401062250.543403-2-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/hashtab.c

index 5a5adc66b8e22454089aab2b2f61961d832b5d3e..0bebc919bbf71f729dab88e46b9fe87f92efdda1 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -175,20 +175,25 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
                htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 }
 
+static inline void *htab_elem_value(struct htab_elem *l, u32 key_size)
+{
+       return l->key + round_up(key_size, 8);
+}
+
 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
 {
-       *(void __percpu **)(l->key + roundup(key_size, 8)) = pptr;
+       *(void __percpu **)htab_elem_value(l, key_size) = pptr;
 }
 
 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 {
-       return *(void __percpu **)(l->key + roundup(key_size, 8));
+       return *(void __percpu **)htab_elem_value(l, key_size);
 }
 
 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 {
-       return *(void **)(l->key + roundup(map->key_size, 8));
+       return *(void **)htab_elem_value(l, map->key_size);
 }
 
 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
@@ -215,10 +220,10 @@ static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
                elem = get_htab_elem(htab, i);
                if (btf_record_has_field(htab->map.record, BPF_TIMER))
                        bpf_obj_free_timer(htab->map.record,
-                                          elem->key + round_up(htab->map.key_size, 8));
+                                          htab_elem_value(elem, htab->map.key_size));
                if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
                        bpf_obj_free_workqueue(htab->map.record,
-                                              elem->key + round_up(htab->map.key_size, 8));
+                                              htab_elem_value(elem, htab->map.key_size));
                cond_resched();
        }
 }
@@ -245,7 +250,8 @@ static void htab_free_prealloced_fields(struct bpf_htab *htab)
                                cond_resched();
                        }
                } else {
-                       bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
+                       bpf_obj_free_fields(htab->map.record,
+                                           htab_elem_value(elem, htab->map.key_size));
                        cond_resched();
                }
                cond_resched();
@@ -670,7 +676,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
        struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
        if (l)
-               return l->key + round_up(map->key_size, 8);
+               return htab_elem_value(l, map->key_size);
 
        return NULL;
 }
@@ -709,7 +715,7 @@ static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
        if (l) {
                if (mark)
                        bpf_lru_node_set_ref(&l->lru_node);
-               return l->key + round_up(map->key_size, 8);
+               return htab_elem_value(l, map->key_size);
        }
 
        return NULL;
@@ -763,7 +769,7 @@ static void check_and_free_fields(struct bpf_htab *htab,
                for_each_possible_cpu(cpu)
                        bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
        } else {
-               void *map_value = elem->key + round_up(htab->map.key_size, 8);
+               void *map_value = htab_elem_value(elem, htab->map.key_size);
 
                bpf_obj_free_fields(htab->map.record, map_value);
        }
@@ -1039,11 +1045,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                        htab_elem_set_ptr(l_new, key_size, pptr);
        } else if (fd_htab_map_needs_adjust(htab)) {
                size = round_up(size, 8);
-               memcpy(l_new->key + round_up(key_size, 8), value, size);
+               memcpy(htab_elem_value(l_new, key_size), value, size);
        } else {
-               copy_map_value(&htab->map,
-                              l_new->key + round_up(key_size, 8),
-                              value);
+               copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
        }
 
        l_new->hash = hash;
@@ -1106,7 +1110,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                if (l_old) {
                        /* grab the element lock and update value in place */
                        copy_map_value_locked(map,
-                                             l_old->key + round_up(key_size, 8),
+                                             htab_elem_value(l_old, key_size),
                                              value, false);
                        return 0;
                }
@@ -1134,7 +1138,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                 * and update element in place
                 */
                copy_map_value_locked(map,
-                                     l_old->key + round_up(key_size, 8),
+                                     htab_elem_value(l_old, key_size),
                                      value, false);
                ret = 0;
                goto err;
@@ -1220,8 +1224,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
        l_new = prealloc_lru_pop(htab, key, hash);
        if (!l_new)
                return -ENOMEM;
-       copy_map_value(&htab->map,
-                      l_new->key + round_up(map->key_size, 8), value);
+       copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value);
 
        ret = htab_lock_bucket(b, &flags);
        if (ret)
@@ -1500,10 +1503,10 @@ static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
                        /* We only free timer on uref dropping to zero */
                        if (btf_record_has_field(htab->map.record, BPF_TIMER))
                                bpf_obj_free_timer(htab->map.record,
-                                                  l->key + round_up(htab->map.key_size, 8));
+                                                  htab_elem_value(l, htab->map.key_size));
                        if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
                                bpf_obj_free_workqueue(htab->map.record,
-                                                      l->key + round_up(htab->map.key_size, 8));
+                                                      htab_elem_value(l, htab->map.key_size));
                }
                cond_resched_rcu();
        }
@@ -1615,15 +1618,12 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
                        off += roundup_value_size;
                }
        } else {
-               u32 roundup_key_size = round_up(map->key_size, 8);
+               void *src = htab_elem_value(l, map->key_size);
 
                if (flags & BPF_F_LOCK)
-                       copy_map_value_locked(map, value, l->key +
-                                             roundup_key_size,
-                                             true);
+                       copy_map_value_locked(map, value, src, true);
                else
-                       copy_map_value(map, value, l->key +
-                                      roundup_key_size);
+                       copy_map_value(map, value, src);
                /* Zeroing special fields in the temp buffer */
                check_and_init_map_value(map, value);
        }
@@ -1680,12 +1680,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
                                   bool is_percpu)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
        void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
        void __user *uvalues = u64_to_user_ptr(attr->batch.values);
        void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
        void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        u32 batch, max_count, size, bucket_size, map_id;
+       u32 bucket_cnt, total, key_size, value_size;
        struct htab_elem *node_to_free = NULL;
        u64 elem_map_flags, map_flags;
        struct hlist_nulls_head *head;
@@ -1720,7 +1720,6 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
                return -ENOENT;
 
        key_size = htab->map.key_size;
-       roundup_key_size = round_up(htab->map.key_size, 8);
        value_size = htab->map.value_size;
        size = round_up(value_size, 8);
        if (is_percpu)
@@ -1812,7 +1811,7 @@ again_nocopy:
                                off += size;
                        }
                } else {
-                       value = l->key + roundup_key_size;
+                       value = htab_elem_value(l, key_size);
                        if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
                                struct bpf_map **inner_map = value;
 
@@ -2063,11 +2062,11 @@ static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 {
        struct bpf_iter_seq_hash_map_info *info = seq->private;
-       u32 roundup_key_size, roundup_value_size;
        struct bpf_iter__bpf_map_elem ctx = {};
        struct bpf_map *map = info->map;
        struct bpf_iter_meta meta;
        int ret = 0, off = 0, cpu;
+       u32 roundup_value_size;
        struct bpf_prog *prog;
        void __percpu *pptr;
 
@@ -2077,10 +2076,9 @@ static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
                ctx.meta = &meta;
                ctx.map = info->map;
                if (elem) {
-                       roundup_key_size = round_up(map->key_size, 8);
                        ctx.key = elem->key;
                        if (!info->percpu_value_buf) {
-                               ctx.value = elem->key + roundup_key_size;
+                               ctx.value = htab_elem_value(elem, map->key_size);
                        } else {
                                roundup_value_size = round_up(map->value_size, 8);
                                pptr = htab_elem_get_ptr(elem, map->key_size);
@@ -2165,7 +2163,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
        struct hlist_nulls_head *head;
        struct hlist_nulls_node *n;
        struct htab_elem *elem;
-       u32 roundup_key_size;
        int i, num_elems = 0;
        void __percpu *pptr;
        struct bucket *b;
@@ -2180,7 +2177,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 
        is_percpu = htab_is_percpu(htab);
 
-       roundup_key_size = round_up(map->key_size, 8);
        /* migration has been disabled, so percpu value prepared here will be
         * the same as the one seen by the bpf program with
         * bpf_map_lookup_elem().
@@ -2196,7 +2192,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
                                pptr = htab_elem_get_ptr(elem, map->key_size);
                                val = this_cpu_ptr(pptr);
                        } else {
-                               val = elem->key + roundup_key_size;
+                               val = htab_elem_value(elem, map->key_size);
                        }
                        num_elems++;
                        ret = callback_fn((u64)(long)map, (u64)(long)key,