bpf: udp: Use bpf_udp_iter_batch_item for bpf_udp_iter_state batch items
author     Jordan Rife <jordan@jrife.io>
           Fri, 2 May 2025 16:15:23 +0000 (09:15 -0700)
committer  Martin KaFai Lau <martin.lau@kernel.org>
           Fri, 2 May 2025 18:46:42 +0000 (11:46 -0700)
Prepare for the next patch that tracks cookies between iterations by
converting struct sock **batch to union bpf_udp_iter_batch_item *batch
inside struct bpf_udp_iter_state.

Signed-off-by: Jordan Rife <jordan@jrife.io>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
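
For context, a minimal standalone sketch (not kernel code) of the type
change this patch makes and of the member it prepares for; the cookie
field shown is an assumption based on the stated intent to track cookies
between iterations, and is not part of this patch:

	struct sock; /* opaque stand-in for the kernel type */

	/* After this patch, each batch slot is a one-member union
	 * rather than a bare struct sock pointer, so a later patch can
	 * overlay another representation in the same slot.
	 */
	union bpf_udp_iter_batch_item {
		struct sock *sk;
		/* assumed follow-up member for the next patch, e.g.: */
		/* __u64 cookie; */
	};

	struct bpf_udp_iter_state_sketch {
		unsigned int cur_sk;  /* index of the current slot in the batch */
		unsigned int end_sk;  /* number of sockets currently batched */
		unsigned int max_sk;  /* allocated batch capacity */
		int offset;           /* position within the current bucket */
		union bpf_udp_iter_batch_item *batch; /* was: struct sock **batch */
	};
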
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f2740802ee86630ee66b83700eb4f7b351a462a9..fe1438b2bcbafa0f89bf8f96200e587f9979b80b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3413,13 +3413,17 @@ struct bpf_iter__udp {
        int bucket __aligned(8);
 };
 
+union bpf_udp_iter_batch_item {
+       struct sock *sk;
+};
+
 struct bpf_udp_iter_state {
        struct udp_iter_state state;
        unsigned int cur_sk;
        unsigned int end_sk;
        unsigned int max_sk;
        int offset;
-       struct sock **batch;
+       union bpf_udp_iter_batch_item *batch;
 };
 
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
@@ -3480,7 +3484,7 @@ fill_batch:
                                }
                                if (iter->end_sk < iter->max_sk) {
                                        sock_hold(sk);
-                                       iter->batch[iter->end_sk++] = sk;
+                                       iter->batch[iter->end_sk++].sk = sk;
                                }
                                batch_sks++;
                        }
@@ -3516,7 +3520,7 @@ fill_batch:
                        }
 
                        /* Pick up where we left off. */
-                       sk = iter->batch[iter->end_sk - 1];
+                       sk = iter->batch[iter->end_sk - 1].sk;
                        sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
                                              struct sock,
                                              __sk_common.skc_portaddr_node);
@@ -3533,7 +3537,7 @@ next_bucket:
        }
 
        WARN_ON_ONCE(iter->end_sk != batch_sks);
-       return iter->end_sk ? iter->batch[0] : NULL;
+       return iter->end_sk ? iter->batch[0].sk : NULL;
 }
 
 static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -3545,7 +3549,7 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         * done with seq_show(), so unref the iter->cur_sk.
         */
        if (iter->cur_sk < iter->end_sk) {
-               sock_put(iter->batch[iter->cur_sk++]);
+               sock_put(iter->batch[iter->cur_sk++].sk);
                ++iter->offset;
        }
 
@@ -3553,7 +3557,7 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         * available in the current bucket batch.
         */
        if (iter->cur_sk < iter->end_sk)
-               sk = iter->batch[iter->cur_sk];
+               sk = iter->batch[iter->cur_sk].sk;
        else
                /* Prepare a new batch. */
                sk = bpf_iter_udp_batch(seq);
@@ -3620,7 +3624,7 @@ static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
        unsigned int cur_sk = iter->cur_sk;
 
        while (cur_sk < iter->end_sk)
-               sock_put(iter->batch[cur_sk++]);
+               sock_put(iter->batch[cur_sk++].sk);
 }
 
 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3890,7 +3894,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
                                      unsigned int new_batch_sz, gfp_t flags)
 {
-       struct sock **new_batch;
+       union bpf_udp_iter_batch_item *new_batch;
 
        new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
                                   flags | __GFP_NOWARN);
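
Note that every access above simply gains a .sk member selector while the
memory layout stays identical: a one-member union has the size and
alignment of that member, so kvmalloc_array(new_batch_sz,
sizeof(*new_batch), ...) requests the same number of bytes before and
after the change. A minimal standalone check (plain C11, not kernel code):

	struct sock; /* opaque stand-in for the kernel type */

	union bpf_udp_iter_batch_item {
		struct sock *sk;
	};

	/* Swapping struct sock ** for union bpf_udp_iter_batch_item *
	 * changes no allocation sizes and no pointer arithmetic.
	 */
	_Static_assert(sizeof(union bpf_udp_iter_batch_item) == sizeof(struct sock *),
		       "batch slot must be exactly one socket pointer wide");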