	int bucket __aligned(8);
};
+union bpf_udp_iter_batch_item {
+	struct sock *sk;
+};
+
struct bpf_udp_iter_state {
	struct udp_iter_state state;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	int offset;
-	struct sock **batch;
+	union bpf_udp_iter_batch_item *batch;
};
@@ ... @@ static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				}
				if (iter->end_sk < iter->max_sk) {
					sock_hold(sk);
-					iter->batch[iter->end_sk++] = sk;
+					iter->batch[iter->end_sk++].sk = sk;
				}
				batch_sks++;
			}
		}
			/* Pick up where we left off. */
-			sk = iter->batch[iter->end_sk - 1];
+			sk = iter->batch[iter->end_sk - 1].sk;
			sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
					      struct sock,
					      __sk_common.skc_portaddr_node);
		}
	WARN_ON_ONCE(iter->end_sk != batch_sks);
-	return iter->end_sk ? iter->batch[0] : NULL;
+	return iter->end_sk ? iter->batch[0].sk : NULL;
}
@@ ... @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	 * done with seq_show(), so unref the iter->cur_sk.
	 */
	if (iter->cur_sk < iter->end_sk) {
-		sock_put(iter->batch[iter->cur_sk++]);
+		sock_put(iter->batch[iter->cur_sk++].sk);
		++iter->offset;
	}
	 * available in the current bucket batch.
	 */
	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sk;
	else
		/* Prepare a new batch. */
		sk = bpf_iter_udp_batch(seq);
	unsigned int cur_sk = iter->cur_sk;
	while (cur_sk < iter->end_sk)
-		sock_put(iter->batch[cur_sk++]);
+		sock_put(iter->batch[cur_sk++].sk);
}
@@ ... @@ static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz, gfp_t flags)
{
-	struct sock **new_batch;
+	union bpf_udp_iter_batch_item *new_batch;
	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
				   flags | __GFP_NOWARN);
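
For readers less familiar with this refactoring pattern, here is a minimal user-space sketch of what the diff does: batch slots become single-member unions wrapping a pointer instead of raw pointers, so allocation sites that size elements with sizeof(*batch) keep working unchanged and only element accesses gain a .sk-style member selector. All names below (item, batch_item, batch_state, grow_batch) are hypothetical illustrations, not kernel code.

/* Illustrative sketch only; mirrors the shape of the change above. */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
};

/* Single-member union wrapping the pointer, analogous to
 * union bpf_udp_iter_batch_item { struct sock *sk; }.
 */
union batch_item {
	struct item *it;
};

struct batch_state {
	unsigned int cur, end, max;
	union batch_item *batch;
};

/* Grows the batch array; sizeof(*new_batch) tracks the element type
 * automatically, just as in the kvmalloc_array() call above.
 */
static int grow_batch(struct batch_state *st, unsigned int new_max)
{
	union batch_item *new_batch;

	new_batch = realloc(st->batch, new_max * sizeof(*new_batch));
	if (!new_batch)
		return -1;
	st->batch = new_batch;
	st->max = new_max;
	return 0;
}

int main(void)
{
	struct batch_state st = { 0 };
	struct item a = { .id = 1 }, b = { .id = 2 };

	if (grow_batch(&st, 2))
		return 1;

	/* Stores go through the union member, mirroring
	 * iter->batch[iter->end_sk++].sk = sk;
	 */
	st.batch[st.end++].it = &a;
	st.batch[st.end++].it = &b;

	/* Reads likewise select the member, mirroring iter->batch[i].sk. */
	while (st.cur < st.end)
		printf("item %d\n", st.batch[st.cur++].it->id);

	free(st.batch);
	return 0;
}

Because the union currently has only one member, each slot keeps the size and layout of a plain pointer, so this patch is behavior-neutral while leaving room to add alternative per-slot representations to the union later without touching the allocation logic.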