@@ ... @@ struct bpf_udp_iter_state {
 	unsigned int max_sk;
 	int offset;
 	struct sock **batch;
-	bool st_bucket_done;
 };

 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
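The st_bucket_done flag comes out of the iterator state entirely. The removed assignment further down shows it was already being set unconditionally whenever a bucket was batched, so the flag carried no information of its own: "the current batch is fully processed" can be inferred from the existing cursors, namely iter->cur_sk == iter->end_sk.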
@@ ... @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 	resume_offset = iter->offset;

 	/* The current batch is done, so advance the bucket. */
-	if (iter->st_bucket_done)
+	if (iter->cur_sk == iter->end_sk)
 		state->bucket++;

 	udptable = udp_get_table_seq(seq, net);
@@ ... @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 	 */
 	iter->cur_sk = 0;
 	iter->end_sk = 0;
-	iter->st_bucket_done = true;
 	batch_sks = 0;

 	for (; state->bucket <= udptable->mask; state->bucket++) {
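Both hunks above are in the batching function. The bucket-advance check now asks the equivalent question directly: if every socket in the previous batch was shown (cur_sk has caught up with end_sk), move on to the next hash bucket; otherwise re-batch the same bucket and resume where iteration left off. The unconditional st_bucket_done = true assignment goes away along with the field.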
@@ ... @@
 static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
 {
-	while (iter->cur_sk < iter->end_sk)
-		sock_put(iter->batch[iter->cur_sk++]);
+	unsigned int cur_sk = iter->cur_sk;
+
+	while (cur_sk < iter->end_sk)
+		sock_put(iter->batch[cur_sk++]);
 }

 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
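This rewrite matters now that cur_sk doubles as the "batch done" signal: releasing the batch's socket references must not consume the cursor. By walking a local copy, bpf_iter_udp_put_batch() leaves iter->cur_sk and iter->end_sk intact, so the next call into the batching function can still see that the bucket was only partially processed and re-batch it rather than skipping ahead.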
@@ ... @@ static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
 		(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
 	}

-	if (iter->cur_sk < iter->end_sk) {
+	if (iter->cur_sk < iter->end_sk)
 		bpf_iter_udp_put_batch(iter);
-	}
 }

 static const struct seq_operations bpf_iter_udp_seq_ops = {
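With the flag gone, stop() shrinks to a plain put: if the batch still holds unshown sockets, drop their references and leave the cursors alone. Clearing st_bucket_done here used to be how stop() forced a re-batch of the unfinished bucket; cur_sk != end_sk now signals that implicitly.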
@@ ... @@ static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
 	if (ret)
 		bpf_iter_fini_seq_net(priv_data);

+	iter->state.bucket = -1;
+
 	return ret;
 }
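One initialization covers the resulting edge case: on the very first batching call, cur_sk == end_sk (both zero), so the bucket is advanced before anything has been batched. Starting state.bucket at -1 makes that first advance land on bucket 0 instead of silently skipping it.

For context, here is a minimal sketch of a BPF program that consumes this iterator, modeled on the kernel selftests' iter/udp programs. The program name dump_udp and the output format are illustrative only; the sketch assumes a BTF-generated vmlinux.h and libbpf's helper headers.

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative iter/udp consumer; not part of the patch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/udp")
int dump_udp(struct bpf_iter__udp *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct udp_sock *udp_sk = ctx->udp_sk;

	/* The iterator makes one final call with a NULL socket when the
	 * walk ends; see the udp_prog_seq_show() call in seq_stop above.
	 */
	if (!udp_sk)
		return 0;

	/* ctx->bucket is the hash slot the batching code was walking
	 * when it snapshotted this socket.
	 */
	BPF_SEQ_PRINTF(seq, "bucket %d port %u\n", ctx->bucket,
		       udp_sk->inet.inet_num);
	return 0;
}

Each socket the batching code above pins and hands to the seq_file machinery arrives here as ctx->udp_sk, one callback per socket per batch.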