From: Igor Putovny
Date: Mon, 19 Aug 2024 13:24:52 +0000 (+0200)
Subject: Bugfix
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5673e838a8b9811e0209b64ce5c20ff8c9a659b3;p=thirdparty%2Fbird.git

Bugfix

While implementing the new mechanism for storing potential buckets, it
was found that the aggregator allocates more than 4000 buckets, even
though the routes in the configuration file should be grouped into
approximately 200 buckets.

Upon closer inspection it became clear that tmp_bucket in
aggregator_rt_notify() was allocated in the linpool when it should have
been allocated on the stack. Moreover, tmp_bucket was allocated without
the additional space for aggr_data, so these data were never copied
from tmp_bucket to new_bucket.

This was the source of false negatives from HASH_FIND. Buckets are
compared not only by their hashes but also by their lists of aggr_data.
Because same_val_list() was reading beyond the allocated memory, two
buckets never compared equal. HASH_FIND therefore returned NULL, which
prompted the aggregator to create yet another bucket even though a
bucket with the same hash already existed.
---

diff --git a/proto/aggregator/aggregator.c b/proto/aggregator/aggregator.c
index d49fdbab2..299c04eeb 100644
--- a/proto/aggregator/aggregator.c
+++ b/proto/aggregator/aggregator.c
@@ -1554,7 +1554,7 @@ aggregator_rt_notify(struct proto *P, struct channel *src_ch, net *net, rte *new
     return;
 
   /* Evaluate route attributes. */
-  struct aggregator_bucket *tmp_bucket = lp_allocz(p->bucket_pool, sizeof(*tmp_bucket));
+  struct aggregator_bucket *tmp_bucket = allocz(sizeof(*tmp_bucket) + sizeof(tmp_bucket->aggr_data[0]) * p->aggr_on_count);
   assert(tmp_bucket->id == 0);
 
   for (uint val_idx = 0; val_idx < p->aggr_on_count; val_idx++)
@@ -1663,7 +1663,8 @@ aggregator_rt_notify(struct proto *P, struct channel *src_ch, net *net, rte *new
     ;
   else
   {
-    new_bucket = tmp_bucket;
+    new_bucket = lp_allocz(p->bucket_pool, sizeof(*new_bucket) + sizeof(new_bucket->aggr_data[0]) * p->aggr_on_count);
+    memcpy(new_bucket, tmp_bucket, sizeof(*new_bucket) + sizeof(new_bucket->aggr_data[0]) * p->aggr_on_count);
     HASH_INSERT2(p->buckets, AGGR_BUCK, p->p.pool, new_bucket);
   }
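
For readers unfamiliar with the idiom, here is a minimal standalone
sketch (with hypothetical struct and function names, not BIRD's actual
definitions) of the flexible-array allocation and comparison pattern
the fix relies on: the allocation must reserve room for the trailing
array, otherwise any comparison of that array reads past the end of
the object, exactly as described above for same_val_list().

  #include <stdlib.h>
  #include <string.h>
  #include <stdint.h>

  struct bucket {
    uint32_t hash;
    uint32_t count;
    uint64_t data[];   /* trailing per-attribute values, like aggr_data */
  };

  /* Correct: reserve space for the trailing array, mirroring the
   * sizeof(*b) + sizeof(b->data[0]) * count arithmetic in the patch.
   * Allocating only sizeof(*b) would reproduce the bug. */
  static struct bucket *
  bucket_new(uint32_t hash, uint32_t count)
  {
    struct bucket *b = calloc(1, sizeof(*b) + sizeof(b->data[0]) * count);
    b->hash = hash;
    b->count = count;
    return b;
  }

  /* Buckets are equal only if both the hash and all trailing values
   * match; comparing a bucket that lacks the trailing space is
   * undefined behavior and in practice yields false negatives. */
  static int
  bucket_same(const struct bucket *a, const struct bucket *b)
  {
    return a->hash == b->hash
        && a->count == b->count
        && !memcmp(a->data, b->data, sizeof(a->data[0]) * a->count);
  }

  int main(void)
  {
    struct bucket *a = bucket_new(42, 2);
    struct bucket *b = bucket_new(42, 2);
    a->data[0] = b->data[0] = 7;   /* writing here is safe only because */
    a->data[1] = b->data[1] = 9;   /* bucket_new() reserved the space   */
    int equal = bucket_same(a, b); /* 1: hashes and data lists match    */
    free(a);
    free(b);
    return !equal;
  }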