static void set_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
static void clr_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
static void setclr_flags (sockaddr_u *, endpt *, struct req_pkt *, u_long);
-static void list_restrict4 (restrict_u *, struct info_restrict **);
-static void list_restrict6 (restrict_u *, struct info_restrict **);
+static void list_restrict4 (const restrict_u *, struct info_restrict **);
+static void list_restrict6 (const restrict_u *, struct info_restrict **);
static void list_restrict (sockaddr_u *, endpt *, struct req_pkt *);
static void do_resaddflags (sockaddr_u *, endpt *, struct req_pkt *);
static void do_ressubflags (sockaddr_u *, endpt *, struct req_pkt *);
ms->hashcount[i] = (u_char)
		    min((u_int)peer_hash_count[i], UCHAR_MAX);
- more_pkt();
+ (void) more_pkt();
flush_pkt();
}
li->compliance = htonl((u_int32)(tc_counter));
li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
- more_pkt();
+ (void) more_pkt();
flush_pkt();
}
req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
+/* There have been some issues with the restrict list processing,
+ * ranging from deep recursion (resulting in stack overflows) to
+ * overfull reply buffers.
+ *
+ * To avoid this trouble, the list reversal is done iteratively,
+ * using a scratch pad.
+ */
+typedef struct RestrictStack RestrictStackT;
+struct RestrictStack {
+ RestrictStackT *link;
+ size_t fcnt;
+ const restrict_u *pres[63];
+};
+
+static size_t
+getStackSheetSize(
+ RestrictStackT *sp
+ )
+{
+ if (sp)
+ return sizeof(sp->pres)/sizeof(sp->pres[0]);
+ return 0u;
+}
+
+static int/*BOOL*/
+pushRestriction(
+ RestrictStackT **spp,
+ const restrict_u *ptr
+ )
+{
+ RestrictStackT *sp;
+
+ if (NULL == (sp = *spp) || 0 == sp->fcnt) {
+ /* need another sheet in the scratch pad */
+ sp = emalloc(sizeof(*sp));
+ sp->link = *spp;
+ sp->fcnt = getStackSheetSize(sp);
+ *spp = sp;
+ }
+ sp->pres[--sp->fcnt] = ptr;
+ return TRUE;
+}
+
+static int/*BOOL*/
+popRestriction(
+ RestrictStackT **spp,
+ const restrict_u **opp
+ )
+{
+ RestrictStackT *sp;
+
+ if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
+ return FALSE;
+
+ *opp = sp->pres[sp->fcnt++];
+ if (sp->fcnt >= getStackSheetSize(sp)) {
+ /* discard sheet from scratch pad */
+ *spp = sp->link;
+ free(sp);
+ }
+ return TRUE;
+}
+
+static void
+flushRestrictionStack(
+ RestrictStackT **spp
+ )
+{
+ RestrictStackT *sp;
+
+ while (NULL != (sp = *spp)) {
+ *spp = sp->link;
+ free(sp);
+ }
+}
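
For illustration, here is a minimal standalone sketch of the same scratch-pad reversal pattern. It is not part of the patch: the hypothetical `node` type stands in for restrict_u, the sheet is shrunk to four slots so the chaining of sheets is visible, and plain malloc() is used where the helpers above rely on emalloc(). Pushing while walking the list head to tail and then popping yields the entries in reverse order, without recursion and with bounded call-stack use.

/* Illustrative sketch only: `node` is a stand-in for restrict_u and
 * push_node()/pop_node() mirror pushRestriction()/popRestriction()
 * above in simplified form.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct node node;
struct node {
	node *link;
	int   val;
};

typedef struct sheet sheet;
struct sheet {
	sheet      *link;
	size_t      fcnt;	/* free slots left in pres[] */
	const node *pres[4];	/* tiny sheet to force chaining */
};

static void
push_node(sheet **spp, const node *p)
{
	sheet *sp = *spp;

	if (NULL == sp || 0 == sp->fcnt) {
		/* current sheet full or absent: chain a new one */
		sp = malloc(sizeof(*sp));
		if (NULL == sp)
			abort();	/* the patch uses emalloc() here */
		sp->link = *spp;
		sp->fcnt = sizeof(sp->pres) / sizeof(sp->pres[0]);
		*spp = sp;
	}
	sp->pres[--sp->fcnt] = p;
}

static int
pop_node(sheet **spp, const node **opp)
{
	sheet *sp = *spp;

	/* a drained sheet is freed below, so a non-NULL top sheet
	 * always holds at least one entry */
	if (NULL == sp)
		return 0;
	*opp = sp->pres[sp->fcnt++];
	if (sp->fcnt >= sizeof(sp->pres) / sizeof(sp->pres[0])) {
		*spp = sp->link;	/* sheet drained: discard it */
		free(sp);
	}
	return 1;
}

int
main(void)
{
	node        n[6];
	const node *p;
	sheet      *pad = NULL;
	int         i;

	/* build the list 5 -> 4 -> 3 -> 2 -> 1 -> 0 */
	for (i = 0; i < 6; i++) {
		n[i].val = i;
		n[i].link = (i > 0) ? &n[i - 1] : NULL;
	}
	/* push while walking head to tail ... */
	for (p = &n[5]; p != NULL; p = p->link)
		push_node(&pad, p);
	/* ... then pop to visit tail to head: prints 0 1 2 3 4 5 */
	while (pop_node(&pad, &p))
		printf("%d ", p->val);
	putchar('\n');
	return 0;
}
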
+
/*
- * list_restrict4 - recursive helper for list_restrict dumps IPv4
+ * list_restrict4 - iterative helper for list_restrict; dumps the IPv4
* restriction list in reverse order.
*/
static void
list_restrict4(
- restrict_u * res,
+ const restrict_u * res,
struct info_restrict ** ppir
)
{
+ RestrictStackT * rpad;
struct info_restrict * pir;
- if (res->link != NULL)
- list_restrict4(res->link, ppir);
-
pir = *ppir;
- pir->addr = htonl(res->u.v4.addr);
- if (client_v6_capable)
- pir->v6_flag = 0;
- pir->mask = htonl(res->u.v4.mask);
- pir->count = htonl(res->count);
- pir->flags = htons(res->flags);
- pir->mflags = htons(res->mflags);
- *ppir = (struct info_restrict *)more_pkt();
+ for (rpad = NULL; res; res = res->link)
+ if (!pushRestriction(&rpad, res))
+ break;
+
+ while (pir && popRestriction(&rpad, &res)) {
+ pir->addr = htonl(res->u.v4.addr);
+ if (client_v6_capable)
+ pir->v6_flag = 0;
+ pir->mask = htonl(res->u.v4.mask);
+ pir->count = htonl(res->count);
+ pir->flags = htons(res->flags);
+ pir->mflags = htons(res->mflags);
+ pir = (struct info_restrict *)more_pkt();
+ }
+ flushRestrictionStack(&rpad);
+ *ppir = pir;
}
-
/*
- * list_restrict6 - recursive helper for list_restrict dumps IPv6
+ * list_restrict6 - iterative helper for list_restrict; dumps the IPv6
* restriction list in reverse order.
*/
static void
list_restrict6(
- restrict_u * res,
+ const restrict_u * res,
struct info_restrict ** ppir
)
{
+ RestrictStackT * rpad;
struct info_restrict * pir;
- if (res->link != NULL)
- list_restrict6(res->link, ppir);
-
pir = *ppir;
- pir->addr6 = res->u.v6.addr;
- pir->mask6 = res->u.v6.mask;
- pir->v6_flag = 1;
- pir->count = htonl(res->count);
- pir->flags = htons(res->flags);
- pir->mflags = htons(res->mflags);
- *ppir = (struct info_restrict *)more_pkt();
+ for (rpad = NULL; res; res = res->link)
+ if (!pushRestriction(&rpad, res))
+ break;
+
+ while (pir && popRestriction(&rpad, &res)) {
+ pir->addr6 = res->u.v6.addr;
+ pir->mask6 = res->u.v6.mask;
+ pir->v6_flag = 1;
+ pir->count = htonl(res->count);
+ pir->flags = htons(res->flags);
+ pir->mflags = htons(res->mflags);
+ pir = (struct info_restrict *)more_pkt();
+ }
+ flushRestrictionStack(&rpad);
+ *ppir = pir;
}
/*
 * The restriction lists are kept sorted in the reverse of their
 * original order. To preserve the output semantics,
- * dump each list in reverse order. A recursive helper function
- * achieves that.
+ * dump each list in reverse order. The iterative workers take
+ * care of that.
*/
list_restrict4(restrictlist4, &ir);
if (client_v6_capable)
u_long trust
)
{
- register u_long *kp;
+ register uint32_t *kp;
register int items;
items = INFO_NITEMS(inpkt->err_nitems);
- kp = (u_long *)&inpkt->u;
+ kp = (uint32_t *)&inpkt->u;
while (items-- > 0) {
authtrust(*kp, trust);
kp++;
it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
v6sizeof(struct info_trap));
- for (i = 0, tr = ctl_traps; i < COUNTOF(ctl_traps); i++, tr++) {
+ for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
if (tr->tr_flags & TRAP_INUSE) {
if (IS_IPV4(&tr->tr_addr)) {
if (tr->tr_localaddr == any_interface)
ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
sizeof(struct info_clock));
- while (items-- > 0) {
+ while (items-- > 0 && ic) {
NSRCADR(&addr) = *clkaddr++;
if (!ISREFCLOCKADR(&addr) || NULL ==
findexistingpeer(&addr, NULL, NULL, -1, 0)) {
ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
sizeof(struct info_clkbug));
- while (items-- > 0) {
+ while (items-- > 0 && ic) {
NSRCADR(&addr) = *clkaddr++;
if (!ISREFCLOCKADR(&addr) || NULL ==
findexistingpeer(&addr, NULL, NULL, -1, 0)) {
struct info_if_stats **ifsp = (struct info_if_stats **)data;
struct info_if_stats *ifs = *ifsp;
endpt *ep = interface_info->ep;
+
+ if (NULL == ifs)
+ return;
ZERO(*ifs);
if (IS_IPV6(&ep->sin)) {
- if (!client_v6_capable) {
+ if (!client_v6_capable)
return;
- }
ifs->v6_flag = 1;
ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);