From: Juergen Perlinger
Date: Sat, 21 Sep 2019 05:31:44 +0000 (+0200)
Subject: [Bug 3615] accelerate refclock startup
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=81e89caea116a775621df984d2a23a7727ba3860;p=thirdparty%2Fntp.git

[Bug 3615] accelerate refclock startup

bk: 5d85b5c0D5waxNScUaNxlUqEQoT9LA
---

diff --git a/ChangeLog b/ChangeLog
index 20b3c6325..ed4ba2a84 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -2,6 +2,7 @@
 * [Sec 3610] process_control() should bail earlier on short packets.
   stenn@
   - Reported by Philippe Antoine
+* [Bug 3615] accelerate refclock startup
 * [Bug 3613] Propagate noselect to mobilized pool servers
   - Reported by Martin Burnicki
 * [Bug 3611] NMEA time interpreted incorrectly
diff --git a/include/ntp_refclock.h b/include/ntp_refclock.h
index 4b3951d94..4a9bfa140 100644
--- a/include/ntp_refclock.h
+++ b/include/ntp_refclock.h
@@ -142,6 +142,7 @@ struct refclockproc {
         u_char  currentstatus;  /* clock status */
         u_char  lastevent;      /* last exception event */
         u_char  type;           /* clock type */
+        u_char  inpoll;         /* waiting for 'refclock_receive()' */
         const char *clockdesc;  /* clock description */
         u_long  nextaction;     /* local activity timeout */
         void    (*action)(struct peer *); /* timeout callback */
diff --git a/ntpd/ntp_loopfilter.c b/ntpd/ntp_loopfilter.c
index b9ed675de..3fce31414 100644
--- a/ntpd/ntp_loopfilter.c
+++ b/ntpd/ntp_loopfilter.c
@@ -168,6 +168,9 @@ u_char  sys_poll;       /* time constant/poll (log2 s) */
 int     tc_counter;     /* jiggle counter */
 double  last_offset;    /* last offset (s) */
 
+u_int   tc_twinlo;      /* TC step down not before this time */
+u_int   tc_twinhi;      /* TC step up not before this time */
+
 /*
  * Huff-n'-puff filter variables
  */
@@ -873,34 +876,52 @@ local_clock(
          * increased, otherwise it is decreased. A bit of hysteresis
          * helps calm the dance. Works best using burst mode. Don't
          * fiddle with the poll during the startup clamp period.
+         * [Bug 3615] also observe time gates to avoid eager stepping
          */
         if (freq_cnt > 0) {
                 tc_counter = 0;
+                tc_twinlo = current_time;
+                tc_twinhi = current_time;
         } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
                 tc_counter += sys_poll;
                 if (tc_counter > CLOCK_LIMIT) {
                         tc_counter = CLOCK_LIMIT;
-                        if (sys_poll < peer->maxpoll) {
-                                tc_counter = 0;
-                                sys_poll++;
-                        }
+                        if (sys_poll < peer->maxpoll)
+                                sys_poll += (current_time >= tc_twinhi);
                 }
         } else {
                 tc_counter -= sys_poll << 1;
                 if (tc_counter < -CLOCK_LIMIT) {
                         tc_counter = -CLOCK_LIMIT;
-                        if (sys_poll > peer->minpoll) {
-                                tc_counter = 0;
-                                sys_poll--;
-                        }
+                        if (sys_poll > peer->minpoll)
+                                sys_poll -= (current_time >= tc_twinlo);
                 }
         }
 
         /*
          * If the time constant has changed, update the poll variables.
+         *
+         * [bug 3615] also set new time gates
+         * The time limit for stepping down will be half the TC interval
+         * or 60 secs from now, whatever is bigger, and the step up time
+         * limit will be half the TC interval after the step down limit.
+         *
+         * The 'sys_poll' value affects the servo loop gain, and
+         * overshooting sys_poll slows it down unnecessarily. Stepping
+         * down too fast also has bad effects.
+         *
+         * The 'tc_counter' dance itself is something that *should*
+         * happen *once* every (1 << sys_poll) seconds, I think, but
+         * that's not how it works right now, and adding time guards
+         * seems the least intrusive way to handle this.
         */
-        if (osys_poll != sys_poll)
+        if (osys_poll != sys_poll) {
+                u_int deadband = 1u << (sys_poll - 1);
+                tc_counter = 0;
+                tc_twinlo = current_time + max(deadband, 60);
+                tc_twinhi = tc_twinlo + deadband;
                 poll_update(peer, sys_poll);
+        }
 
         /*
          * Yibbidy, yibbbidy, yibbidy; that'h all folks.
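The gate arithmetic in the hunk above is easier to follow with concrete numbers. The following is a minimal standalone sketch, not part of the patch: the MAX macro stands in for ntpd's max(), and current_time is an arbitrary stand-in for ntpd's seconds counter.

/* Sketch only: the [Bug 3615] time-gate arithmetic from local_clock(),
 * reproduced with plain C stand-ins.  Build: cc -o tcgate tcgate.c */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned int current_time = 1000;   /* stand-in seconds counter */
        int sys_poll;

        for (sys_poll = 4; sys_poll <= 10; sys_poll += 2) {
                /* same arithmetic as the new if-block above */
                unsigned int deadband  = 1u << (sys_poll - 1);
                unsigned int tc_twinlo = current_time + MAX(deadband, 60);
                unsigned int tc_twinhi = tc_twinlo + deadband;

                printf("sys_poll=%2d poll=%4us: step down at +%us,"
                       " step up at +%us\n", sys_poll, 1u << sys_poll,
                       tc_twinlo - current_time, tc_twinhi - current_time);
        }
        return 0;
}

At the usual startup poll of 6 (64 s) the 60 s floor dominates: after a time-constant change, stepping down is blocked for 60 s and stepping up for 92 s.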
diff --git a/ntpd/ntp_refclock.c b/ntpd/ntp_refclock.c
index 9dea2e7e6..af0e284ae 100644
--- a/ntpd/ntp_refclock.c
+++ b/ntpd/ntp_refclock.c
@@ -67,9 +67,10 @@ int     cal_enable;     /* enable refclock calibrate */
 /*
  * Forward declarations
  */
-static  int refclock_cmpl_fp (const void *, const void *);
-static  int refclock_sample (struct refclockproc *);
-static  int refclock_ioctl(int, u_int);
+static  int  refclock_cmpl_fp (const void *, const void *);
+static  int  refclock_sample (struct refclockproc *);
+static  int  refclock_ioctl(int, u_int);
+static  void refclock_checkburst(struct peer *, struct refclockproc *);
 
 /* circular buffer functions
  *
@@ -100,6 +101,16 @@ static double clk_pop_sample(
         return pp->filter[pp->codeproc];
 }
 
+static inline u_int clk_cnt_sample(
+        struct refclockproc * const pp
+        )
+{
+        u_int retv = pp->coderecv - pp->codeproc;
+        if (retv > MAXSTAGE)
+                retv += MAXSTAGE;
+        return retv;
+}
+
 #else
 
 static inline void clk_add_sample(
@@ -123,6 +134,13 @@ static inline double clk_pop_sample(
         return pp->filter[pp->codeproc];
 }
 
+static inline u_int clk_cnt_sample(
+        struct refclockproc * const pp
+        )
+{
+        return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
+}
+
 #endif
 
 /*
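Both clk_cnt_sample() variants count the samples sitting between the producer index (coderecv) and the consumer index (codeproc), and both survive index wraparound through unsigned arithmetic. A small harness, my own sketch rather than ntpd code, can cross-check them against an explicitly maintained fill count; it assumes the indices advance modulo MAXSTAGE, and picks 64 as an arbitrary power of two so the masking variant applies as well. The helper names cnt_generic/cnt_pow2 are mine.

/* Sketch only: cross-check the two clk_cnt_sample() counting tricks
 * against a reference fill count, across index wraparound. */
#include <assert.h>
#include <stdio.h>

#define MAXSTAGE 64u    /* assumed power of two for the mask variant */

static unsigned cnt_generic(unsigned recv, unsigned proc)
{
        unsigned retv = recv - proc;    /* unsigned: may wrap "negative" */
        if (retv > MAXSTAGE)
                retv += MAXSTAGE;       /* wraps back into 0..MAXSTAGE */
        return retv;
}

static unsigned cnt_pow2(unsigned recv, unsigned proc)
{
        return (recv - proc) & (MAXSTAGE - 1);  /* needs a power of two */
}

int main(void)
{
        unsigned coderecv = 0, codeproc = 0, pending = 0, i;

        for (i = 0; i < 100000; i++) {
                if ((i % 3) && pending < MAXSTAGE - 1) {
                        /* "clk_add_sample": producer index advances */
                        coderecv = (coderecv + 1) % MAXSTAGE;
                        pending++;
                } else if (pending > 0) {
                        /* "clk_pop_sample": consumer index advances */
                        codeproc = (codeproc + 1) % MAXSTAGE;
                        pending--;
                }
                assert(cnt_generic(coderecv, codeproc) == pending);
                assert(cnt_pow2(coderecv, codeproc) == pending);
        }
        puts("both variants agree with the reference count");
        return 0;
}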
@@ -382,6 +400,7 @@ refclock_transmit(
         } else {
                 peer->burst--;
         }
+        peer->procptr->inpoll = TRUE;
         if (refclock_conf[clktype]->clock_poll != noentry)
                 (refclock_conf[clktype]->clock_poll)(unit, peer);
         poll_update(peer, peer->hpoll);
@@ -490,6 +509,7 @@ refclock_process_offset(
         L_SUB(&lftemp, &lastrec);
         LFPTOD(&lftemp, doffset);
         clk_add_sample(pp, doffset + fudge);
+        refclock_checkburst(pp->io.srcclock, pp);
 }
 
 
@@ -501,7 +521,7 @@
  * seconds and milliseconds/microseconds to internal timestamp format,
  * then constructs a new entry in the median filter circular buffer.
  * Return success (1) if the data are correct and consistent with the
- * converntional calendar.
+ * conventional calendar.
  *
  * Important for PPS users: Normally, the pp->lastrec is set to the
  * system time when the on-time character is received and the pp->year,
@@ -522,7 +542,7 @@ refclock_process_f(
          * seconds and milliseconds/microseconds of the timecode. Use
          * clocktime() for the aggregate seconds and the msec/usec for
          * the fraction, when present. Note that this code relies on the
-         * filesystem time for the years and does not use the years of
+         * file system time for the years and does not use the years of
          * the timecode.
          */
         if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
@@ -642,6 +662,7 @@ refclock_receive(
          * filter.
          */
         pp = peer->procptr;
+        pp->inpoll = FALSE;
         peer->leap = pp->leap;
         if (peer->leap == LEAP_NOTINSYNC)
                 return;
@@ -652,7 +673,7 @@ refclock_receive(
                 report_event(PEVNT_REACH, peer, NULL);
                 peer->timereachable = current_time;
         }
-        peer->reach |= 1;
+        peer->reach = (peer->reach << (peer->reach & 1)) | 1;
         peer->reftime = pp->lastref;
         peer->aorg = pp->lastrec;
         peer->rootdisp = pp->disp;
@@ -1478,6 +1499,7 @@ refclock_pps(
         pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
         pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
         clk_add_sample(pp, dcorr);
+        refclock_checkburst(peer, pp);
 
 #ifdef DEBUG
         if (debug > 1)
@@ -1621,6 +1643,55 @@ refclock_ppsaugment(
 # endif
 }
 
+/*
+ * -------------------------------------------------------------------
+ * check if it makes sense to schedule an 'early' poll to get the clock
+ * up fast after start or longer signal dropout.
+ */
+static void
+refclock_checkburst(
+        struct peer *         peer,
+        struct refclockproc * pp
+        )
+{
+        uint32_t limit;  /* when we should poll */
+        u_int    needs;  /* needed number of samples */
+
+        /* Paranoia: stop here if peer and clockproc don't match up.
+         * And when a poll is actually pending, we don't have to do
+         * anything, either. Likewise if the reach mask is full, of
+         * course, and if the filter has stabilized.
+         */
+        if (pp->inpoll || (peer->procptr != pp) ||
+            ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
+                return;
+
+        /* If the next poll is soon enough, bail out, too: */
+        limit = current_time + 1;
+        if (peer->nextdate <= limit)
+                return;
+
+        /* Derive the number of samples needed from the popcount of the
+         * reach mask.  With less samples available, we break away.
+         */
+        needs  = peer->reach;
+        needs -= (needs >> 1) & 0x55;
+        needs  = (needs & 0x33) + ((needs >> 2) & 0x33);
+        needs  = (needs + (needs >> 4)) & 0x0F;
+        if (needs > 6)
+                needs = 6;
+        else if (needs < 3)
+                needs = 3;
+        if (clk_cnt_sample(pp) < needs)
+                return;
+
+        /* Get serious. Reduce the poll to minimum and schedule early.
+         * (Changing the peer poll is probably in vain, as it will be
+         * re-adjusted, but maybe some time the hint will work...)
+         */
+        peer->hpoll = peer->minpoll;
+        peer->nextdate = limit;
+}
 
 /*
  * -------------------------------------------------------------------
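The three shift-and-mask steps in refclock_checkburst() are a standard SWAR population count specialized to the 8-bit reach register. The following sketch is my own verification harness, not part of the patch; the popcount8_* names are mine, and the second loop applies the 3..6 clamp the same way the function does.

/* Sketch only: verify the SWAR popcount from refclock_checkburst()
 * against a naive bit loop for every possible reach mask. */
#include <assert.h>
#include <stdio.h>

static unsigned popcount8_swar(unsigned v)
{
        v -= (v >> 1) & 0x55;                   /* 4 x 2-bit sums */
        v  = (v & 0x33) + ((v >> 2) & 0x33);    /* 2 x 4-bit sums */
        return (v + (v >> 4)) & 0x0F;           /* one 8-bit sum */
}

static unsigned popcount8_naive(unsigned v)
{
        unsigned n = 0;
        for (; v != 0; v >>= 1)
                n += v & 1;
        return n;
}

int main(void)
{
        unsigned reach, needs;

        for (reach = 0; reach < 256; reach++)
                assert(popcount8_swar(reach) == popcount8_naive(reach));

        /* the clamp as refclock_checkburst() applies it: between 3 and
         * 6 filter samples, depending on how full the reach mask is */
        for (reach = 0; reach < 256; reach += 37) {
                needs = popcount8_swar(reach);
                if (needs > 6)
                        needs = 6;
                else if (needs < 3)
                        needs = 3;
                printf("reach=0x%02X popcount=%u needs=%u\n",
                       reach, popcount8_swar(reach), needs);
        }
        return 0;
}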
diff --git a/ntpd/refclock_nmea.c b/ntpd/refclock_nmea.c
index 211537326..b9ac1e33a 100644
--- a/ntpd/refclock_nmea.c
+++ b/ntpd/refclock_nmea.c
@@ -816,24 +816,25 @@ nmea_receive(
                 return;
         }
 
+        /* check clock sanity; [bug 2143] */
+        if (pp->leap == LEAP_NOTINSYNC) {       /* no good status? */
+                checkres = CEVNT_PROP;
+                up->tally.rejected++;
+        }
         /* Check sanity of time-of-day. */
-        if (rc_time == 0) {     /* no time or conversion error? */
+        else if (rc_time == 0) {        /* no time or conversion error? */
                 checkres = CEVNT_BADTIME;
                 up->tally.malformed++;
         }
         /* Check sanity of date. */
-        else if (rc_date == 0) {/* no date or conversion error? */
+        else if (rc_date == 0) {        /* no date or conversion error? */
                 checkres = CEVNT_BADDATE;
                 up->tally.malformed++;
         }
-        /* check clock sanity; [bug 2143] */
-        else if (pp->leap == LEAP_NOTINSYNC) {  /* no good status? */
-                checkres = CEVNT_BADREPLY;
-                up->tally.rejected++;
-        }
-        else
+        else {
                 checkres = -1;
-
+        }
+
         if (checkres != -1) {
                 refclock_save_lcode(pp, rd_lastcode, rd_lencode);
                 refclock_report(peer, checkres);
@@ -985,12 +986,15 @@ nmea_poll(
          */
         if (pp->coderecv == pp->codeproc) {
                 peer->flags &= ~FLAG_PPS;
-                refclock_report(peer, CEVNT_TIMEOUT);
+                if (pp->currentstatus < CEVNT_TIMEOUT)
+                        refclock_report(peer, CEVNT_TIMEOUT);
                 memset(&up->last_gpsdate, 0, sizeof(up->last_gpsdate));
         } else {
                 pp->polls++;
                 pp->lastref = pp->lastrec;
                 refclock_receive(peer);
+                if (pp->currentstatus > CEVNT_NOMINAL)
+                        refclock_report(peer, CEVNT_NOMINAL);
         }
 
         /*
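Looping back to the refclock_receive() hunk earlier in the patch: as I read it, the new reach update shifts the register by one position whenever bit 0 is already set, so several good samples inside one poll cycle widen the reach mask instead of merely re-setting its low bit as the old peer->reach |= 1 did. A standalone demonstration follows; it is my own isolated sketch, not ntpd code.

/* Sketch only: effect of the new reach update when several samples
 * arrive with no poll-driven shift in between, e.g. a startup burst. */
#include <stdio.h>

int main(void)
{
        unsigned char reach_old = 0, reach_new = 0;
        int i;

        for (i = 1; i <= 8; i++) {
                reach_old |= 1;         /* previous behaviour */
                /* new behaviour: shift by one iff bit 0 already set */
                reach_new = (unsigned char)
                            ((reach_new << (reach_new & 1)) | 1);
                printf("sample %d: old=0x%02X new=0x%02X\n",
                       i, reach_old, reach_new);
        }
        return 0;
}

After eight back-to-back samples the new form reaches the full 0xFF mask that refclock_checkburst() tests against, while the old form stays at 0x01; that faster fill appears to be the point of the startup acceleration.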