int tc_counter; /* jiggle counter */
double last_offset; /* last offset (s) */
+u_int tc_twinlo; /* TC step down not before this time */
+u_int tc_twinhi; /* TC step up not before this time */
+
/*
* Huff-n'-puff filter variables
*/
* increased, otherwise it is decreased. A bit of hysteresis
* helps calm the dance. Works best using burst mode. Don't
* fiddle with the poll during the startup clamp period.
+ * [Bug 3615] also observe time gates to avoid eager stepping
*/
if (freq_cnt > 0) {
tc_counter = 0;
+ tc_twinlo = current_time;
+ tc_twinhi = current_time;
} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
tc_counter += sys_poll;
if (tc_counter > CLOCK_LIMIT) {
tc_counter = CLOCK_LIMIT;
- if (sys_poll < peer->maxpoll) {
- tc_counter = 0;
- sys_poll++;
- }
+ if (sys_poll < peer->maxpoll)
+ sys_poll += (current_time >= tc_twinhi);
}
} else {
tc_counter -= sys_poll << 1;
if (tc_counter < -CLOCK_LIMIT) {
tc_counter = -CLOCK_LIMIT;
- if (sys_poll > peer->minpoll) {
- tc_counter = 0;
- sys_poll--;
- }
+ if (sys_poll > peer->minpoll)
+ sys_poll -= (current_time >= tc_twinlo);
}
}
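/*
 * Worked example (illustrative, assuming the stock CLOCK_LIMIT of 30):
 * at sys_poll == 6 every in-gate update adds 6, so six consecutive
 * good updates push tc_counter past the limit and allow a step up
 * (now additionally gated on current_time >= tc_twinhi), while a
 * single out-of-gate update subtracts 12 and cancels two good ones.
 */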
/*
* If the time constant has changed, update the poll variables.
+ *
+ * [Bug 3615] also set new time gates.
+ * The time limit for stepping down will be half the TC interval
+ * or 60 secs from now, whichever is bigger, and the step-up time
+ * limit will be half the TC interval after the step-down limit.
+ *
+ * The 'sys_poll' value affects the servo loop gain, and
+ * overshooting sys_poll slows it down unnecessarily. Stepping
+ * down too fast also has bad effects.
+ *
+ * The 'tc_counter' dance itself is something that *should*
+ * happen *once* every (1 << sys_poll) seconds, I think, but
+ * that's not how it works right now, and adding time guards
+ * seems the least intrusive way to handle this.
*/
- if (osys_poll != sys_poll)
+ if (osys_poll != sys_poll) {
+ u_int deadband = 1u << (sys_poll - 1);
+ tc_counter = 0;
+ tc_twinlo = current_time + max(deadband, 60);
+ tc_twinhi = tc_twinlo + deadband;
poll_update(peer, sys_poll);
+ }
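/*
 * Illustrative numbers for the gates set above: after a step to
 * sys_poll == 10 the poll interval is 1 << 10 == 1024 s, so deadband
 * is 512, tc_twinlo becomes current_time + max(512, 60) == +512 and
 * tc_twinhi becomes +1024; i.e. no step down for half an interval and
 * no step up for a full one.  At a short poll of 4 the 60 s floor
 * wins: deadband == 8, tc_twinlo == +60, tc_twinhi == +68.
 */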
/*
* Yibbidy, yibbbidy, yibbidy; that'h all folks.
/*
* Forward declarations
*/
-static int refclock_cmpl_fp (const void *, const void *);
-static int refclock_sample (struct refclockproc *);
-static int refclock_ioctl(int, u_int);
+static int refclock_cmpl_fp (const void *, const void *);
+static int refclock_sample (struct refclockproc *);
+static int refclock_ioctl(int, u_int);
+static void refclock_checkburst(struct peer *, struct refclockproc *);
/* circular buffer functions
*
return pp->filter[pp->codeproc];
}
+static inline u_int clk_cnt_sample(
+ struct refclockproc * const pp
+ )
+{
+ u_int retv = pp->coderecv - pp->codeproc;
+ if (retv > MAXSTAGE)
+ retv += MAXSTAGE;
+ return retv;
+}
+
#else
static inline void clk_add_sample(
return pp->filter[pp->codeproc];
}
+static inline u_int clk_cnt_sample(
+ struct refclockproc * const pp
+ )
+{
+ return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
+}
+
#endif
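Both clk_cnt_sample() variants above yield the number of samples queued
between pp->codeproc and pp->coderecv, i.e. the index difference folded
back into the 0..MAXSTAGE-1 range. A minimal standalone sketch of the
wrap-around case, using an assumed MAXSTAGE of 64 (a power of two, as
the mask variant requires):

#include <stdio.h>

#define MAXSTAGE 64u	/* illustrative value only */

int main(void)
{
	/* the receive index has wrapped around past the process index */
	unsigned coderecv = 2, codeproc = MAXSTAGE - 1;
	unsigned diff = coderecv - codeproc;	/* underflows */
	unsigned via_add  = (diff > MAXSTAGE) ? diff + MAXSTAGE : diff;
	unsigned via_mask = diff & (MAXSTAGE - 1);

	/* both forms fold the underflow back to 3 queued samples */
	printf("%u %u\n", via_add, via_mask);
	return 0;
}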
/*
} else {
peer->burst--;
}
+ peer->procptr->inpoll = TRUE;
if (refclock_conf[clktype]->clock_poll != noentry)
(refclock_conf[clktype]->clock_poll)(unit, peer);
poll_update(peer, peer->hpoll);
L_SUB(&lftemp, &lastrec);
LFPTOD(&lftemp, doffset);
clk_add_sample(pp, doffset + fudge);
+ refclock_checkburst(pp->io.srcclock, pp);
}
* seconds and milliseconds/microseconds to internal timestamp format,
* then constructs a new entry in the median filter circular buffer.
* Return success (1) if the data are correct and consistent with the
- * converntional calendar.
+ * conventional calendar.
*
* Important for PPS users: Normally, the pp->lastrec is set to the
* system time when the on-time character is received and the pp->year,
* seconds and milliseconds/microseconds of the timecode. Use
* clocktime() for the aggregate seconds and the msec/usec for
* the fraction, when present. Note that this code relies on the
- * filesystem time for the years and does not use the years of
+ * file system time for the years and does not use the years of
* the timecode.
*/
if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
* filter.
*/
pp = peer->procptr;
+ pp->inpoll = FALSE;
peer->leap = pp->leap;
if (peer->leap == LEAP_NOTINSYNC)
return;
report_event(PEVNT_REACH, peer, NULL);
peer->timereachable = current_time;
}
- peer->reach |= 1;
+ peer->reach = (peer->reach << (peer->reach & 1)) | 1;
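/* The expression above shifts the register only when its low bit is
 * already set, so several samples arriving within one poll interval
 * each occupy their own bit; presumably this lets the reach mask (and
 * refclock_checkburst(), which counts its bits) fill up faster after
 * a cold start.
 */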
peer->reftime = pp->lastref;
peer->aorg = pp->lastrec;
peer->rootdisp = pp->disp;
pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
clk_add_sample(pp, dcorr);
+ refclock_checkburst(peer, pp);
#ifdef DEBUG
if (debug > 1)
# endif
}
+/*
+ * -------------------------------------------------------------------
+ * Check if it makes sense to schedule an 'early' poll to get the clock
+ * up fast after startup or a longer signal dropout.
+ */
+static void
+refclock_checkburst(
+ struct peer * peer,
+ struct refclockproc * pp
+ )
+{
+ uint32_t limit; /* when we should poll */
+ u_int needs; /* needed number of samples */
+
+ /* Paranoia: stop here if peer and clockproc don't match up.
+ * When a poll is already pending we don't have to do anything,
+ * either; likewise once the reach mask is full and the dispersion
+ * shows the filter has stabilized.
+ */
+ if (pp->inpoll || (peer->procptr != pp) ||
+ ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
+ return;
+
+ /* If the next poll is soon enough, bail out, too: */
+ limit = current_time + 1;
+ if (peer->nextdate <= limit)
+ return;
+
+ /* Derive the number of samples needed from the popcount of the
+ * reach mask. With fewer samples available, we bail out.
+ */
+ needs = peer->reach;
+ needs -= (needs >> 1) & 0x55;
+ needs = (needs & 0x33) + ((needs >> 2) & 0x33);
+ needs = (needs + (needs >> 4)) & 0x0F;
+ if (needs > 6)
+ needs = 6;
+ else if (needs < 3)
+ needs = 3;
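/* Illustrative values: reach == 0x07 has three bits set, so needs
 * stays at 3; reach == 0xFE has seven bits set and is clamped to 6.
 */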
+ if (clk_cnt_sample(pp) < needs)
+ return;
+
+ /* Get serious. Reduce the poll to minimum and schedule early.
+ * (Changing the peer poll is probably in vain, as it will be
+ * re-adjusted, but maybe at some point the hint will work...)
+ */
+ peer->hpoll = peer->minpoll;
+ peer->nextdate = limit;
+}
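/*
 * Putting the hunks above together: 'inpoll' is set when the driver
 * poll is kicked off and cleared again in refclock_receive(), while
 * refclock_checkburst() runs right after each clk_add_sample() call.
 * Every fresh sample can therefore pull the next poll forward while
 * the reach mask is still filling after a start or dropout.
 */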
/*
* -------------------------------------------------------------------
return;
}
+ /* check clock sanity; [bug 2143] */
+ if (pp->leap == LEAP_NOTINSYNC) { /* no good status? */
+ checkres = CEVNT_PROP;
+ up->tally.rejected++;
+ }
/* Check sanity of time-of-day. */
- if (rc_time == 0) { /* no time or conversion error? */
+ else if (rc_time == 0) { /* no time or conversion error? */
checkres = CEVNT_BADTIME;
up->tally.malformed++;
}
/* Check sanity of date. */
- else if (rc_date == 0) {/* no date or conversion error? */
+ else if (rc_date == 0) { /* no date or conversion error? */
checkres = CEVNT_BADDATE;
up->tally.malformed++;
}
- /* check clock sanity; [bug 2143] */
- else if (pp->leap == LEAP_NOTINSYNC) { /* no good status? */
- checkres = CEVNT_BADREPLY;
- up->tally.rejected++;
- }
- else
+ else {
checkres = -1;
-
+ }
+
if (checkres != -1) {
refclock_save_lcode(pp, rd_lastcode, rd_lencode);
refclock_report(peer, checkres);
*/
if (pp->coderecv == pp->codeproc) {
peer->flags &= ~FLAG_PPS;
- refclock_report(peer, CEVNT_TIMEOUT);
+ if (pp->currentstatus < CEVNT_TIMEOUT)
+ refclock_report(peer, CEVNT_TIMEOUT);
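/* (the guard above reports the timeout only while the status is
 * still nominal; a timeout or more specific error already on
 * record is left alone)
 */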
memset(&up->last_gpsdate, 0, sizeof(up->last_gpsdate));
} else {
pp->polls++;
pp->lastref = pp->lastrec;
refclock_receive(peer);
+ if (pp->currentstatus > CEVNT_NOMINAL)
+ refclock_report(peer, CEVNT_NOMINAL);
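/* (samples are flowing again, so any previously reported error
 * status is cleared back to nominal)
 */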
}
/*