* [Sec 3610] process_control() should bail earlier on short packets. stenn@
- Reported by Philippe Antoine
+* [Bug 3628] raw DCF decoding - improve robustness with Zeller's congruence
+ - implement Zeller's congruence in libparse and libntp <perlinger@ntp.org>
* [Bug 3620] memory leak in ntpq sysinfo <perlinger@ntp.org>
- applied patch by Gerry Garvey
* [Bug 3619] Honour drefid setting in cooked mode and sysinfo <perlinger@ntp.org>
* Assuming that the true division is the most costly operation, this
* sequence should give most bang for the buck.
*/
+
+/* misc: fast remainder helpers (floor convention) and century expansion */
+extern int u32mod7(uint32_t x);
+extern int i32mod7(int32_t x);
+extern uint32_t i32fmod(int32_t x, uint32_t d);
+
+/* expand a 2-digit year to a full year using the day-of-week (1=Mon..7=Sun);
+ * returns zero for an impossible date/weekday combination.
+ */
+extern int32_t ntpcal_expand_century(uint32_t y, uint32_t m, uint32_t d, uint32_t wd);
+
#endif
* We simply pre-calculate the offsets and cycle shifts for the real GPS
* calendar, which starts at 1980-01-06, to simplyfy some expressions.
*
- * This has fringe benefit that sould not be overlooked: Since week zero
- * is around 1900, and we should never have to deal with dates befor
+ * This has a fringe benefit that should not be overlooked: Since week zero
+ * is around 1900, and we should never have to deal with dates before
* 1970 or 1980, a week number of zero can be easily used to indicate
* an invalid week time stamp.
*/
* Note that MSVC declares DATE and TIME to be in the local time
* zone, while neither the C standard nor the GCC docs make any
* statement about this. As a result, we may be +/-12hrs off
- * UTC. But for practical purposes, this should not be a
+ * UTC. But for practical purposes, this should not be a
* problem.
*
*/
char monstr[4];
const char * cp;
unsigned short hour, minute, second, day, year;
- /* Note: The above quantities are used for sscanf 'hu' format,
+ /* Note: The above quantities are used for sscanf 'hu' format,
* so using 'uint16_t' is contra-indicated!
*/
# ifdef DEBUG
- static int ignore = 0;
+ static int ignore = 0;
# endif
ZERO(*jd);
* ====================================================================
*/
+/*
+ *---------------------------------------------------------------------
+ * fast modulo 7 operations (floor/mathematical convention)
+ *---------------------------------------------------------------------
+ */
+int
+u32mod7(
+	uint32_t x
+	)
+{
+	/* This is a combination of tricks from "Hacker's Delight" with
+	 * some modifications, like a multiplication that rounds up to
+	 * drop the final adjustment stage.
+	 *
+	 * Do a partial reduction by digit sum to keep the value in the
+	 * range permitted for the mul/shift stage. There are several
+	 * possible and absolutely equivalent shift/mask combinations;
+	 * this one is ARM-friendly because of a mask that fits into 16
+	 * bit.
+	 */
+	x = (x >> 15) + (x & UINT32_C(0x7FFF));
+	/* Take remainder as (mod 8) by mul/shift. Since the multiplier
+	 * was calculated using ceil() instead of floor(), it skips the
+	 * value '7' properly.
+	 * M <- ceil(ldexp(8/7, 29))
+	 */
+	return (int)((x * UINT32_C(0x24924925)) >> 29);
+}
+
+int
+i32mod7(
+	int32_t x
+	)
+{
+	/* We add (2**32 - 2**32 % 7), which is (2**32 - 4), to negative
+	 * numbers to map them into the positive range. Only the term '-4'
+	 * survives, obviously.
+	 */
+	uint32_t ux = (uint32_t)x;
+	return u32mod7((x < 0) ? (ux - 4u) : ux);
+}
+
+uint32_t
+i32fmod(
+	int32_t x,
+	uint32_t d
+	)
+{
+	/* Floor-convention modulus of signed 'x' by unsigned 'd'; the
+	 * result is always in [0, d-1].  'sf' is all-ones iff x < 0; in
+	 * that case (sf ^ ux) == (-x - 1), and the final line yields
+	 * d - 1 - ((-x - 1) % d), which is the mathematical x mod d.
+	 */
+	uint32_t ux = (uint32_t)x;
+	uint32_t sf = UINT32_C(0) - (x < 0);
+	ux = (sf ^ ux ) % d;
+	return (d & sf) + (sf ^ ux);
+}
+
/*
*---------------------------------------------------------------------
* Do a periodic extension of 'value' around 'pivot' with a period of
uint32_t uv = (uint32_t)value;
uint32_t up = (uint32_t)pivot;
uint32_t uc, sf;
-
+
if (cycle > 1)
{
uc = (uint32_t)cycle;
uint64_t sf64 = (uint64_t)-(ts->q_s < 0);
Q = (uint32_t)(sf64 ^ ((sf64 ^ ts->Q_s) / SECSPERDAY));
R = (uint32_t)(ts->Q_s - Q * SECSPERDAY);
-
+
# elif defined(UINT64_MAX) && !defined(__arm__)
/* We rely on the compiler to do efficient 64bit divisions as
R = ts->D_s.lo - Q * SECSPERDAY;
# else
-
+
/* We don't have 64bit regs. That hurts a bit.
*
* Here we use a mean trick to get away with just one explicit
R = (ts->d_s.hi < 0) ? 239 : 0;/* sign bit value */
R += (al & 0xFFFF);
- R += (al >> 16 ) * 61u; /* 2**16 % 675 */
+ R += (al >> 16 ) * 61u; /* 2**16 % 675 */
R += (ah & 0xFFFF) * 346u; /* 2**32 % 675 */
- R += (ah >> 16 ) * 181u; /* 2**48 % 675 */
+ R += (ah >> 16 ) * 181u; /* 2**48 % 675 */
R %= 675u; /* final reduction */
Q = (al - R) * 0x2D21C10Bu; /* modinv(675, 2**32) */
R = (R << 7) | (ts->d_s.lo & 0x07F);
-
+
# endif
res.hi = uint32_2cpl_to_int32(Q);
R = (ts->d_s.hi < 0) ? 2264 : 0;/* sign bit value */
R += (al & 0xFFFF);
- R += (al >> 16 ) * 4111u; /* 2**16 % 4725 */
+ R += (al >> 16 ) * 4111u; /* 2**16 % 4725 */
R += (ah & 0xFFFF) * 3721u; /* 2**32 % 4725 */
- R += (ah >> 16 ) * 2206u; /* 2**48 % 4725 */
+ R += (ah >> 16 ) * 2206u; /* 2**48 % 4725 */
R %= 4725u; /* final reduction */
Q = (al - R) * 0x98BBADDDu; /* modinv(4725, 2**32) */
R = (R << 7) | (ts->d_s.lo & 0x07F);
uint32_t us, um, uh, ud, sf32;
sf32 = int32_sflag(ts);
-
+
us = (uint32_t)ts;
um = (sf32 ^ us) / SECSPERMIN;
uh = um / MINSPERHR;
* multiplicative inverse of 146097 (mod 2^32) gives us the full
* quotient.
*
- * (-2^33) % 146097 --> 130717 : the sign bit value
- * ( 2^20) % 146097 --> 25897 : the upper digit value
+ * (-2^33) % 146097 --> 130717 : the sign bit value
+ * ( 2^20) % 146097 --> 25897 : the upper digit value
* modinv(146097, 2^32) --> 660721233 : the inverse
*/
uint32_t ux = ((uint32_t)days << 2) | 3;
ntpcal_split
ntpcal_split_yeardays(
int32_t eyd,
- int isleap
+ int isleap
)
{
/* Use the unshifted-year, February-with-30-days approach here.
* Fractional interpolations are used in both directions, with
* the smallest power-of-two divider to avoid any true division.
*/
- ntpcal_split res = {-1, -1};
+ ntpcal_split res = {-1, -1};
/* convert 'isleap' to number of defective days */
isleap = 1 + !isleap;
int leapy;
u_int ymask;
- /* Get day-of-week first. Since rd is signed, the remainder can
- * be in the range [-6..+6], but the assignment to an unsigned
- * variable maps the negative values to positive values >=7.
- * This makes the sign correction look strange, but adding 7
- * causes the needed wrap-around into the desired value range of
- * zero to six, both inclusive.
- */
- jd->weekday = rd % DAYSPERWEEK;
- if (jd->weekday >= DAYSPERWEEK) /* weekday is unsigned! */
- jd->weekday += DAYSPERWEEK;
+ /* Get day-of-week first. It's simply the RD (mod 7)... */
+ jd->weekday = i32mod7(rd);
split = ntpcal_split_eradays(rd - 1, &leapy);
/* Get year and day-of-year, with overflow check. If any of the
int leapy;
/* get day-of-week first */
- utm->tm_wday = rd % DAYSPERWEEK;
- if (utm->tm_wday < 0)
- utm->tm_wday += DAYSPERWEEK;
+ utm->tm_wday = i32mod7(rd);
/* get year and day-of-year */
split = ntpcal_split_eradays(rd - 1, &leapy);
int32_t lo
)
{
- vint64 res;
+ vint64 res;
uint32_t p1, p2, sf;
/* get sign flag and absolute value of 'hi' in p1 */
res.D_s.lo = (res.D_s.lo << 7);
/* fix up sign: res <-- (res + [sf|sf]) ^ [sf|sf] */
- M_ADD(res.D_s.hi, res.D_s.lo, sf, sf);
+ M_ADD(res.D_s.hi, res.D_s.lo, sf, sf);
res.D_s.lo ^= sf;
res.D_s.hi ^= sf;
sf32 = int32_sflag(res.lo);
mu = (uint32_t)res.lo;
Q = sf32 ^ ((sf32 ^ mu) / 12u);
-
+
res.hi += uint32_2cpl_to_int32(Q);
- res.lo = mu - Q * 12u;
+ res.lo = mu - Q * 12u;
}
/* Get cummulated days in year with unshift. Use the fractional
const struct calendar *jd
)
{
- vint64 join;
+ vint64 join;
int32_t days, secs;
days = ntpcal_date_to_rd(jd) - DAY_UNIX_STARTS;
int
ntpcal_ntp64_to_date(
struct calendar *jd,
- const vint64 *ntp
+ const vint64 *ntp
)
{
ntpcal_split ds;
*/
static const uint16_t bctab[4] = { 157, 449, 597, 889 };
- int32_t cs, cw;
+ int32_t cs, cw;
uint32_t cc, ci, yu, sf32;
sf32 = int32_sflag(years);
static const uint16_t bctab[4] = { 85, 130, 17, 62 };
ntpcal_split res;
- int32_t cc, ci;
+ int32_t cc, ci;
uint32_t sw, cy, Q;
/* Use two fast cycle-split divisions again. Herew e want to
* a 64bit target and exact division after calculating the
* remainder first on a 32bit target. With the smaller divider,
* that's even a bit neater.
- */
+ */
# if defined(HAVE_64BITREGS)
/* Full floor division with 64bit values. */
/* Exact division after calculating the remainder via partial
* reduction by digit sum.
- * (-2^33) % 20871 --> 5491 : the sign bit value
- * ( 2^20) % 20871 --> 5026 : the upper digit value
+ * (-2^33) % 20871 --> 5491 : the sign bit value
+ * ( 2^20) % 20871 --> 5026 : the upper digit value
* modinv(20871, 2^32) --> 330081335 : the inverse
*/
uint32_t ux = ((uint32_t)weeks << 2) | 2;
Q = (ux - sw) * 330081335u; /* exact div */
# endif
-
+
ci = Q & 3u;
cc = uint32_2cpl_to_int32(Q);
)
{
ntpcal_split ds;
- int32_t ts[3];
+ int32_t ts[3];
uint32_t uw, ud, sf32;
/*
ud = (uint32_t)ds.hi;
uw = sf32 ^ ((sf32 ^ ud) / DAYSPERWEEK);
ud -= uw * DAYSPERWEEK;
-
+
ds.hi = uint32_2cpl_to_int32(uw);
ds.lo = ud;
* ====================================================================
*/
+/* --------------------------------------------------------------------
+ * reconstruct the century from a truncated date and a day-of-week
+ *
+ * Given a date with truncated year (2-digit, 0..99) and a day-of-week
+ * from 1(Mon) to 7(Sun), recover the full year in [1900AD, 2300AD).
+ * Returns zero if the date/weekday combination is impossible.
+ */
+int32_t
+ntpcal_expand_century(
+	uint32_t y,
+	uint32_t m,
+	uint32_t d,
+	uint32_t wd)
+{
+	/* This algorithm is short but tricky... It's related to
+	 * Zeller's congruence, partially done backwards.
+	 *
+	 * A few facts to remember:
+	 *  1) The Gregorian calendar has a cycle of 400 years.
+	 *  2) The weekday of the 1st day of a century shifts by 5 days
+	 *     during a great cycle.
+	 *  3) For calendar math, a century starts with the 1st year,
+	 *     which is year 1, !not! zero.
+	 *
+	 * So we start with taking the weekday difference (mod 7)
+	 * between the truncated date (which is taken as an absolute
+	 * date in the 1st century in the proleptic calendar) and the
+	 * weekday given.
+	 *
+	 * When dividing this residual by 5, we obtain the number of
+	 * centuries to add to the base. But since the residual is (mod
+	 * 7), we have to make this an exact division by multiplication
+	 * with the modular inverse of 5 (mod 7), which is 3:
+	 *   3*5 === 1 (mod 7).
+	 *
+	 * If this yields a result of 4/5/6, the given date/day-of-week
+	 * combination is impossible, and we return zero as resulting
+	 * year to indicate failure.
+	 *
+	 * Then we remap the century to the range starting with year
+	 * 1900.
+	 */
+
+	uint32_t c;
+
+	/* check basic constraints, including the weekday range of
+	 * 1(Mon)..7(Sun): a bogus 'wd' would otherwise produce a
+	 * plausible-looking but wrong year instead of failing.
+	 */
+	if ((y >= 100u) || (--m >= 12u) || (--d >= 31u) || (wd - 1u >= 7u))
+		return 0;
+
+	if ((m += 10u) >= 12u)		/* shift base to prev. March,1st */
+		m -= 12u;
+	else if (--y >= 100u)
+		y += 100u;
+	d += y + (y >> 2) + 2u;		/* year share */
+	d += (m * 83u + 16u) >> 5;	/* month share */
+
+	/* get (wd - d), shifted to positive value, and multiply with
+	 * 3(mod 7). (Exact division, see comment above.)
+	 * Note: 1) d <= 184 at this point.
+	 *       2) 252 % 7 == 0, but 'wd' is off by one since we did
+	 *          '--d' above, so we add just 251 here!
+	 */
+	c = u32mod7(3 * (251u + wd - d));
+	if (c > 3u)
+		return 0;
+
+	if ((m > 9u) && (++y >= 100u)) {/* undo base shift */
+		y -= 100u;
+		c = (c + 1) & 3u;
+	}
+	y += (c * 100u);		/* combine into 1st cycle */
+	y += (y < 300u) ? 2000 : 1600;	/* map to destination era */
+	return (int)y;
+}
+
char *
ntpcal_iso8601std(
char * buf,
return psum;
}
+/* Expand the truncated (0..99) year in 'clock_time' to a full year by
+ * running Zeller's congruence backwards, given the day-of-week 'wd'
+ * with 1(Mon)..7(Sun).  'clock_time->month' and '->day' are 1-based,
+ * as the constraint checks below imply.  On success the year is
+ * mapped into [1970, 2370) and TRUE is returned; FALSE indicates an
+ * impossible date/weekday combination.
+ */
+static int/*BOOL*/
+zeller_expand(
+	clocktime_t *clock_time,
+	unsigned int wd
+	)
+{
+	unsigned int y = (unsigned int)clock_time->year;
+	unsigned int m = (unsigned int)clock_time->month - 1u;
+	unsigned int d = (unsigned int)clock_time->day - 1u;
+	unsigned int c;
+
+	/* Check basic constraints first. */
+	if ((y >= 100u) || (m >= 12u) || (d >= 31u) || (--wd >= 7u))
+		return FALSE;
+
+	/* Get weekday of date in 1st century by a variation on Zeller's
+	 * congruence. All operands are non-negative, and the month
+	 * formula is adjusted to use a divider of 32, so we can do a
+	 * shift instead of a 'true' division:
+	 */
+	if ((m += 10u) >= 12u)		/* shift base to 0000-03-01 */
+		m -= 12u;
+	else if (--y >= 100u)
+		y += 100;
+	d += y + (y >> 2) + 2u;		/* year-related share */
+	d += (m * 83u + 16u) >> 5;	/* month-related share */
+
+	/* The next step combines the exact division by modular inverse
+	 * with the (mod 7) step in such way that no true division and
+	 * only one multiplication is needed. The multiplier is
+	 *    M <- ceil((3*8)/7 * 2**29)
+	 * and combines multiplication by invmod(5, 7) -> 3 and modulus
+	 * by 7 transformation to (mod 8) in one step.
+	 * Note that 252 == 0 (mod 7) and that 'd' is less than 185,
+	 * so the number to invert and reduce is strictly positive. In
+	 * the end, 'c' is number of centuries since start of a great
+	 * cycle and must be in [0..3] or we had bad input.
+	 */
+	c = (((252u + wd - d) * 0x6db6db6eU) >> 29) & 7u;
+	if (c >= 4)
+		return FALSE;
+	/* undo calendar base shift now */
+	if ((m > 9u) && (++y >= 100u)) {
+		y -= 100u;
+		c = (c + 1u) & 3u;
+	}
+	/* combine year with centuries & map to [1970..2369] */
+	y += (c * 100u);
+	clock_time->year = (int)y + ((y < 370u) ? 2000 : 1600);
+	return TRUE;
+}
+
static u_long
convert_rawdcf(
unsigned char *buffer,
clock_time->year = ext_bf(buffer, DCF_Y10, dcfprm->zerobits);
clock_time->year = TIMES10(clock_time->year) + ext_bf(buffer, DCF_Y1, dcfprm->zerobits);
+ if (!zeller_expand(clock_time, ext_bf(buffer, DCF_DW, dcfprm->zerobits)))
+ return CVT_FAIL|CVT_BADFMT;
+
switch (ext_bf(buffer, DCF_Z, dcfprm->zerobits))
{
case DCF_Z_MET:
void test_NtpToNtp(void);
void test_NtpToTime(void);
+void test_CalUMod7(void);
+void test_CalIMod7(void);
+void test_RellezCentury1_1(void);
+void test_RellezCentury3_1(void);
+void test_RellezYearZero(void);
+
+
void
setUp(void)
{
lfptoa(&lfpe, 9), lfptoa(&lfpr, 9));
TEST_ASSERT_TRUE_MESSAGE(L_ISEQU(&lfpe, &lfpr), mbuf);
}
+
+void
+test_CalUMod7(void)
+{
+	/* boundary probes: 0, INT32_MAX (== 1 mod 7), 2**31 (== 2 mod 7)
+	 * and UINT32_MAX (== 3 mod 7) exercise both reduction stages.
+	 */
+	TEST_ASSERT_EQUAL(0, u32mod7(0));
+	TEST_ASSERT_EQUAL(1, u32mod7(INT32_MAX));
+	TEST_ASSERT_EQUAL(2, u32mod7(UINT32_C(1)+INT32_MAX));
+	TEST_ASSERT_EQUAL(3, u32mod7(UINT32_MAX));
+}
+
+void
+test_CalIMod7(void)
+{
+	/* floor convention: negative inputs map to positive residues,
+	 * e.g. -1 mod 7 == 6, INT32_MIN mod 7 == 5.
+	 */
+	TEST_ASSERT_EQUAL(5, i32mod7(INT32_MIN));
+	TEST_ASSERT_EQUAL(6, i32mod7(-1));
+	TEST_ASSERT_EQUAL(0, i32mod7(0));
+	TEST_ASSERT_EQUAL(1, i32mod7(INT32_MAX));
+}
+
+/* Century expansion tests. Reverse application of Zeller's congruence,
+ * sort of... hence the name "Rellez", Zeller backwards. Just in case
+ * you didn't notice ;)
+ */
+
+void
+test_RellezCentury1_1()
+{
+	/* 1st day of a century: the four valid weekdays (one per
+	 * century of the great cycle) must map to their century; the
+	 * remaining three weekdays are impossible and must yield 0.
+	 */
+	TEST_ASSERT_EQUAL(1901, ntpcal_expand_century( 1, 1, 1, CAL_TUESDAY  ));
+	TEST_ASSERT_EQUAL(2001, ntpcal_expand_century( 1, 1, 1, CAL_MONDAY   ));
+	TEST_ASSERT_EQUAL(2101, ntpcal_expand_century( 1, 1, 1, CAL_SATURDAY ));
+	TEST_ASSERT_EQUAL(2201, ntpcal_expand_century( 1, 1, 1, CAL_THURSDAY ));
+	/* bad/impossible cases: */
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 1, 1, CAL_WEDNESDAY));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 1, 1, CAL_FRIDAY   ));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 1, 1, CAL_SUNDAY   ));
+}
+
+void
+test_RellezCentury3_1()
+{
+	/* 1st day in March of a century (the tricky point: the
+	 * March-base shift in the algorithm is undone here)
+	 */
+	TEST_ASSERT_EQUAL(1901, ntpcal_expand_century( 1, 3, 1, CAL_FRIDAY   ));
+	TEST_ASSERT_EQUAL(2001, ntpcal_expand_century( 1, 3, 1, CAL_THURSDAY ));
+	TEST_ASSERT_EQUAL(2101, ntpcal_expand_century( 1, 3, 1, CAL_TUESDAY  ));
+	TEST_ASSERT_EQUAL(2201, ntpcal_expand_century( 1, 3, 1, CAL_SUNDAY   ));
+	/* bad/impossible cases: */
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 3, 1, CAL_MONDAY   ));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 3, 1, CAL_WEDNESDAY));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 1, 3, 1, CAL_SATURDAY ));
+}
+
+void
+test_RellezYearZero()
+{
+	/* the infamous year zero: y==0 belongs to the *previous*
+	 * century in calendar math (fact 3 in the implementation),
+	 * so 0000-01-01 expands to 1900/2000/2100/2200.
+	 */
+	TEST_ASSERT_EQUAL(1900, ntpcal_expand_century( 0, 1, 1, CAL_MONDAY   ));
+	TEST_ASSERT_EQUAL(2000, ntpcal_expand_century( 0, 1, 1, CAL_SATURDAY ));
+	TEST_ASSERT_EQUAL(2100, ntpcal_expand_century( 0, 1, 1, CAL_FRIDAY   ));
+	TEST_ASSERT_EQUAL(2200, ntpcal_expand_century( 0, 1, 1, CAL_WEDNESDAY));
+	/* bad/impossible cases: */
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 0, 1, 1, CAL_TUESDAY  ));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 0, 1, 1, CAL_THURSDAY ));
+	TEST_ASSERT_EQUAL(   0, ntpcal_expand_century( 0, 1, 1, CAL_SUNDAY   ));
+}
+
+void test_RellezEra(void);
+void test_RellezEra(void)
+{
+	/* Exhaustive walk over the 400-year era handled by
+	 * ntpcal_expand_century(), plus the two fold-over cases just
+	 * outside it.  'mt' is 1-based: mt[m] is the length of month
+	 * 'm' in a non-leap year.
+	 */
+	static const unsigned int mt[13] = { 0, 31,28,31,30,31,30,31,31,30,31,30,31 };
+	unsigned int yi, yo, m, d, wd;
+
+	/* last day before our era -- fold forward */
+	yi = 1899;
+	m  = 12;
+	d  = 31;
+	wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+	yo = ntpcal_expand_century((yi%100), m, d, wd);
+	snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+		 yi, m, d, wd);
+	TEST_ASSERT_EQUAL_MESSAGE(2299, yo, mbuf);
+
+	/* 1st day after our era -- fold back */
+	yi = 2300;
+	m  = 1;
+	d  = 1;
+	wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+	yo = ntpcal_expand_century((yi%100), m, d, wd);
+	snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+		 yi, m, d, wd);
+	TEST_ASSERT_EQUAL_MESSAGE(1900, yo, mbuf);
+
+	/* test every month in our 400y era */
+	for (yi = 1900; yi < 2300; ++yi) {
+		for (m = 1; m <= 12; ++m) {	/* all 12 months, incl. December */
+			/* test first day of month */
+			d  = 1;
+			wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+			yo = ntpcal_expand_century((yi%100), m, d, wd);
+			snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+				 yi, m, d, wd);
+			TEST_ASSERT_EQUAL_MESSAGE(yi, yo, mbuf);
+
+			/* test last day of month */
+			d  = mt[m] + (m == 2 && is_leapyear(yi));
+			wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+			yo = ntpcal_expand_century((yi%100), m, d, wd);
+			snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+				 yi, m, d, wd);
+			TEST_ASSERT_EQUAL_MESSAGE(yi, yo, mbuf);
+		}
+	}
+}
+
+/* This is nearly a verbatim copy of the in-situ implementation of
+ * Zeller's congruence in libparse/clk_rawdcf.c, so the algorithm
+ * can be tested.  NOTE: there is no shared header for it, so any
+ * change to the original must be mirrored here by hand.
+ * Returns the full year in [1970, 2370), or 0 for an impossible
+ * date/weekday combination.
+ */
+static int
+zeller_expand(
+	unsigned int y,
+	unsigned int m,
+	unsigned int d,
+	unsigned int wd
+	)
+{
+	unsigned int c;
+
+	if ((y >= 100u) || (--m >= 12u) || (--d >= 31u) || (--wd >= 7u))
+		return 0;
+
+	if ((m += 10u) >= 12u)
+		m -= 12u;
+	else if (--y >= 100u)
+		y += 100u;
+	d += y + (y >> 2) + 2u;
+	d += (m * 83u + 16u) >> 5;
+
+	c = (((252u + wd - d) * 0x6db6db6eU) >> 29) & 7u;
+	if (c > 3u)
+		return 0;
+
+	if ((m > 9u) && (++y >= 100u)) {
+		y -= 100u;
+		c = (c + 1) & 3u;
+	}
+	y += (c * 100u);
+	y += (y < 370u) ? 2000 : 1600;
+	return (int)y;
+}
+
+void test_zellerDirect(void);
+void test_zellerDirect(void)
+{
+	/* Exhaustive walk over the 400-year era of the DCF variant
+	 * ([1970, 2370)), plus the two fold-over cases just outside
+	 * it.  'mt' is 1-based: mt[m] is the length of month 'm' in a
+	 * non-leap year.
+	 */
+	static const unsigned int mt[13] = { 0, 31,28,31,30,31,30,31,31,30,31,30,31 };
+	unsigned int yi, yo, m, d, wd;
+
+	/* last day before our era -- fold forward */
+	yi = 1969;
+	m  = 12;
+	d  = 31;
+	wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+	yo = zeller_expand((yi%100), m, d, wd);
+	snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+		 yi, m, d, wd);
+	TEST_ASSERT_EQUAL_MESSAGE(2369, yo, mbuf);
+
+	/* 1st day after our era -- fold back */
+	yi = 2370;
+	m  = 1;
+	d  = 1;
+	wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+	yo = zeller_expand((yi%100), m, d, wd);
+	snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+		 yi, m, d, wd);
+	TEST_ASSERT_EQUAL_MESSAGE(1970, yo, mbuf);
+
+	/* test every month in our 400y era */
+	for (yi = 1970; yi < 2370; ++yi) {
+		for (m = 1; m <= 12; ++m) {	/* all 12 months, incl. December */
+			/* test first day of month */
+			d  = 1;
+			wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+			yo = zeller_expand((yi%100), m, d, wd);
+			snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+				 yi, m, d, wd);
+			TEST_ASSERT_EQUAL_MESSAGE(yi, yo, mbuf);
+
+			/* test last day of month */
+			d  = mt[m] + (m == 2 && is_leapyear(yi));
+			wd = ntpcal_edate_to_eradays(yi-1, m-1, d-1) % 7 + 1;
+			yo = zeller_expand((yi%100), m, d, wd);
+			snprintf(mbuf, sizeof(mbuf), "failed, di=%04u-%02u-%02u, wd=%u",
+				 yi, m, d, wd);
+			TEST_ASSERT_EQUAL_MESSAGE(yi, yo, mbuf);
+		}
+	}
+}
+
+void test_ZellerDirectBad(void);
+void test_ZellerDirectBad(void)
+{
+	unsigned int y, n, wd;
+	/* For a given date the valid weekdays over the four centuries
+	 * of a great cycle are {wd, wd+1, wd+3, wd+5} (mod 7); the loop
+	 * below walks the complementary three weekdays {wd+2, wd+4,
+	 * wd+6} (mod 7) and expects the expansion to fail for each.
+	 */
+	for (y = 2001; y < 2101; ++y) {
+		wd = ntpcal_edate_to_eradays(y-1, 0, 0) % 7 + 1;
+		/* step to the first invalid weekday (shift +6 mod 7) */
+		wd = (wd + 5) % 7 + 1;
+		for (n = 0; n < 3; ++n) {
+			TEST_ASSERT_EQUAL(0, zeller_expand((y%100), 1, 1, wd));
+			wd = (wd + 4) % 7 + 1;	/* next invalid: shift +5 */
+		}
+	}
+}
+
+void test_zellerModInv(void);
+void test_zellerModInv(void)
+{
+	unsigned int i, r1, r2;
+
+	/* verify that the combined mul/shift constant 0x6db6db6e
+	 * implements (3 * i) mod 7 over the whole input domain used
+	 * by zeller_expand() (arguments there are well below 2048).
+	 */
+	for (i = 0; i < 2048; ++i) {
+		r1 = (3 * i) % 7;
+		r2 = ((i * 0x6db6db6eU) >> 29) & 7u;
+		snprintf(mbuf, sizeof(mbuf), "i=%u", i);
+		TEST_ASSERT_EQUAL_MESSAGE(r1, r2, mbuf);
+	}
+}
+
+
extern void test_GpsNtpFixpoints(void);
extern void test_NtpToNtp(void);
extern void test_NtpToTime(void);
+extern void test_CalUMod7(void);
+extern void test_CalIMod7(void);
+extern void test_RellezCentury1_1(void);
+extern void test_RellezCentury3_1(void);
+extern void test_RellezYearZero(void);
+extern void test_RellezEra(void);
+extern void test_zellerDirect(void);
+extern void test_ZellerDirectBad(void);
+extern void test_zellerModInv(void);
//=======Suite Setup=====
RUN_TEST(test_GpsNtpFixpoints, 51);
RUN_TEST(test_NtpToNtp, 52);
RUN_TEST(test_NtpToTime, 53);
+ RUN_TEST(test_CalUMod7, 55);
+ RUN_TEST(test_CalIMod7, 56);
+ RUN_TEST(test_RellezCentury1_1, 57);
+ RUN_TEST(test_RellezCentury3_1, 58);
+ RUN_TEST(test_RellezYearZero, 59);
+ RUN_TEST(test_RellezEra, 1059);
+ RUN_TEST(test_zellerDirect, 1144);
+ RUN_TEST(test_ZellerDirectBad, 1192);
+ RUN_TEST(test_zellerModInv, 1207);
return (UnityEnd());
}