rm -f ./ns1/example.db.map ./ns1/signed.db.map
rm -f ./ns1/huge.db ./ns1/huge.db.raw
rm -f ./ns1/uber.db ./ns1/uber.db.raw
+rm -f ./ns1/255types.db ./ns1/255types.db.raw
+rm -f ./ns1/on-limit.db ./ns1/on-limit.db.raw
+rm -f ./ns1/over-limit.db ./ns1/over-limit.db.raw
+rm -f ./ns1/under-limit.db ./ns1/under-limit.db.raw
+rm -f ./ns2/under-limit.bk
rm -f ./ns1/session.key
rm -f ./dig.out.*
rm -f ./dig.out
example.db >/dev/null 2>&1
$CHECKZONE -D -F raw -L 3333 -o example.db.serial.raw example \
example.db >/dev/null 2>&1
-$CHECKZONE -D -F raw -o large.db.raw large large.db >/dev/null 2>&1
+$CHECKZONE -D -F raw -o under-limit.db.raw under-limit under-limit.db >/dev/null 2>&1
$CHECKZONE -D -F map -o example.db.map example-map \
example.db >/dev/null 2>&1
-$CHECKZONE -D -F raw -o huge.db.raw huge huge.db >/dev/null 2>&1
-$CHECKZONE -D -F raw -o uber.db.raw uber uber.db >/dev/null 2>&1
-$CHECKZONE -D -F raw -o many.db.raw many many.db >/dev/null 2>&1
+$CHECKZONE -D -F raw -o on-limit.db.raw on-limit on-limit.db >/dev/null 2>&1
+$CHECKZONE -D -F raw -o over-limit.db.raw over-limit over-limit.db >/dev/null 2>&1
+$CHECKZONE -D -F raw -o 255types.db.raw 255types 255types.db >/dev/null 2>&1
$KEYGEN -q -a "$DEFAULT_ALGORITHM" -b "$DEFAULT_BITS" -f KSK signed >/dev/null 2>&1
$KEYGEN -q -a "$DEFAULT_ALGORITHM" -b "$DEFAULT_BITS" signed >/dev/null 2>&1
};
-zone "large" {
+zone "under-limit" {
type primary;
- file "large.db.raw";
+ file "under-limit.db.raw";
masterfile-format raw;
allow-transfer { any; };
};
-zone "huge" {
+zone "on-limit" {
type primary;
- file "huge.db.raw";
+ file "on-limit.db.raw";
masterfile-format raw;
allow-transfer { any; };
};
-zone "uber" {
+zone "over-limit" {
type primary;
- file "uber.db.raw";
+ file "over-limit.db.raw";
masterfile-format raw;
allow-transfer { any; };
};
-zone "many" {
+zone "255types" {
type primary;
- file "many.db.raw";
+ file "255types.db.raw";
masterfile-format raw;
allow-transfer { any; };
};
file "transfer.db.full";
};
-zone "large" {
+zone "under-limit" {
type secondary;
primaries { 10.53.0.1; };
masterfile-format raw;
- file "large.bk";
+ file "under-limit.bk";
};
-zone "huge" {
+zone "on-limit" {
type secondary;
primaries { 10.53.0.1; };
masterfile-format raw;
- file "huge.bk";
+ file "on-limit.bk";
};
-zone "many" {
+zone "255types" {
type secondary;
primaries { 10.53.0.1; };
masterfile-format raw;
- file "many.bk";
+ file "255types.bk";
};
cp ns1/example.db ns2/
cp ns2/formerly-text.db.in ns2/formerly-text.db
-cp ns1/large.db.in ns1/large.db
+cp ns1/under-limit.db.in ns1/under-limit.db
+
+# counts are set with respect to these limits in named.conf:
+# max-records-per-type 2050;
+# max-types-per-name 500;
awk 'END {
for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
-}' </dev/null >>ns1/large.db
-cp ns1/huge.db.in ns1/huge.db
+}' </dev/null >>ns1/under-limit.db
+cp ns1/on-limit.db.in ns1/on-limit.db
awk 'END {
for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
for (i = 0; i < 2050; i++ ) { print "2050-txt TXT", i; }
-}' </dev/null >>ns1/huge.db
-cp ns1/uber.db.in ns1/uber.db
+}' </dev/null >>ns1/on-limit.db
+cp ns1/over-limit.db.in ns1/over-limit.db
awk 'END {
for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
for (i = 0; i < 2050; i++ ) { print "2050-txt TXT", i; }
for (i = 0; i < 2100; i++ ) { print "2100-txt TXT", i; }
-}' </dev/null >>ns1/uber.db
-cp ns1/many.db.in ns1/many.db
+}' </dev/null >>ns1/over-limit.db
+cp ns1/255types.db.in ns1/255types.db
for ntype in $(seq 65280 65534); do
echo "m TYPE${ntype} \# 0"
-done >>ns1/many.db
-echo "m TXT bunny" >>ns1/many.db
+done >>ns1/255types.db
+echo "m TXT bunny" >>ns1/255types.db
cd ns1 && $SHELL compile.sh
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that large rdatasets loaded ($n)"
+echo_i "checking that under-limit rdatasets loaded ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
for rrcount in 500-txt 1000-txt 2000-txt; do
- $DIG +tcp txt "${rrcount}.large" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
+ $DIG +tcp txt "${rrcount}.under-limit" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
grep "status: NOERROR" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done
[ $ret -eq 0 ] && break
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that large rdatasets transfered ($n)"
+echo_i "checking that under-limit rdatasets transferred ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
for rrcount in 500-txt 1000-txt 2000-txt; do
- $DIG +tcp txt "${rrcount}.large" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
+ $DIG +tcp txt "${rrcount}.under-limit" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
grep "status: NOERROR" "dig.out.ns2.$rrcount.test$n" >/dev/null || ret=1
done
[ $ret -eq 0 ] && break
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that huge rdatasets loaded ($n)"
+echo_i "checking that on-limit rdatasets loaded ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt; do
- $DIG +tcp txt "${rrcount}.huge" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
+ $DIG +tcp txt "${rrcount}.on-limit" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
grep "status: NOERROR" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done
[ $ret -eq 0 ] && break
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that huge rdatasets not transfered ($n)"
+echo_i "checking that on-limit rdatasets not transferred ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt; do
- $DIG +tcp txt "${rrcount}.huge" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
+ $DIG +tcp txt "${rrcount}.on-limit" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
grep "status: SERVFAIL" "dig.out.ns2.$rrcount.test$n" >/dev/null || ret=1
done
[ $ret -eq 0 ] && break
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that uber rdatasets not loaded ($n)"
+echo_i "checking that over-limit rdatasets not loaded ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt 2100-txt; do
- $DIG +tcp txt "${rrcount}.uber" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
+ $DIG +tcp txt "${rrcount}.over-limit" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
grep "status: SERVFAIL" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done
[ $ret -eq 0 ] && break
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that many types are loaded ($n)"
+echo_i "checking that 255 types are loaded ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
- $DIG +tcp TXT "m.many" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.test$n"
+ $DIG +tcp TXT "m.255types" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.test$n"
grep "status: NOERROR" "dig.out.ns1.test$n" >/dev/null || ret=1
[ $ret -eq 0 ] && break
sleep 1
[ $ret -eq 0 ] || echo_i "failed"
status=$((status + ret))
-echo_i "checking that many types are not transfered ($n)"
+echo_i "checking that 255 types are not transferred ($n)"
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
- $DIG +tcp TXT "m.many" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.test$n"
+ $DIG +tcp TXT "m.255types" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.test$n"
grep "status: SERVFAIL" "dig.out.ns2.test$n" >/dev/null || ret=1
[ $ret -eq 0 ] && break
sleep 1