# Copy base zone files into place.
cp ns2/formerly-text.db.in ns2/formerly-text.db
cp ns1/large.db.in ns1/large.db
# Append TXT RRsets whose owner names encode their record counts
# (500-txt carries 500 TXT records, and so on).  awk's END block runs
# once after (empty) input, emitting all records in one pass.
awk 'END {
  for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
  for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
  for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
}' </dev/null >>ns1/large.db
cp ns1/huge.db.in ns1/huge.db
# Same scheme as large.db, plus a 2050-record RRset: owner names encode
# their record counts so later checks can iterate over them by name.
awk 'END {
  for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
  for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
  for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
  for (i = 0; i < 2050; i++ ) { print "2050-txt TXT", i; }
}' </dev/null >>ns1/huge.db
cp ns1/uber.db.in ns1/uber.db
# Same scheme again, plus a 2100-record RRset — the largest tier used
# by the checks below.
awk 'END {
  for (i = 0; i < 500; i++ ) { print "500-txt TXT", i; }
  for (i = 0; i < 1000; i++ ) { print "1000-txt TXT", i; }
  for (i = 0; i < 2000; i++ ) { print "2000-txt TXT", i; }
  for (i = 0; i < 2050; i++ ) { print "2050-txt TXT", i; }
  for (i = 0; i < 2100; i++ ) { print "2100-txt TXT", i; }
}' </dev/null >>ns1/uber.db
cp ns1/many.db.in ns1/many.db
# Iterate over the private-use RR type code range (65280-65534).
# NOTE(review): the matching 'done' for this loop is not visible in this
# chunk — presumably elided context; confirm against the full file.
for ntype in $(seq 65280 65534); do
# NOTE(review): $n is never assigned in view; presumably a test counter
# incremented in elided context — verify against the full file.
echo_i "checking that large rdatasets loaded ($n)"
# Retry loop: up to 10 attempts (its closing 'done' is not visible here).
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
# Query each large TXT RRset (owner names encode the record count) on
# the primary (10.53.0.1); any lookup that is not NOERROR fails the attempt.
for rrcount in 500-txt 1000-txt 2000-txt; do
  $DIG +tcp txt "${rrcount}.large" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
  grep "status: NOERROR" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done
# Verify the large zone's RRsets arrived on the secondary (ns2).
echo_i "checking that large rdatasets transfered ($n)"
# Retry loop: up to 10 attempts (its closing 'done' is not visible here).
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
# Query the same large TXT RRsets on the secondary (10.53.0.2) to
# confirm they transferred; any non-NOERROR response fails the attempt.
for rrcount in 500-txt 1000-txt 2000-txt; do
  $DIG +tcp txt "${rrcount}.large" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
  grep "status: NOERROR" "dig.out.ns2.$rrcount.test$n" >/dev/null || ret=1
done
# Verify the huge zone's RRsets are served by the primary (ns1).
echo_i "checking that huge rdatasets loaded ($n)"
# Retry loop: up to 10 attempts (its closing 'done' is not visible here).
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
# Query each huge TXT RRset (up to 2050 records) on the primary; every
# one must answer NOERROR for the attempt to pass.
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt; do
  $DIG +tcp txt "${rrcount}.huge" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
  grep "status: NOERROR" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done
# Verify the huge zone did NOT transfer to the secondary (ns2) — the
# check below expects SERVFAIL from it.
echo_i "checking that huge rdatasets not transfered ($n)"
# Retry loop: up to 10 attempts (its closing 'done' is not visible here).
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
# The huge zone must not be served by the secondary: every query is
# expected to return SERVFAIL; anything else fails the attempt.
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt; do
  $DIG +tcp txt "${rrcount}.huge" @10.53.0.2 -p "${PORT}" >"dig.out.ns2.$rrcount.test$n"
  grep "status: SERVFAIL" "dig.out.ns2.$rrcount.test$n" >/dev/null || ret=1
done
# Verify the uber zone did NOT load on the primary (ns1) — the check
# below expects SERVFAIL from it.
echo_i "checking that uber rdatasets not loaded ($n)"
# Retry loop: up to 10 attempts (its closing 'done' is not visible here).
for _attempt in 0 1 2 3 4 5 6 7 8 9; do
ret=0
# The uber zone must not have loaded on the primary: every query
# (including the 2100-record RRset) must return SERVFAIL.
for rrcount in 500-txt 1000-txt 2000-txt 2050-txt 2100-txt; do
  $DIG +tcp txt "${rrcount}.uber" @10.53.0.1 -p "${PORT}" >"dig.out.ns1.$rrcount.test$n"
  grep "status: SERVFAIL" "dig.out.ns1.$rrcount.test$n" >/dev/null || ret=1
done