-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb getdbmap' operates as expected.
-
-This test creates some test databases using 'ctdb attach'.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Get the database on using 'ctdb getdbmap'.
-3. Verify that the output is valid.
-
-Expected results:
-
-* 'ctdb getdbmap' shows a valid listing of databases.
-EOF
-}
+# Verify that 'ctdb getdbmap' operates as expected
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
make_temp_db_filename ()
{
-#!/bin/bash
-
-test_info()
-{
- cat <<EOF
-Verify the operation of 'ctdb attach' command.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Shut down one of the nodes
-3. Attach test databases
-4. Start shutdown node
-5. Verify that the databases are attached.
-6. Restart one of the nodes
-7. Verify that the databses are attached.
+#!/usr/bin/env bash
-
-Expected results:
-
-* Command 'ctdb attach' command successfully attaches databases.
-EOF
-}
+# Verify that databases are attached when a node joins the cluster:
+# 1. Shut down CTDB on one node
+# 2. Attach test databases
+# 3. Check that databases are attached on all up nodes
+# 4. Start CTDB on the node where it is shut down
+# 5. Verify that the test databases are attached on this node
+# 6. Restart one of the nodes
+# 7. Verify that the test databases are attached on this node
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
######################################################################
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify the operation of 'ctdb detach' command.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Attach test databases
-3. Detach test databases
-4. Verify that the databases are not attached.
-
-Expected results:
-
-* Command 'ctdb detach' command successfully removes attached databases.
-EOF
-}
+# Verify that 'ctdb detach' works as expected:
+# 1. Attach test databases
+# 2. Detach test databases
+# 3. Confirm test databases are not attached
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
######################################################################
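As a quick illustration of the flow the detach test above exercises, the equivalent manual commands look roughly like this (the database name is just an example, not one used by the test):

  ctdb attach detach_test.tdb     # attach a volatile test database on all nodes
  ctdb getdbmap                   # the new database appears in the map
  ctdb detach detach_test.tdb     # detach it cluster-wide
  ctdb getdbmap                   # ... and it is gone again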
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-The command 'ctdb wipedb' is used to clear a database across the whole
-cluster.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Create a persistent test database
-3. Add some records to node #0 and node #1
-4. Perform wipedb on node #0 and verify the database is empty on both node 0 and 1
-
-Expected results:
-
-* An empty database will result
-
-EOF
-}
+# Verify that 'ctdb wipedb' can clear a persistent database:
+# 1. Verify that the status on all of the ctdb nodes is 'OK'.
+# 2. Create a persistent test database
+# 3. Add some records to node 0 and node 1
+# 4. Run wipedb on node 0
+# 5. Verify that the database is empty on both node 0 and node 1
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
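The wipedb flow above can be reproduced by hand roughly as follows; the database name is an example and the quoted key/value format piped into ptrans is an assumption about its input syntax:

  ctdb attach wipedb_test.tdb persistent                # create the persistent db cluster-wide
  echo '"key1" "value1"' | ctdb ptrans wipedb_test.tdb  # add a record via a transaction
  ctdb wipedb wipedb_test.tdb                           # clear the db from any one node
  onnode all ctdb cattdb wipedb_test.tdb                # every node should now report 0 records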
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-The command 'ctdb restoredb' is used to restore a database across the
-whole cluster.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Create a persistent test database
-3. Add some records to test database
-4. Backup database
-5. Wipe database and verify the database is empty on all nodes
-6. Restore database and make sure all the records are restored
-7. Make sure no recovery has been triggered
-
-Expected results:
-
-* Database operations should not cause a recovery
-
-EOF
-}
+# Confirm that 'ctdb restoredb' works correctly:
+# 1. Create a persistent test database
+# 2. Add some records to test database
+# 3. Backup database
+# 4. Wipe database and verify the database is empty on all nodes
+# 5. Restore database and make sure all the records are restored
+# 6. Make sure no recovery has been triggered
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 $CTDB status
generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile")
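The backup/restore cycle driven by the restoredb test can be sketched with plain tool commands; the database and backup file names are placeholders:

  ctdb attach restoredb_test.tdb persistent
  ctdb backupdb restoredb_test.tdb /tmp/restoredb_test.bak   # dump all records to a file
  ctdb wipedb restoredb_test.tdb                             # empty the db on all nodes
  ctdb restoredb /tmp/restoredb_test.bak restoredb_test.tdb  # load the records back in
  # comparing the "Generation:" line of "ctdb status" before and after,
  # as the sed above does, shows whether a recovery was triggered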
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Run the fetch_ring test and sanity check the output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Run the fetch_ring test and sanity check the output
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
# Run the fetch_ring test, sanity check the output and check hot keys
# statistics
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Read-only records can be activated at runtime using a ctdb command.
-If read-only records are not activated, then any attempt to fetch a read-only
-copy should be automatically upgraded to a read-write fetch_lock().
-
-If read-only delegations are present, then any attempt to aquire a read-write
-fetch_lock will trigger all delegations to be revoked before the fetch lock
-completes.
-
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
+# Test support for read-only records
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. create a test database and some records
-3. try to fetch read-only records, this should not result in any delegations
-4. activate read-only support
-5. try to fetch read-only records, this should result in delegations
-6. do a fetchlock and the delegations should be revoked
-7. try to fetch read-only records, this should result in delegations
-8. do a recovery and the delegations should be revoked
+# Read-only records can be activated at runtime using a ctdb command.
+# If read-only records are not activated, then any attempt to fetch a
+# read-only copy should be automatically upgraded to a read-write
+# fetch_locked().
-Expected results:
+# If read-only delegations are present, then any attempt to acquire a
+# read-write fetch_lock will trigger revocation of all delegations
+# before the fetch_locked().
-Delegations should be created and revoked as above
-
-EOF
-}
+# 1. Create a test database and some records
+# 2. Try to fetch read-only records, this should not result in any delegations
+# 3. Activate read-only support
+# 4. Try to fetch read-only records, this should result in delegations
+# 5. Do a fetchlock and the delegations should be revoked
+# 6. Try to fetch read-only records, this should result in delegations
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
######################################################################
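Read-only record support is enabled per database; a minimal sketch of switching it on and inspecting the result (the database name is an example, and using cattdb output to spot delegation flags is an assumption about how the state can be observed):

  ctdb attach readonly_test.tdb          # volatile test database
  ctdb setdbreadonly readonly_test.tdb   # activate read-only record support for this db
  # once a client has fetched a read-only copy, dumping the records should
  # show read-only flags in the record headers:
  onnode all ctdb cattdb readonly_test.tdb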
-#!/bin/bash
-
-test_info()
-{
- cat <<EOF
-Recovery can under certain circumstances lead to old record copies
-resurrecting: Recovery selects the newest record copy purely by RSN. At
-the end of the recovery, the recovery master is the dmaster for all
-records in all (non-persistent) databases. And the other nodes locally
-hold the complete copy of the databases. The bug is that the recovery
-process does not increment the RSN on the recovery master at the end of
-the recovery. Now clients acting directly on the Recovery master will
-directly change a record's content on the recmaster without migration
-and hence without RSN bump. So a subsequent recovery can not tell that
-the recmaster's copy is newer than the copies on the other nodes, since
-their RSN is the same. Hence, if the recmaster is not node 0 (or more
-precisely not the active node with the lowest node number), the recovery
-will choose copies from nodes with lower number and stick to these.
-
-Steps:
-
-1. Create a test database
-2. Add a record with value value1 on recovery master
-3. Force a recovery
-4. Update the record with value value2 on recovery master
-5. Force a recovery
-6. Fetch the record
-
-Expected results:
-
-* The record should have value value2 and not value1
-
-EOF
-}
+#!/usr/bin/env bash
+
+# Test that recovery correctly handles RSNs
+
+# Recovery can under certain circumstances lead to old record copies
+# resurrecting: Recovery selects the newest record copy purely by RSN. At
+# the end of the recovery, the recovery master is the dmaster for all
+# records in all (non-persistent) databases. And the other nodes locally
+# hold the complete copy of the databases. The bug is that the recovery
+# process does not increment the RSN on the recovery master at the end of
+# the recovery. Now clients acting directly on the Recovery master will
+# directly change a record's content on the recmaster without migration
+# and hence without RSN bump. So a subsequent recovery can not tell that
+# the recmaster's copy is newer than the copies on the other nodes, since
+# their RSN is the same. Hence, if the recmaster is not node 0 (or more
+# precisely not the active node with the lowest node number), the recovery
+# will choose copies from nodes with lower number and stick to these.
+
+# 1. Create a test database
+# 2. Add a record with value value1 on recovery master
+# 3. Force a recovery
+# 4. Update the record with value value2 on recovery master
+# 5. Force a recovery
+# 6. Confirm that the value is value2
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
#
# Main test
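The scenario described above can be sketched directly with the ctdb tool; the database name is an example and $recmaster stands for whichever node "ctdb recmaster" reports:

  ctdb attach rsn_test.tdb                                        # volatile database
  onnode "$recmaster" ctdb writekey rsn_test.tdb testkey value1   # write on the recovery master
  ctdb recover                                                    # force a recovery
  onnode "$recmaster" ctdb writekey rsn_test.tdb testkey value2   # update without migration
  ctdb recover
  onnode "$recmaster" ctdb readkey rsn_test.tdb testkey           # must return value2, not value1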
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Older style of recovery using PULL_DB and PUSH_DB controls tries to
-construct a single large marshall buffer for all the records in the
-database. However, this approach is problematic as talloc restricts the
-maximum size of buffer to 256M. Also, trying to construct and send large
-buffers is inefficient and can cause CTDB daemon to be tied up for long
-periods of time.
+# Test recovery of large volatile and persistent databases
-Instead new style recovery is introduced using DB_PULL and
-DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
-batches of ~RecBufferSizeLimit in size at a time.
+# Older style of recovery using PULL_DB and PUSH_DB controls tries to
+# construct a single large marshall buffer for all the records in the
+# database. However, this approach is problematic as talloc restricts the
+# maximum size of buffer to 256M. Also, trying to construct and send large
+# buffers is inefficient and can cause CTDB daemon to be tied up for long
+# periods of time.
-Expected results:
-
-* The recovery should complete successfully
-
-EOF
-}
+# Instead new style recovery is introduced using DB_PULL and
+# DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
+# batches of ~RecBufferSizeLimit in size at a time.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
#
# Main test
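The batch size used by the new-style recovery is governed by the RecBufferSizeLimit tunable, so shrinking it forces many small DB_PULL/DB_PUSH transfers; a rough sketch (the value and database name are illustrative):

  onnode all ctdb setvar RecBufferSizeLimit 10000   # small batch size, in bytes
  ctdb attach recovery_test.tdb                     # fill this db with a large number of records
  ctdb recover                                      # force a recovery to exercise the batched path
  ctdb getvar RecBufferSizeLimit                    # confirm the value in effect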
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Ensure recovery doesn't resurrect deleted records from recently inactive nodes
-EOF
-}
+# Ensure recovery doesn't resurrect deleted records from recently
+# inactive nodes
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
testdb="rec_test.tdb"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-The persistent databases are recovered using sequence number.
-The recovery is performed by picking the copy of the database from the
-node that has the highest sequence number and ignore the content on all
-other nodes.
-
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. create a persistent test database
-3. test that no seqnum record blends the database during recovery
-4. test that seqnum record does not blend the database during recovery
-
-Expected results:
-
-* that 3,4 will recover the highest seqnum database
-
-EOF
-}
+# Ensure that persistent databases are correctly recovered by database
+# sequence number
+#
+# 1. Create and wipe a persistent test database
+# 2. Directly add a single record to the database on each node
+# 3. Trigger a recovery
+# 4. Ensure that the database contains only a single record
+#
+# Repeat but with sequence numbers set by hand on each node
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
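Recovery of a persistent database keeps the whole copy from the node with the highest sequence number, which is held in a reserved record; a sketch of comparing the copies (the database name is an example and the reserved key name is an assumption based on CTDB's convention):

  ctdb attach seqnum_test.tdb persistent
  # dump the local copy on every node and compare the __db_sequence_number__
  # record; after a recovery all nodes should hold the copy with the highest value
  onnode all ctdb cattdb seqnum_test.tdb
  ctdb recover
  onnode all ctdb cattdb seqnum_test.tdb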
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-This test confirms that the deleted records are not resurrected after recovery.
-
-Steps:
-
-1. Create a persistent database
-2. Add a record and update it few times.
-3. Delete the record
-4. Turn off one of the nodes
-5. Add a record with same key.
-6. Turn on the stopped node
-
-Expected results:
-
-* Check that the deleted record is present after recovery.
-
-EOF
-}
+# Confirm that the deleted records are not resurrected after recovery
+#
+# 1. Create a persistent database
+# 2. Add a record and update it a few times.
+# 3. Delete the record
+# 4. Use "ctdb stop" to stop one of the nodes
+# 5. Add a record with the same key.
+# 6. Use "ctdb continue" to restart the stopped node
+# 7. Confirm that the record still exists
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
do_test()
{
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that the ctdb ptrans works as expected
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Pipe some operation to ctdb ptrans and validate the TDB contents with ctdb catdb
-
-Expected results:
-
-* ctdb ptrans works as expected.
-EOF
-}
+# Verify that 'ctdb ptrans' works as expected
+#
+# Pipe some operation to ctdb ptrans and validate the TDB contents
+# with ctdb catdb
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
TESTDB="ptrans_test.tdb"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that the transaction_loop test succeeds.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Verify that the transaction_loop test succeeds
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
TESTDB="persistent_trans.tdb"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that the transaction_loop test succeeds with recoveries.
+# Verify that the transaction_loop test succeeds with recoveries.
-Prerequisites:
+. "${TEST_SCRIPTS_DIR}/integration.bash"
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+set -e
+
+ctdb_test_init
recovery_loop()
{
ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
}
-. "${TEST_SCRIPTS_DIR}/integration.bash"
-
-ctdb_test_init
-
-set -e
-
-cluster_is_healthy
-
TESTDB="persistent_trans.tdb"
try_command_on_node 0 "$CTDB attach $TESTDB persistent"
-#!/bin/bash
-
-test_info()
-{
- cat <<EOF
-UPDATE_RECORD control should be able to create new records and update
-existing records in a persistent database.
-
-Prerequisites:
-
-* An active CTDB cluster with at least one active node.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. create a persistent test database
-3, wipe the database to make sure it is empty
-4, create a new record
-5, update the record
-
-Expected results:
-
-* 4 created record found in the tdb
-* 5 updated record found in the tdb
-
-EOF
-}
+#!/usr/bin/env bash
+
+# Verify that "ctdb update_record_persistent" creates new records and
+# updates existing records in a persistent database
+#
+# 1. Create and wipe a persistent test database
+# 2. Do a recovery
+# 3. Confirm that the database is empty
+# 4. Create a new record using "ctdb update_record_persistent"
+# 5. Confirm the record exists in the database using "ctdb cattdb"
+# 6. Update the record's value using "ctdb update_record_persistent"
+# 7. Confirm that the original value no longer exists using "ctdb cattdb"
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
try_command_on_node 0 $CTDB attach "$test_db" persistent
-# 3,
+# 3.
echo "Wipe the persistent test database"
try_command_on_node 0 $CTDB wipedb "$test_db"
echo "Force a recovery"
exit 1
fi
-# 4,
+# 4.
echo "Create a new record in the persistent database using UPDATE_RECORD"
try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
-D "$test_db" -k "Update_Record_Persistent" -v "FirstValue"
exit 1
fi
-# 5,
+# 5.
echo Modify an existing record in the persistent database using UPDATE_RECORD
try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
-D "$test_db" -k "Update_Record_Persistent" -v "SecondValue"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that the transaction_loop test succeeds with recoveries for replicated
-databases.
+# Verify that the transaction_loop test succeeds with recoveries for
+# replicated databases
-Prerequisites:
+. "${TEST_SCRIPTS_DIR}/integration.bash"
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+set -e
+
+ctdb_test_init
recovery_loop()
{
ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
}
-. "${TEST_SCRIPTS_DIR}/integration.bash"
-
-ctdb_test_init
-
-set -e
-
-cluster_is_healthy
-
TESTDB="replicated_trans.tdb"
try_command_on_node 0 "$CTDB attach $TESTDB replicated"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Confirm that traverses of volatile databases work as expected
-
-This is a very simple example. It writes a single record, updates it
-on another node and then confirms that the correct value is found when
-traversing. It then repeats this after removing the LMASTER role from
-the node where the value is updated.
-
-Expected results:
+# Confirm that traverses of volatile databases work as expected
-* The expected records should be found
-
-EOF
-}
+# This is a very simple example. It writes a single record, updates it
+# on another node and then confirms that the correct value is found when
+# traversing. It then repeats this after removing the LMASTER role from
+# the node where the value is updated.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
#
# Main test
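The traverse check boils down to writing a record on one node, updating it from another and then traversing; a minimal sketch (database and key names are examples):

  ctdb attach traverse_test.tdb
  onnode 0 ctdb writekey traverse_test.tdb testkey value1   # record created on node 0
  onnode 1 ctdb writekey traverse_test.tdb testkey value2   # migrated to node 1 and updated there
  ctdb catdb traverse_test.tdb    # cluster-wide traverse; should show value2 exactly once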
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Test CTDB cluster wide traverse code.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Create a test database
-2. Add records on different nodes
-3. Run traverse
-
-Expected results:
-
-* All records are retrieved.
-
-EOF
-}
+# Test cluster wide traverse code
+#
+# 1. Create a volatile test database
+# 2. Add records on different nodes
+# 3. Use "ctdb catdb" to confirm that all added records are present
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes"
num_nodes=$(echo "$out" | wc -l)
-#!/bin/bash
+#!/usr/bin/env bash
# Ensure that vacuuming deletes records on all nodes
-#!/bin/bash
+#!/usr/bin/env bash
# Ensure a full vacuuming run deletes records
-#!/bin/bash
+#!/usr/bin/env bash
# Ensure that vacuuming does not delete a record that is recreated
# before vacuuming completes. This needs at least 3 nodes.
-#!/bin/bash
+#!/usr/bin/env bash
# Confirm that a record is not vacuumed if it is locked when the 1st
# fast vacuuming run occurs on the node on which it was deleted, but
-#!/bin/bash
+#!/usr/bin/env bash
# Confirm that a record is vacuumed if it is locked on the deleting
# node when the 2nd fast vacuuming run occurs, but vacuuming is
-#!/bin/bash
+#!/usr/bin/env bash
# Confirm that a record is not vacuumed if it is locked on the lmaster
# when the 3rd fast vacuuming run occurs, but is dropped from the
-#!/bin/bash
+#!/usr/bin/env bash
# Confirm that a record is not vacuumed if it is locked on the
# deleting node when the 3rd fast vacuuming run occurs, but is dropped
-#!/bin/bash
+#!/usr/bin/env bash
# Confirm that a record is not vacuumed if it is locked on another
# (non-lmaster, non-deleting) node when the 3rd fast vacuuming run
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb ip' shows the correct output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb ip' on one of the nodes and verify the list of IP
- addresses displayed (cross check the result with the output of
- 'ip addr show' on the node).
-3. Verify that pipe-separated output is generated with the -X option.
-
-Expected results:
-
-* 'ctdb ip' shows the list of public IPs being served by a node.
-EOF
-}
+# Verify that 'ctdb ip' shows the correct output
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
echo "Getting list of public IPs..."
try_command_on_node -v 1 "$CTDB ip all | tail -n +2"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that an IP address can be added to a node using 'ctdb addip'.
-
-This test does not do any network level checks to make sure IP
-addresses are actually on interfaces. It just consults "ctdb ip".
-EOF
-}
+# Verify that an IP address can be added to a node using 'ctdb addip'
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
get_test_ip_mask_and_iface
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that a node's public IP address can be deleted using 'ctdb deleteip'.
-
-This test does not do any network level checks to make sure IP
-addresses are actually on interfaces. It just consults "ctdb ip".
-EOF
-}
+# Verify that a node's public IP address can be deleted using 'ctdb deleteip'
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that IPs can be rearrranged using 'ctdb reloadips'.
-
-Various sub-tests that remove addresses from the public_addresses file
-on a node or delete the entire contents of the public_addresses file.
+# Verify that IPs can be reconfigured using 'ctdb reloadips'
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Expected results:
-
-* When addresses are deconfigured "ctdb ip" no longer reports them and
- when added they are seen again.
-EOF
-}
+# Various sub-tests that remove addresses from the public_addresses file
+# on a node or delete the entire contents of the public_addresses file.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Check that CTDB operates correctly if:
+# Check that CTDB operates correctly if:
-* failover is disabled; or
-* there are 0 public IPs configured
-
-This test only does anything with local daemons. On a real cluster it
-has no way of updating configuration.
-EOF
-}
+# * failover is disabled; or
+# * there are 0 public IPs configured
. "${TEST_SCRIPTS_DIR}/integration.bash"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that an interface is deleted when all IPs on it are deleted.
-EOF
-}
+# Verify that an interface is deleted when all IPs on it are deleted
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb moveip' allows movement of public IPs between cluster nodes.
+# Verify that 'ctdb moveip' allows movement of public IPs between nodes
-This test does not do any network level checks to make sure IP
-addresses are actually on interfaces. It just consults "ctdb ip".
+# This test does not do any network level checks to make sure IP
+# addresses are actually on interfaces. It just consults "ctdb ip".
-To work, this test ensures that IPAllocAlgorithm is not set to 0
-(Deterministic IPs) and sets NoIPFailback.
-EOF
-}
+# To work, this test ensures that IPAllocAlgorithm is not set to 0
+# (Deterministic IPs) and sets NoIPFailback.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
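The tunables mentioned above and the move itself can be driven directly with the ctdb tool; the address and node number are placeholders, and IPAllocAlgorithm=2 (LCP2) is an assumption about a suitable non-deterministic setting:

  onnode all ctdb setvar IPAllocAlgorithm 2   # anything other than 0 (Deterministic IPs)
  onnode all ctdb setvar NoIPFailback 1       # addresses stay where they are moved to
  ctdb moveip 10.0.0.1 1                      # move a public address to node 1
  ctdb ip all                                 # confirm which node now hosts it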
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify the operation of "ctdb disable" and "ctdb enable"
-EOF
-}
+# Verify the operation of "ctdb disable" and "ctdb enable"
. "${TEST_SCRIPTS_DIR}/integration.bash"
+set -e
+
ctdb_test_init
########################################
-set -e
-
-cluster_is_healthy
-
select_test_node_and_ips
echo "Disabling node $test_node"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify the operation of "ctdb stop" and "ctdb continue"
-EOF
-}
+# Verify the operation of "ctdb stop" and "ctdb continue"
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb setvar NoIPTakeover 1' stops ip addresses from being failed
-over onto the node.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Use 'ctdb ip' on one of the nodes to list the IP addresses being
- served.
-3. Use 'ctdb moveip' to move an address from one node to another.
-4. Verify that the IP is no longer being hosted by the first node and is now being hosted by the second node.
-
-Expected results:
-
-* 'ctdb moveip' allows an IP address to be moved between cluster nodes.
-EOF
-}
+# Verify that 'ctdb setvar NoIPTakeover 1' stops IP addresses being taken over
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
+set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
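A hedged sketch of what the NoIPTakeover check amounts to; the node numbers are illustrative:

  onnode 1 ctdb setvar NoIPTakeover 1   # node 1 should no longer take over addresses
  onnode 0 ctdb disable                 # release the public addresses held by node 0
  ctdb ip all                           # none of them should have moved to node 1
  onnode 0 ctdb enable                  # restore the original state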
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that the recovery daemon handles unhosted IPs properly.
+# Verify that the recovery daemon handles unhosted IPs properly
-This test does not do any network level checks to make sure the IP
-address is actually on an interface. It just consults "ctdb ip".
+# This test does not do any network level checks to make sure the IP
+# address is actually on an interface. It just consults "ctdb ip".
-This is a variation of the "addip" test.
-EOF
-}
+# This is a variation of the "addip" test.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Use 'onnode' to confirm connectivity between all cluster nodes.
-
-Steps:
-
-1. Do a recursive "onnode all" to make sure all the nodes can connect
- to each other. On a cluster this ensures that SSH keys are known
- between all hosts, which will stop output being corrupted with
- messages about nodes being added to the list of known hosts.
-
-Expected results:
-
-* 'onnode' works between all nodes.
-EOF
-}
+# Use 'onnode' to confirm connectivity between all cluster nodes
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
+set -e
-#
+ctdb_test_init
echo "Checking connectivity between nodes..."
onnode all onnode -p all hostname
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb listnodes' shows the list of nodes in a ctdb cluster.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb listnodes' on all the nodes of the cluster.
-3. Verify that one all the nodes the command displays a list of
- current cluster nodes.
-
-Expected results:
-
-* 'ctdb listnodes' displays the correct information.
-EOF
-}
+# Verify that 'ctdb listnodes' shows the list of nodes
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node -v 0 "$CTDB listnodes"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
-EOF
-}
+# Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node -v 0 "$CTDB listvars"
-#!/bin/bash
-
-test_info()
-{
- cat <<EOF
-Verify the operation of the 'ctdb ping' command.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run the 'ctdb ping' command on one of the nodes and verify that it
- shows valid and expected output.
-3. Shutdown one of the cluster nodes, using the 'ctdb shutdown'
- command.
-4. Run the 'ctdb ping -n <node>' command from another node to this
- node.
-5. Verify that the command is not successful since th ctdb daemon is
- not running on the node.
-
-Expected results:
-
-* The 'ctdb ping' command shows valid and expected output.
-EOF
-}
+#!/usr/bin/env bash
+
+# Verify the operation of the 'ctdb ping' command
+#
+# 1. Run the 'ctdb ping' command on one of the nodes and verify that it
+# shows valid and expected output.
+# 2. Shut down one of the cluster nodes, using the 'ctdb shutdown'
+# command.
+# 3. Run the 'ctdb ping -n <node>' command from another node to this
+# node.
+# 4. Verify that the command is not successful since the ctdb daemon is
+# not running on the node.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node -v 0 "$CTDB ping -n 1"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb getpid' works as expected.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb getpid -n <number>' on the nodes to check the PID of the
- ctdbd process.
-3. Verify that the output is valid.
-
-Expected results:
-
-* 'ctdb getpid' shows valid output.
-EOF
-}
+# Verify that 'ctdb getpid' works as expected
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb process-exists' shows correct information.
-
-The implementation is creative about how it gets PIDs for existing and
-non-existing processes.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. On one of the cluster nodes, get the PID of a ctdb client.
-3. Run 'ctdb process-exists <pid>' on the node and verify that the
- correct output is shown.
-4. Run 'ctdb process-exists <pid>' with a pid of ctdb daemon
- process and verify that the correct output is shown.
+# Verify that 'ctdb process-exists' shows correct information
-Expected results:
-
-* 'ctdb process-exists' shows the correct output.
-EOF
-}
+# The implementation is creative about how it gets PIDs for existing and
+# non-existing processes.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
test_node=1
srvid=0xAE00000012345678
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb statistics' works as expected.
+# Verify that 'ctdb statistics' works as expected
-This is pretty superficial and could do more validation.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb statistics' on a node, and verify that the output is
- valid.
-
-Expected results:
-
-* 'ctdb statistics' shows valid output on all the nodes.
-EOF
-}
+# This is pretty superficial and could do more validation.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
pattern='^(CTDB version 1|Current time of statistics[[:space:]]*:.*|Statistics collected since[[:space:]]*:.*|Gathered statistics for [[:digit:]]+ nodes|[[:space:]]+[[:alpha:]_]+[[:space:]]+[[:digit:]]+|[[:space:]]+(node|client|timeouts|locks)|[[:space:]]+([[:alpha:]_]+_latency|max_reclock_[[:alpha:]]+)[[:space:]]+[[:digit:]-]+\.[[:digit:]]+[[:space:]]sec|[[:space:]]*(locks_latency|reclock_ctdbd|reclock_recd|call_latency|lockwait_latency|childwrite_latency)[[:space:]]+MIN/AVG/MAX[[:space:]]+[-.[:digit:]]+/[-.[:digit:]]+/[-.[:digit:]]+ sec out of [[:digit:]]+|[[:space:]]+(hop_count_buckets|lock_buckets):[[:space:][:digit:]]+)$'
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb statisticsreset' works as expected.
-
-This is pretty superficial. It just checks that a few particular
-items reduce.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb statisticsreset' on all nodes and verify that it executes
- successfully.
+# Verify that 'ctdb statisticsreset' works as expected
-Expected results:
-
-* 'ctdb statisticsreset' executes successfully.
-EOF
-}
+# This is pretty superficial. It just checks that a few particular
+# items reduce.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify the operation of 'ctdb isnotrecmaster'.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb isnotrecmaster' on each node.
-
-3. Verify that only 1 node shows the output 'This node is the
- recmaster' and all the other nodes show the output 'This node is
- not the recmaster'.
-
-Expected results:
-
-* 'ctdb isnotrecmaster' shows the correct output.
-EOF
-}
+# Verify the operation of 'ctdb isnotrecmaster'
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
cmd="$CTDB isnotrecmaster || true"
try_command_on_node -v all "$cmd"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb stop' causes a node to yield the recovery master role.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Determine which node is the recmaster.
-2. Stop this node using the 'ctdb stop' command.
-3. Verify that the status of the node changes to 'stopped'.
-4. Verify that this node no longer has the recovery master role.
-
-Expected results:
-
-* The 'ctdb stop' command causes a node to yield the recmaster role.
-EOF
-}
+# Verify that 'ctdb stop' causes a node to yield the recovery master role
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
echo "Finding out which node is the recovery master..."
try_command_on_node -v 0 "$CTDB recmaster"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that "ctdb getreclock" gets the recovery lock correctly.
+# Verify that "ctdb getreclock" gets the recovery lock correctly
-Make sure the recovery lock is consistent across all nodes.
-EOF
-}
+# Make sure the recovery lock is consistent across all nodes.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
echo "Check that recovery lock is set the same on all nodes..."
try_command_on_node -v -q all $CTDB getreclock
-#!/bin/bash
-
-test_info()
-{
- cat <<EOF
-Check that CTDB operates correctly if the recovery lock is configured
-as a command.
-
-This test only does anything with local daemons. On a real cluster it
-has no way of updating configuration.
-EOF
-}
+#!/usr/bin/env bash
+
+# Check that CTDB operates correctly if the recovery lock is configured
+# as a command.
+
+# This test works only with local daemons. On a real cluster it has
+# no way of updating configuration.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-#!/bin/bash
+#!/usr/bin/env bash
# Verify that the cluster recovers if the recovery lock is removed.
-#!/bin/bash
+#!/usr/bin/env bash
# Verify that if the directory containing the recovery lock is moved
# then all nodes are banned (because they can't take the lock).
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Run the message_ring test and sanity check the output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Run the message_ring test and sanity check the output
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Run tunnel_test and sanity check the output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Run tunnel_test and sanity check the output
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify an error occurs if a ctdb command is run against a node without a ctdbd.
+# Verify an error occurs if a ctdb command is run against a node
+# without a ctdbd
-That is, check that an error message is printed if an attempt is made
-to execute a ctdb command against a node that is not running ctdbd.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Shutdown ctdb on a node using 'ctdb shutdown -n <node>'.
-3. Verify that the status of the node changes to 'DISCONNECTED'.
-4. Now run 'ctdb ip -n <node>' from another node.
-5. Verify that an error message is printed stating that the node is
- disconnected.
-6. Execute some other commands against the shutdown node. For example,
- disable, enable, ban, unban, listvars.
-7. For each command, verify that an error message is printed stating
- that the node is disconnected.
-
-Expected results:
-
-* For a node on which ctdb is not running, all commands display an
- error message stating that the node is disconnected.
-EOF
-}
+# That is, check that an error message is printed if an attempt is made
+# to execute a ctdb command against a node that is not running ctdbd.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
test_node=1
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Check that the CTDB version consistency checking operates correctly.
-EOF
-}
+# Check that the CTDB version consistency checking operates correctly
. "${TEST_SCRIPTS_DIR}/integration.bash"
ctdb_test_init
-cluster_is_healthy
-
select_test_node
try_command_on_node -v "$test_node" ctdb version
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb getdebug' works as expected.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Get the current debug level on a node, using 'ctdb getdebug -n <node>'.
-
-Expected results:
-
-* 'ctdb getdebug' shows the debug level on all the nodes.
-EOF
-}
+# Verify that 'ctdb getdebug' works as expected
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb setdebug' works as expected.
+# Verify that 'ctdb setdebug' works as expected.
-This is a little superficial. It checks that CTDB thinks the debug
-level has been changed but doesn't actually check that logging occurs
-at the new level.
-EOF
-}
+# This is a little superficial. It checks that CTDB thinks the debug
+# level has been changed but doesn't actually check that logging occurs
+# at the new level.
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify that 'ctdb dumpmemory' shows expected output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb dumpmemory' and verify that it shows expected output
-
-Expected results:
-
-* 'ctdb dumpmemory' sows valid output.
-EOF
-}
+# Verify that 'ctdb dumpmemory' shows expected output
. "${TEST_SCRIPTS_DIR}/integration.bash"
-ctdb_test_init
-
set -e
-cluster_is_healthy
+ctdb_test_init
pat='^([[:space:]].+[[:space:]]+contains[[:space:]]+[[:digit:]]+ bytes in[[:space:]]+[[:digit:]]+ blocks \(ref [[:digit:]]+\)[[:space:]]+0x[[:xdigit:]]+|[[:space:]]+reference to: .+|full talloc report on .+ \(total[[:space:]]+[[:digit:]]+ bytes in [[:digit:]]+ blocks\))$'
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Check that CTDB operated correctly if there are 0 event scripts
+# Check that CTDB operates correctly if there are 0 event scripts
-This test only does anything with local daemons. On a real cluster it
-has no way of updating configuration.
-EOF
-}
. "${TEST_SCRIPTS_DIR}/integration.bash"
-#!/bin/bash
+#!/usr/bin/env bash
-test_info()
-{
- cat <<EOF
-Verify CTDB's debugging of timed out eventscripts
-
-Prerequisites:
-
-* An active CTDB cluster with monitoring enabled
-
-Expected results:
-
-* When an eventscript times out the correct debugging is executed.
-EOF
-}
+# Verify CTDB's debugging of timed out eventscripts
. "${TEST_SCRIPTS_DIR}/integration.bash"
ctdb_test_init
-cluster_is_healthy
-
select_test_node
####################