The main change is to source cluster.bash instead of integration.bash.
While touching the preamble, the following additional changes are
made:
* Drop test_info() definition and replace it with a comment
  The use of test_info() is pointless.
* Drop call to ctdb_test_check_real_cluster()
  cluster.bash now does this (sketched below).
* Drop call to cluster_is_healthy()
  This is a holdover from when the previous test would restart daemons
  to get things ready for a test. There was also a bug where going
  into recovery during the restart would sometimes cause the cluster
  to become unhealthy. If we really need something like this then we
  can add it to ctdb_test_init(), as sketched below.
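For reference, the real-cluster check is assumed to move into
cluster.bash roughly as follows (a minimal sketch only; the actual
contents of cluster.bash may differ):

    . "${TEST_SCRIPTS_DIR}/integration.bash"

    # Every test that sources cluster.bash needs a real or virtual
    # cluster, so do the check once here instead of in each test.
    ctdb_test_check_real_cluster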
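If a health check does turn out to be necessary, a minimal sketch of
how it could be folded into ctdb_test_init() (hypothetical; the real
function performs other per-test setup that is not shown here):

    ctdb_test_init ()
    {
        # ... existing per-test setup ...

        # Hypothetical addition: wait here for the cluster to become
        # healthy so that individual tests no longer need to call
        # cluster_is_healthy themselves.
        cluster_is_healthy
    }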
Signed-off-by: Martin Schwenke <martin@meltin.net>
Reviewed-by: Amitay Isaacs <amitay@gmail.com>
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a node's public IP address can be deleted using 'ctdb deleteip'.
+# Verify that a node's public IP address can be deleted using 'ctdb deleteip'.
-This is an extended version of simple/17_ctdb_config_delete_ip.sh
-EOF
-}
+# This is an extended version of simple/17_ctdb_config_delete_ip.sh
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
get_test_ip_mask_and_iface
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that adding/deleting IPs using 'ctdb reloadips' works
+# Verify that adding/deleting IPs using 'ctdb reloadips' works
-Checks that when IPs are added to and deleted from a single node then
-those IPs are actually assigned and unassigned from the specified
-interface.
+# Checks that when IPs are added to and deleted from a single node then
+# those IPs are actually assigned and unassigned from the specified
+# interface.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with public IP addresses configured
+# * An active CTDB cluster with public IP addresses configured
-Expected results:
+# Expected results:
-* When IPs are added to a single node then they are assigned to an
- interface.
+# * When IPs are added to a single node then they are assigned to an
+# interface.
-* When IPs are deleted from a single node then they disappear from an
- interface.
-EOF
-}
+# * When IPs are deleted from a single node then they disappear from an
+# interface.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
####################
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that NFS connections are monitored and that NFS tickles are sent.
+# Verify that NFS connections are monitored and that NFS tickles are sent.
-Create a connection to the NFS server on a node. Then disable the
-relevant NFS server node and ensure that it sends an appropriate reset
-packet. The packet must come from the releasing node.
+# Create a connection to the NFS server on a node. Then disable the
+# relevant NFS server node and ensure that it sends an appropriate reset
+# packet. The packet must come from the releasing node.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-* Cluster nodes must be listening on the NFS TCP port (2049).
+# * Cluster nodes must be listening on the NFS TCP port (2049).
-Expected results:
+# Expected results:
-* CTDB on the releasing node should correctly send a reset packet when
- the node is disabled.
-EOF
-}
+# * CTDB on the releasing node should correctly send a reset packet when
+# the node is disabled.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
test_port=2049
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that NFS connections are monitored and that NFS tickles are sent.
+# Verify that NFS connections are monitored and that NFS tickles are sent.
-We create a connection to the NFS server on a node and confirm that
-this connection is registered in the nfs-tickles/ subdirectory in
-shared storage. Then kill ctdbd on the relevant NFS server node and
-ensure that the takeover node sends an appropriate reset packet.
+# We create a connection to the NFS server on a node and confirm that
+# this connection is registered in the nfs-tickles/ subdirectory in
+# shared storage. Then kill ctdbd on the relevant NFS server node and
+# ensure that the takeover node sends an appropriate reset packet.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-* Cluster nodes must be listening on the NFS TCP port (2049).
+# * Cluster nodes must be listening on the NFS TCP port (2049).
-Expected results:
+# Expected results:
-* CTDB should correctly record the socket and on failover the takeover
- node should send a reset packet.
-EOF
-}
+# * CTDB should correctly record the socket and on failover the takeover
+# node should send a reset packet.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
# We need this for later, so we know how long to run nc for.
try_command_on_node any $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that CIFS connections are monitored and that CIFS tickles are sent.
+# Verify that CIFS connections are monitored and that CIFS tickles are sent.
-We create a connection to the CIFS server on a node and confirm that
-this connection is registered by CTDB. Then disable the relevant CIFS
-server node and ensure that the takeover node sends an appropriate
-reset packet.
+# We create a connection to the CIFS server on a node and confirm that
+# this connection is registered by CTDB. Then disable the relevant CIFS
+# server node and ensure that the takeover node sends an appropriate
+# reset packet.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-* Clustered Samba must be listening on TCP port 445.
+# * Clustered Samba must be listening on TCP port 445.
-Expected results:
+# Expected results:
-* CTDB should correctly record the connection and the takeover node
- should send a reset packet.
-EOF
-}
+# * CTDB should correctly record the connection and the takeover node
+# should send a reset packet.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
# We need this for later, so we know how long to sleep.
try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a gratuitous ARP is sent when a node is failed out.
+# Verify that a gratuitous ARP is sent when a node is failed out.
-We ping a public IP and lookup the MAC address in the ARP table. We
-then disable the node and check the ARP table again - the MAC address
-should have changed. This test does NOT test connectivity after the
-failover.
+# We ping a public IP and look up the MAC address in the ARP table. We
+# then disable the node and check the ARP table again - the MAC address
+# should have changed. This test does NOT test connectivity after the
+# failover.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. Remove any entries for the chosen address from the ARP table.
-4. Send a single ping request packet to the selected public address.
-5. Determine the MAC address corresponding to the public address by
- checking the ARP table.
-6. Disable the selected node.
-7. Check the ARP table and check the MAC associated with the public
- address.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. Remove any entries for the chosen address from the ARP table.
+# 4. Send a single ping request packet to the selected public address.
+# 5. Determine the MAC address corresponding to the public address by
+# checking the ARP table.
+# 6. Disable the selected node.
+# 7. Check the ARP table and check the MAC associated with the public
+# address.
-Expected results:
+# Expected results:
-* When a node is disabled the MAC address associated with public
- addresses on that node should change.
-EOF
-}
+# * When a node is disabled the MAC address associated with public
+# addresses on that node should change.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
echo "Removing ${test_ip} from the local ARP table..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a newly started CTDB node gets updated tickle details
+# Verify that a newly started CTDB node gets updated tickle details
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Cluster nodes must be listening on the NFS TCP port (2049).
+# * Cluster nodes must be listening on the NFS TCP port (2049).
-Steps:
+# Steps:
-As with 31_nfs_tickle.sh but restart a node after the tickle is
-registered.
+# As with 31_nfs_tickle.sh but restart a node after the tickle is
+# registered.
-Expected results:
+# Expected results:
-* CTDB should correctly communicated tickles to new CTDB instances as
- they join the cluster.
-EOF
-}
+# * CTDB should correctly communicate tickles to new CTDB instances as
+# they join the cluster.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
# We need this for later, so we know how long to run nc for.
try_command_on_node any $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that the server end of an SMB connection is correctly reset
+# Verify that the server end of an SMB connection is correctly reset
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-* Clustered Samba must be listening on TCP port 445.
+# * Clustered Samba must be listening on TCP port 445.
-Expected results:
+# Expected results:
-* CTDB should correctly record the connection and the releasing node
- should reset the server end of the connection.
-EOF
-}
+# * CTDB should correctly record the connection and the releasing node
+# should reset the server end of the connection.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
# We need this for later, so we know how long to sleep.
try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that the server end of an NFS connection is correctly reset
+# Verify that the server end of an NFS connection is correctly reset
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-* Cluster nodes must be listening on the NFS TCP port (2049).
+# * Cluster nodes must be listening on the NFS TCP port (2049).
-Expected results:
+# Expected results:
-* CTDB should correctly record the connection and the releasing node
- should reset the server end of the connection.
-EOF
-}
+# * CTDB should correctly record the connection and the releasing node
+# should reset the server end of the connection.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
# We need this for later, so we know how long to sleep.
try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that it is possible to ping a public address after disabling a node.
+# Verify that it is possible to ping a public address after disabling a node.
-We ping a public IP, disable the node hosting it and then ping the
-public IP again.
+# We ping a public IP, disable the node hosting it and then ping the
+# public IP again.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. Send a single ping request packet to the selected public address.
-4. Disable the selected node.
-5. Send another single ping request packet to the selected public address.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. Send a single ping request packet to the selected public address.
+# 4. Disable the selected node.
+# 5. Send another single ping request packet to the selected public address.
-Expected results:
+# Expected results:
-* When a node is disabled the public address fails over and the
- address is still pingable.
-EOF
-}
+# * When a node is disabled the public address fails over and the
+# address is still pingable.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
echo "Removing ${test_ip} from the local neighbor table..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that it is possible to SSH to a public address after disabling a node.
+# Verify that it is possible to SSH to a public address after disabling a node.
-We SSH to a public IP and check the hostname, disable the node hosting
-it and then SSH again to confirm that the hostname has changed.
+# We SSH to a public IP and check the hostname, disable the node hosting
+# it and then SSH again to confirm that the hostname has changed.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. SSH to the selected public address and run hostname.
-4. Disable the selected node.
-5. SSH to the selected public address again and run hostname.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. SSH to the selected public address and run hostname.
+# 4. Disable the selected node.
+# 5. SSH to the selected public address again and run hostname.
-Expected results:
+# Expected results:
-* When a node is disabled the public address fails over and it is
- still possible to SSH to the node. The hostname should change.
-EOF
-}
+# * When a node is disabled the public address fails over and it is
+# still possible to SSH to the node. The hostname should change.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
select_test_node_and_ips
echo "Removing ${test_ip} from the local neighbor table..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a mounted NFS share is still operational after failover.
+# Verify that a mounted NFS share is still operational after failover.
-We mount an NFS share from a node, write a file via NFS and then
-confirm that we can correctly read the file after a failover.
+# We mount an NFS share from a node, write a file via NFS and then
+# confirm that we can correctly read the file after a failover.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. Select the 1st NFS share exported on the node.
-4. Mount the selected NFS share.
-5. Create a file in the NFS mount and calculate its checksum.
-6. Disable the selected node.
-7. Read the file and calculate its checksum.
-8. Compare the checksums.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. Select the 1st NFS share exported on the node.
+# 4. Mount the selected NFS share.
+# 5. Create a file in the NFS mount and calculate its checksum.
+# 6. Disable the selected node.
+# 7. Read the file and calculate its checksum.
+# 8. Compare the checksums.
-Expected results:
+# Expected results:
-* When a node is disabled the public address fails over and it is
- possible to correctly read a file over NFS. The checksums should be
- the same before and after.
-EOF
-}
+# * When a node is disabled the public address fails over and it is
+# possible to correctly read a file over NFS. The checksums should be
+# the same before and after.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
nfs_test_setup
echo "Create file containing random data..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a file created on a node is readable via NFS after a failover.
+# Verify that a file created on a node is readable via NFS after a failover.
-We write a file into an exported directory on a node, mount the NFS
-share from a node, verify that we can read the file via NFS and that
-we can still read it after a failover.
+# We write a file into an exported directory on a node, mount the NFS
+# share from a node, verify that we can read the file via NFS and that
+# we can still read it after a failover.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. Select the 1st NFS share exported on the node.
-4. Write a file into exported directory on the node and calculate its
- checksum.
-5. Mount the selected NFS share.
-6. Read the file via the NFS mount and calculate its checksum.
-7. Compare checksums.
-8. Disable the selected node.
-9. Read the file via NFS and calculate its checksum.
-10. Compare the checksums.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. Select the 1st NFS share exported on the node.
+# 4. Write a file into the exported directory on the node and calculate its
+# checksum.
+# 5. Mount the selected NFS share.
+# 6. Read the file via the NFS mount and calculate its checksum.
+# 7. Compare checksums.
+# 8. Disable the selected node.
+# 9. Read the file via NFS and calculate its checksum.
+# 10. Compare the checksums.
-Expected results:
+# Expected results:
-* Checksums for the file on all 3 occasions should be the same.
-EOF
-}
+# * Checksums for the file on all 3 occasions should be the same.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
nfs_test_setup
echo "Create file containing random data..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that a mounted NFS share is still operational after failover.
+# Verify that a mounted NFS share is still operational after failover.
-We mount an NFS share from a node, write a file via NFS and then
-confirm that we can correctly read the file after a failover.
+# We mount an NFS share from a node, write a file via NFS and then
+# confirm that we can correctly read the file after a failover.
-Prerequisites:
+# Prerequisites:
-* An active CTDB cluster with at least 2 nodes with public addresses.
+# * An active CTDB cluster with at least 2 nodes with public addresses.
-* Test must be run on a real or virtual cluster rather than against
- local daemons.
+# * Test must be run on a real or virtual cluster rather than against
+# local daemons.
-* Test must not be run from a cluster node.
+# * Test must not be run from a cluster node.
-Steps:
+# Steps:
-1. Verify that the cluster is healthy.
-2. Select a public address and its corresponding node.
-3. Select the 1st NFS share exported on the node.
-4. Mount the selected NFS share.
-5. Create a file in the NFS mount and calculate its checksum.
-6. Kill CTDB on the selected node.
-7. Read the file and calculate its checksum.
-8. Compare the checksums.
+# 1. Verify that the cluster is healthy.
+# 2. Select a public address and its corresponding node.
+# 3. Select the 1st NFS share exported on the node.
+# 4. Mount the selected NFS share.
+# 5. Create a file in the NFS mount and calculate its checksum.
+# 6. Kill CTDB on the selected node.
+# 7. Read the file and calculate its checksum.
+# 8. Compare the checksums.
-Expected results:
+# Expected results:
-* When a node is disabled the public address fails over and it is
- possible to correctly read a file over NFS. The checksums should be
- the same before and after.
-EOF
-}
+# * When CTDB is killed on a node the public address fails over and it is
+# possible to correctly read a file over NFS. The checksums should be
+# the same before and after.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
ctdb_test_init
-ctdb_test_check_real_cluster
-
-cluster_is_healthy
-
nfs_test_setup
echo "Create file containing random data..."
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that the recovery daemon correctly handles a rogue IP
+# Verify that the recovery daemon correctly handles a rogue IP
-It should be released...
-EOF
-}
+# It should be released...
-. "${TEST_SCRIPTS_DIR}/integration.bash"
-
-ctdb_test_init
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips
#!/bin/bash
-test_info()
-{
- cat <<EOF
-Verify that TAKE_IP will work for an IP that is already on an interface
+# Verify that TAKE_IP will work for an IP that is already on an interface
-This is a variation of simple/60_recoverd_missing_ip.sh
-EOF
-}
+# This is a variation of simple/60_recoverd_missing_ip.sh
-. "${TEST_SCRIPTS_DIR}/integration.bash"
-
-ctdb_test_init
+. "${TEST_SCRIPTS_DIR}/cluster.bash"
set -e
-cluster_is_healthy
+ctdb_test_init
select_test_node_and_ips