--- /dev/null
+#
+#
+# Author: Jorge Gea
+# A set of useful functions to test File Daemon plugin
+# This file should be included in any {plugin}_helper.sh file
+#
+
+require_linux
+
# Fetch the joblog of jobid $1 through bconsole and store it in
# $tmp/joblog_<jobid>.out.
# Sets the global JOBLOG (log file name) read by the check_* helpers.
llist_joblog() {
    JOBLOG="joblog_${1}.out"
    printf '\n' > "$tmp/$JOBLOG"
    cat <<END_OF_DATA > "$tmp/bconcmds"
@$out ${cwd}/tmp/${JOBLOG}
llist joblog jobid=${1}
quit
END_OF_DATA

    # Session output lands in $tmp/$JOBLOG via the @$out redirection above
    run_bconsole
}
+
# Ask the Director to re-read its configuration files by sending a
# single "reload" command through bconsole.
bconsole_reload() {
 # NOTE(review): the leading whitespace before "reload" is part of the
 # here-document and is sent to bconsole as-is; bconsole appears to
 # tolerate it -- confirm before reformatting.
 cat <<END_OF_DATA >$tmp/bconcmds
 reload
END_OF_DATA

 run_bconsole
}
+
# Cancel every running job; "cancel all yes" answers the confirmation
# prompt so the command runs unattended.
bconsole_cancelall() {
 # Overwrite the shared command file consumed by run_bconsole below
 cat <<END_OF_DATA >$tmp/bconcmds
 cancel all yes
END_OF_DATA

 run_bconsole
}
+
# Verify from the joblog that job $1 terminated OK; sets estat=1 otherwise.
# A job is considered OK when its "Termination:" line mentions OK and
# contains neither Warning nor Error (case-insensitively).
check_job_ok_termination() {

    llist_joblog "${1}"

    if grep " Termination:" "$tmp/$JOBLOG" | grep -iv Warning | grep -iv Error | grep -q OK
    then
        print_debug "Job ${1} ended successfully"
    else
        print_debug "ERROR: Job ${1} ended in error according to $tmp/$JOBLOG and it should not!"
        print_debug "Listing error messages:"
        grep "Error" "$tmp/$JOBLOG"
        estat=1
    fi
}
+
# Verify from the joblog that job $1 did NOT terminate OK; sets estat=1
# when the job unexpectedly succeeded.
# Same OK criterion as check_job_ok_termination, with the verdict inverted.
check_job_fail_termination() {

    llist_joblog "${1}"

    if grep " Termination:" "$tmp/$JOBLOG" | grep -iv Warning | grep -iv Error | grep -q OK
    then
        print_debug "Job ${1} ended successfully and it should not!"
        estat=1
    else
        print_debug "OK: Job ${1} ended in error according to $tmp/$JOBLOG as expected"
        print_debug "Listing error messages:"
        grep -i "Error" "$tmp/$JOBLOG"
    fi
}
+
# Run the job named $JobName and capture the bconsole session in
# $tmp/runjob_HHMMSS.out.
# $1 - backup level (Full/Incremental/...)
# $2 - if non-empty, do not wait for the job to finish
# Globals set: BACKLOG (session log name), BACKUPID (JobId parsed from it)
run_job() {
    NOW=$(date +%H%M%S)
    BACKLOG=runjob_$NOW.out

    # By default we wait for the job; any second argument makes the run
    # asynchronous (the "wait" console command is simply omitted)
    WAIT="wait"
    if [ "$2" != "" ]
    then
        WAIT=""
    fi

    # If debug is enabled, also raise the FD debug level.
    # ${debug:-0} keeps 'test' from erroring out when $debug is unset.
    DEBUG_CLIENT=""
    if test "${debug:-0}" -eq 1 ; then
        DEBUG_CLIENT="setdebug level=600 trace=1 options=t client=127.0.0.1-fd"
    fi

    echo "" > $tmp/$BACKLOG
    cat <<END_OF_DATA >$tmp/bconcmds
@$out ${cwd}/tmp/$BACKLOG
$DEBUG_CLIENT
run job=$JobName level=$1 yes
$WAIT
status client
quit
END_OF_DATA

    # If bacula is already running, just run bconsole
    if ! check_bacula_running
    then
        run_bacula
    else
        run_bconsole
    fi

    # NOTE(review): if the session log contains several JobId= lines this
    # yields a multi-line value; callers appear to rely on a single run.
    BACKUPID=$(grep JobId= $tmp/$BACKLOG | sed 's/.*=//')
}
+
# Run a job at level $1 and assert it terminated OK.
# BACKUPID (set by run_job) holds the jobid of the backup that ran.
run_job_and_check() {
    run_job "${1}"
    check_job_ok_termination "${BACKUPID}"
}
+
# Run a job at level $1 and assert it did NOT terminate OK.
# BACKUPID (set by run_job) holds the jobid of the backup that ran.
run_job_and_check_fail() {
    run_job "${1}"
    check_job_fail_termination "${BACKUPID}"
}
+
# Launch a job at level $1 without waiting, give it time to start, cancel
# everything, then fetch the joblog of the cancelled job.
run_job_and_cancel() {
    run_job "${1}" "noWait"
    # Let the job get past its startup phase before cancelling
    sleep 25
    bconsole_cancelall
    sleep 5
    llist_joblog "${BACKUPID}"
}
+
# Emit a fresh timestamp-based identifier (REGRESS_YYYYmmddHHMMSS),
# usually used to name restore directories/VMs uniquely.
new_unique_dirname() {
    TODAY=$(date +%Y%m%d%H%M%S)
    printf 'REGRESS_%s\n' "${TODAY}"
}
+
# Verify that directory $1 contains at least $2 regular files
# (recursively); sets estat=1 when fewer are present.
check_n_files_in_local_path() {
    NUMRESTORED=$(find "${1}" -type f | wc -l)
    if [ "${NUMRESTORED}" -ge "${2}" ]
    then
        print_debug "OK: We found enough files in ${1}: ${NUMRESTORED}"
    else
        print_debug "ERROR: Not enough files found in ${1}."
        estat=1
    fi
}
+
# Check that string $1 appears at least $3 times (case-insensitively,
# counted per matching line) in log file $tmp/$2; sets estat=1 otherwise.
check_n_files_in_log() {
    # grep -c counts matching lines directly (no extra wc process);
    # -- protects against patterns that start with a dash
    NUMRESTORED=$(grep -ic -- "${1}" "$tmp/${2}")
    # Missing/unreadable file leaves the substitution empty: treat as 0,
    # matching the old "grep | wc -l" behavior
    NUMRESTORED=${NUMRESTORED:-0}
    if [ "${NUMRESTORED}" -lt "${3}" ]
    then
        print_debug "ERROR: Not enough ${1} found in $tmp/${2}. Less than ${3} required"
        estat=1
    else
        print_debug "OK: We found enough ${1} in $tmp/${2}: ${NUMRESTORED}"
    fi
}
+
# Check that string $1 does NOT appear in log file $tmp/$2; sets estat=1
# if any match is found.
check_no_files_in_log() {
    NUMRESTORED=$(grep -c -- "${1}" "$tmp/${2}")
    NUMRESTORED=${NUMRESTORED:-0}
    # Any occurrence at all is a failure: the previous "-gt 1" comparison
    # silently let a single forbidden match pass
    if [ "${NUMRESTORED}" -gt 0 ]
    then
        print_debug "ERROR: Files ${1} were found in $tmp/${2} and should not!"
        estat=1
    else
        print_debug "OK: File ${1} was not found in $tmp/${2}"
    fi
}
+
# Run an "estimate ... listing" of the pluginTest job after
# (re)configuring it, capturing the session in $tmp/$1.
# $1 - name of the output file (created under $tmp) for the session log
run_estimate() {
 # NOTE(review): this generic FD-plugin helper calls the m365-specific
 # job setup; looks like a copy-paste from the m365 helper -- confirm
 # whether the setup function should be parameterized per plugin.
 m365_setup_job

 # Apply the new configuration without restarting the Director
 bconsole_reload

 cat <<END_OF_DATA >$tmp/bconcmds
@$out ${cwd}/tmp/$1
estimate job=pluginTest listing
quit
END_OF_DATA

 run_bconsole
}
+
# Start a restore session purely to list what the backup of jobid $1
# contains under directory $2; the session log goes to $tmp/$3.
# "done" without answering "yes" aborts before anything is restored.
show_backup_contents() {
    printf '\n' > "$tmp/${3}"
    cat <<END_OF_DATA > "$tmp/bconcmds"
@$out ${cwd}/tmp/${3}
restore jobid=${1} Client=127.0.0.1-fd where="/"
cd "$2"
dir
estimate
done
END_OF_DATA

    run_bconsole
}
\ No newline at end of file
--- /dev/null
+#
+#
+# Author: Jorge Gea
+# A set of useful functions to test rhv plugin
+# This file should be included after scripts/functions
+#
+
+require_linux
+
+. scripts/plugin-helpers.sh
+
# Build and install the RHV plugin: compile the C part and the java
# backend, then copy the binaries into the regress tree.
# Skipped entirely when RHV_NOCOMPILE is set (plugin assumed installed).
# Uses RHV_REPO, BEE_PLUGINS_REPO and the framework vars $working/$bin/$lib.
rhv_plugin_install() {
    # Make sure no stale backend JVM survives from a previous run
    rhv_kill

    if [ -n "$RHV_NOCOMPILE" ]
    then
        return
    fi

    # Maven wrapper used by the plugin's Makefile
    MVN=${BEE_PLUGINS_REPO}/utils/mvn.wrapper
    export MVN

    RHV_JAR=${RHV_REPO}/target/bacula-rhv-plugin.jar
    RHV_BACKEND=${RHV_REPO}/bin/rhv_backend

    mkdir -p ${working}/rhv

    # Compiling java part & generating the .jar
    make -C ${RHV_REPO}

    # Installing the C side of the plugin
    make -C build/src/plugins/fd install-rhv

    cp ${RHV_BACKEND} ${bin}/

    # Point the backend's base= setting at the regress directory
    # (GNU sed -i; this helper is Linux-only, see require_linux above)
    sed -i "/base=/c\base=${PWD}" ${bin}/rhv_backend

    mkdir -p ${lib}/
    cp ${RHV_JAR} ${lib}/

    rhv_truststore_check
}
+
# Kill any running java process of the RHV plugin backend so no stale
# background process interferes with the next test.
rhv_kill() {
    # A single awk replaces the grep -v grep | xargs | head chain: print
    # the PID of the first java process whose command line mentions
    # rhv-plugin (the awk process itself never matches "java")
    RHVPID=$(ps ax | grep java | grep rhv-plugin | grep -v grep | awk 'NR==1 {print $1}')
    if [ -n "${RHVPID}" ]
    then
        # SIGKILL on purpose: a stuck JVM may ignore SIGTERM
        kill -9 "${RHVPID}"
    fi
}
+
# Append " name=\"value\"" (value wrapped in escaped quotes) to
# rhv_plugin_line when the value is non-empty.
# $1 - option name, $2 - option value
_rhv_append_quoted() {
    if [ "${2}" != "" ]
    then
        rhv_plugin_line="${rhv_plugin_line} ${1}=\\\"${2}\\\""
    fi
}

# Append " name=value" (no quoting) to rhv_plugin_line when the value is
# non-empty.
# $1 - option name, $2 - option value
_rhv_append_raw() {
    if [ "${2}" != "" ]
    then
        rhv_plugin_line="${rhv_plugin_line} ${1}=${2}"
    fi
}

# Based on the RHV_* env vars, build the FileSet plugin line in the
# global rhv_plugin_line (consumed by rhv_setup_job).
# Option order is preserved from the original implementation.
rhv_setup_plugin_line() {
    rhv_plugin_line="rhv:"
    _rhv_append_quoted server "${RHV_SERVER}"
    _rhv_append_quoted truststore_file "${RHV_TRUSTSTORE_FILE}"
    _rhv_append_quoted truststore_password "${RHV_TRUSTSTORE_PASSWORD}"
    _rhv_append_quoted auth "${RHV_AUTH}"
    _rhv_append_raw profile "${RHV_PROFILE}"
    _rhv_append_quoted user "${RHV_USER}"
    _rhv_append_quoted password "${RHV_PASSWORD}"
    _rhv_append_quoted target_virtualmachine "${RHV_TARGET_VIRTUALMACHINE}"
    _rhv_append_quoted target_exclude_disks "${RHV_TARGET_EXCLUDE_DISKS}"
    _rhv_append_quoted target_exclude_vms "${RHV_TARGET_EXCLUDE_VMS}"
    _rhv_append_quoted target_virtualmachine_regex "${RHV_TARGET_VIRTUALMACHINE_REGEX}"
    _rhv_append_raw target_configuration_only "${RHV_TARGET_CONFIGURATION_ONLY}"
    _rhv_append_raw proxy_vm "${RHV_PROXY_VM}"
}
+
# Generate the plugin line (from RHV_* env vars), write the FileSet
# definition to $conf/FS_RHV.conf and attach it to the pluginTest job.
rhv_setup_job() {

 rhv_setup_plugin_line

 echo "" > $conf/FS_RHV.conf

 # Fileset Definition; the heredoc expands ${rhv_plugin_line} built above
 cat << EOF >> $conf/FS_RHV.conf

FileSet {
 Name = FS_RHV
 Include {
 Options {
 signature = MD5
 compression = LZO
 }
 Plugin = "${rhv_plugin_line}"
 }
}

EOF

 # Delete any FS_RHV line so we are sure only our new FS file is applied
 # (this also removes a previously appended @-include, re-added below)
 sed -i '/FS_RHV/d' $conf/bacula-dir.conf
 echo "@$conf/FS_RHV.conf" >> $conf/bacula-dir.conf

 # Turn on auto-label (GNU sed -i; helper is Linux-only via require_linux)
 sed -i "s%# Label Format% Label Format%" $conf/bacula-dir.conf

 # NOTE(review): single quotes keep $conf literal here; presumably bperl
 # expands it in its own environment -- confirm against scripts/functions
 $bperl -e 'add_attribute("$conf/bacula-dir.conf", "FileSet", "FS_RHV" , "Job", "pluginTest")'
 $bperl -e 'add_attribute("$conf/bacula-dir.conf", "Accurate", "yes", "Job", "pluginTest")'
}
+
# Common initialization for RHV tests: cleanup, plugin build/install
# (unless RHV_NOCOMPILE is set), required-variable checks and start_test.
# Globals set: JobName, RHV_DEBUG
rhv_init_test() {

    JobName=pluginTest

    scripts/cleanup
    scripts/copy-plugin-confs

    # ${debug:-0} keeps 'test' from erroring out when $debug is unset
    # (same fix as in run_job)
    if test "${debug:-0}" -eq 1 ; then
        RHV_DEBUG=2
    else
        RHV_DEBUG=0
    fi

    # With RHV_NOCOMPILE we don't check java, we don't install and we
    # don't check the plugin
    if [ -z "$RHV_NOCOMPILE" ]
    then
        check_java
        rhv_plugin_install
    fi

    is_var_defined "$RHV_SERVER" "RHV_SERVER"
    is_var_defined "$RHV_USER" "RHV_USER"
    is_var_defined "$RHV_PASSWORD" "RHV_PASSWORD"

    start_test

    if [ -z "$RHV_NOCOMPILE" ]
    then
        check_plugin_available "rhv-fd"
    fi
}
+
# Append a "mod / <option number> / <value>" sequence to the bconsole
# restore dialog script when the value is non-empty.
# $1 - plugin restore menu option number, $2 - value to set
_rhv_restore_mod() {
    if [ "${2}" != "" ]; then
        echo "mod" >> $tmp/bconcmds
        echo "${1}" >> $tmp/bconcmds
        echo "${2}" >> $tmp/bconcmds
    fi
}

# Build the bconsole script for a plugin restore in $tmp/bconcmds.
# $1 - jobid to restore, $2 - directory to restore, $3 - session log name
# The RHV_* env vars below drive the optional plugin restore options,
# each selected by its number in the plugin restore menu.
rhv_gen_restore_command() {

    # Restore under RHV_RESTORELOCALPATH when provided, else in place
    if [ "${RHV_RESTORELOCALPATH}" != "" ]
    then
        where=${RHV_RESTORELOCALPATH}
    else
        where=/
    fi

    echo "" > $tmp/${3}
    cat <<EOF > $tmp/bconcmds
@$out ${cwd}/tmp/${3}
restore jobid=${1} Client=127.0.0.1-fd where="${where}"
cd "$2"
dir
estimate
mark *
done
mod
13
EOF
    # Optional parameters (one "mod" dialog round-trip per non-empty var)
    _rhv_restore_mod 8 "${RHV_VM_DISKS}"
    _rhv_restore_mod 9 "${RHV_VM_EXCLUDE_DISKS}"
    _rhv_restore_mod 10 "${RHV_VM_CLUSTER}"
    _rhv_restore_mod 11 "${RHV_VM_STORAGE}"
    _rhv_restore_mod 12 "${RHV_VM_NAME}"
    _rhv_restore_mod 13 "${RHV_RESTORE_FORCE_OVERWRITE}"
    _rhv_restore_mod 14 "${RHV_RESTORE_SWITCH_ON}"
    _rhv_restore_mod 15 "${RHV_RESTORE_DISK_INTERFACE}"
    _rhv_restore_mod 16 "${RHV_RESTORE_DISK_ACTIVE}"
    _rhv_restore_mod 17 "${RHV_RESTORE_DISK_BOOT}"
    _rhv_restore_mod 18 "${RHV_RESTORE_NICS}"
    _rhv_restore_mod 19 "${RHV_RESTORE_DISK_NAMES}"
    _rhv_restore_mod 20 "${RHV_RESTORE_TEMPLATE_NAME}"

    cat <<EOF >> $tmp/bconcmds
yes
yes
wait
quit
EOF
}
+
+
# Generate and execute the restore of jobid $1 from directory $2, logging
# the session to $tmp/$3, then verify the restore job terminated OK.
rhv_run_restore_and_check() {
    rhv_gen_restore_command "${1}" "${2}" "${3}"

    # Execute the prepared restore session
    run_bconsole

    # Give the Director a moment to flush the job log
    sleep 5

    # Extract the restore JobId from the captured session
    RESTOREID=$(grep JobId= $tmp/${3} | sed 's/.*=//')

    check_job_ok_termination "${RESTOREID}"
}
+
# Build a java truststore in $working/rhv containing the RHV engine's CA
# certificate, downloaded from the server.
# Requires: JAVA_TRUSTSTORE, JAVA_KEY_TOOL, RHV_SERVER, RHV_TRUSTSTORE_PASSWORD
# Sets the global RHV_TRUSTSTORE_FILE.
rhv_truststore_check() {

    RHV_TRUSTSTORE_FILE="${working}/rhv/cacerts"

    check_file ${JAVA_TRUSTSTORE}
    cp ${JAVA_TRUSTSTORE} ${working}/rhv

    # Make sure the truststore directory/file is writable before the download
    touch ${RHV_TRUSTSTORE_FILE}_tmp
    if [ ! -f ${RHV_TRUSTSTORE_FILE}_tmp ]; then
        print_debug "Error: Check truststore directory permissions. The directory must be writable"
        exit 1
    fi
    if [ ! -w ${RHV_TRUSTSTORE_FILE}_tmp ]; then
        print_debug "Error: Check truststore file permissions. The file must be writable"
        exit 1
    fi
    # -f makes curl report HTTP errors through its exit status; the old
    # "[ ! -f file ]" check could never fail because touch above had
    # already created the file
    if ! curl -fsS -o ${RHV_TRUSTSTORE_FILE}_tmp "http://${RHV_SERVER}/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA"; then
        print_debug "Error: downloading public certificate!"
        exit 1
    fi
    # keytool output is not interesting; discard it instead of capturing
    # it into an unused variable
    ${JAVA_KEY_TOOL} -import -alias RHV_ALIAS_TEST -file ${RHV_TRUSTSTORE_FILE}_tmp -keystore ${RHV_TRUSTSTORE_FILE} -storepass ${RHV_TRUSTSTORE_PASSWORD} -noprompt > /dev/null

    check_file ${RHV_TRUSTSTORE_FILE}

    print_debug "Truststore generated successfully"
}
--- /dev/null
+#!/bin/sh
+#
+# Copyright (C) 2000-2020 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Attempt to backup and restore a VM with the RHV Plugin
+#
+# HOWTO:
+# Add to your ./config
+#
+# RHV_REPO=/path/to/git/rhv
+#
+# JAVA_TRUSTSTORE=/etc/ssl/certs/java/cacerts
+# JAVA_KEY_TOOL=keytool
+#
+# RHV_SERVER="rhv.fqdn"
+# RHV_AUTH="http"
+# RHV_USER="admin"
+# RHV_PROFILE="internal"
+# RHV_PASSWORD="rhvadminpass"
+# RHV_TRUSTSTORE_PASSWORD="changeit"
+# RHV_TARGET_VIRTUALMACHINE="VMNameToBackup"
+#
+
+# ==== Test initialization =======================
TestName="rhv-backup-restore-vm-test"
. scripts/functions
. scripts/rhv-helpers.sh

is_var_defined "$RHV_TARGET_VIRTUALMACHINE" "RHV_TARGET_VIRTUALMACHINE"

rhv_init_test
# ================================================

# ==== Run backup ================================
rhv_setup_job
run_job_and_check "Full"
# ================================================

# ==== Run restore ===============================
RESTORE_SOURCE="/@rhv/"
RHV_RESTORE_FORCE_OVERWRITE=yes
# Command substitution: new_unique_dirname is a function, not a variable;
# the former ${new_unique_dirname} always expanded to an empty string
RHV_VM_NAME=$(new_unique_dirname)

rhv_run_restore_and_check ${BACKUPID} ${RESTORE_SOURCE} "log-session.out"
# ================================================

# ==== End and cleaning=============================
stop_bacula
end_test
# ==================================================
--- /dev/null
+#!/bin/sh
+#
+# Copyright (C) 2000-2020 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Attempt to backup and restore a VM with the RHV Plugin
+#
+# HOWTO:
+# Add to your ./config
+#
+# RHV_REPO=/path/to/git/rhv
+#
+# JAVA_TRUSTSTORE=/etc/ssl/certs/java/cacerts
+# JAVA_KEY_TOOL=keytool
+#
+# RHV_SERVER="rhv.fqdn"
+# RHV_AUTH="http"
+# RHV_USER="admin"
+# RHV_PROFILE="internal"
+# RHV_PASSWORD="rhvadminpass"
+# RHV_TRUSTSTORE_PASSWORD="changeit"
+# RHV_TARGET_VIRTUALMACHINE="VMNameToBackup"
+# RHV_PROXY_REMOTE_ADDR=1.2.3.4
+# RHV_PROXY_REMOTE_PASSWORD=6eorureoureo
+# RHV_PROXY_REMOTE_DIRNAME=oVirt-VM-BEE-dir
+# RHV_PROXY_VM=proxy-VM
+#
+# TODO:
+# - Test Incremental backup
+# - Test from different storages (ISCSI & NFS)
+# - Test Backup Template
+# - Test restore variations (play with id, name, etc)
+#
+
+# ==== Test initialization =======================
TestName="rhv-proxy-vm-test"
. scripts/functions
. scripts/rhv-helpers.sh

is_var_defined "$RHV_TARGET_VIRTUALMACHINE" "RHV_TARGET_VIRTUALMACHINE"
is_var_defined "$RHV_PROXY_REMOTE_ADDR" "RHV_PROXY_REMOTE_ADDR"
is_var_defined "$RHV_PROXY_REMOTE_PASSWORD" "RHV_PROXY_REMOTE_PASSWORD"
is_var_defined "$RHV_PROXY_REMOTE_DIRNAME" "RHV_PROXY_REMOTE_DIRNAME"
is_var_defined "$RHV_PROXY_VM" "RHV_PROXY_VM"

rhv_init_test
# ================================================

# ==== Proxy client config: Where FD and RHV plugin is installed and running ====================================
# We use our local client as if it was the remote, so we put remote address and the password the remote FD has
$bperl -e "add_attribute('$conf/bacula-dir.conf', 'address', '$RHV_PROXY_REMOTE_ADDR', 'Client', '127.0.0.1-fd')"
$bperl -e "add_attribute('$conf/bacula-dir.conf', 'password', '$RHV_PROXY_REMOTE_PASSWORD', 'Client', '127.0.0.1-fd')"
$bperl -e "add_attribute('$conf/bacula-dir.conf', 'FDPort', '9102', 'Client', '127.0.0.1-fd')"
$bperl -e "add_attribute('$conf/bacula-dir.conf', 'SD Calls Client', 'yes', 'Client', '127.0.0.1-fd')"

# We need to know how the remote FD is configured, so we need to know the remote dirname in remote bacula-fd.conf
$bperl -e "add_attribute('$conf/bacula-dir.conf', 'Name', '$RHV_PROXY_REMOTE_DIRNAME', 'Director')"
$bperl -e "add_attribute('$conf/bacula-sd.conf', 'Name', '$RHV_PROXY_REMOTE_DIRNAME', 'Director')"

# ==== Run backup ================================
RHV_TRUSTSTORE_FILE=/opt/bacula/etc/rhv.cacerts
rhv_setup_job
run_job_and_check "Full"
# ================================================

# ==== Run restore ===============================
RESTORE_SOURCE="/@rhv/"
RHV_RESTORE_FORCE_OVERWRITE=yes
# Command substitution: new_unique_dirname is a function, not a variable;
# the former ${new_unique_dirname} always expanded to an empty string
RHV_VM_NAME=$(new_unique_dirname)

rhv_run_restore_and_check ${BACKUPID} ${RESTORE_SOURCE} "log-session.out"
# ================================================

# ==== End and cleaning=============================
stop_bacula
end_test
# ==================================================