git.ipfire.org Git - thirdparty/bacula.git/commitdiff
regress: Add test to check objects after a migration
author Eric Bollengier <eric@baculasystems.com>
Wed, 11 May 2022 08:50:13 +0000 (10:50 +0200)
committer Eric Bollengier <eric@baculasystems.com>
Thu, 14 Sep 2023 11:56:58 +0000 (13:56 +0200)
regress/tests/migration-plugin-test [new file with mode: 0755]

diff --git a/regress/tests/migration-plugin-test b/regress/tests/migration-plugin-test
new file mode 100755 (executable)
index 0000000..f4c056c
--- /dev/null
@@ -0,0 +1,141 @@
+#!/bin/sh
+#
+# Copyright (C) 2000-2021 Kern Sibbald
+# Copyright (C) 2021-2022 Bacula Systems SA
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Run a simple backup using the Bacula test plugin, then migrate it
+#   to another device.
+#
+# This script uses the virtual disk autochanger.
+#
+TestName="migration-plugin-test"
+JobName=MigrateJobSave
+. scripts/functions
+
+
+scripts/cleanup
+scripts/copy-migration-confs
+
+make -C $cwd/build/src/plugins/fd install-test-plugin
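+# (install-test-plugin is assumed to build and install the test file-daemon
+#  plugin that the "Plugins" FileSet defined below relies on)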
+
+echo "${cwd}/build" >${cwd}/tmp/file-list
+change_jobname NightlySave $JobName
+
+cat <<EOF >> $conf/bacula-dir.conf
+FileSet {
+  Name = "Plugins"
+  Include {
+     Plugin = "test-plugin:/@test-plugin@/fr.po:/tmp/regress/build/po/fr.po:/dev/null"
+  }
+}
+EOF
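+# The colon-separated test-plugin string above appears to expose the local
+# file /tmp/regress/build/po/fr.po under the virtual backup path
+# /@test-plugin@/fr.po and to send restored data to /dev/null; the plugin is
+# also what produces the RestoreObject/metadata/object records verified at
+# the end of this script.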
+
+
+start_test
+
+#
+# Note, we first backup into Pool Default,
+#          then migrate into Pool Full.
+#              Pool Default uses Storage=File
+#              Pool Full    uses Storage=DiskChanger
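+#
+# For reference, a rough sketch of the Pool resources that
+# scripts/copy-migration-confs is expected to provide (the real definitions
+# live in that script, this is only an illustration):
+#
+#   Pool {
+#     Name = Default
+#     Pool Type = Backup
+#     Storage = File
+#     Next Pool = Full          # where the migration job sends the data
+#   }
+#   Pool {
+#     Name = Full
+#     Pool Type = Backup
+#     Storage = DiskChanger
+#   }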
+
+# Write out bconsole commands
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output /dev/null
+messages
+@$out ${cwd}/tmp/log1.out
+setdebug level=100 tags=sql trace=1 dir
+label storage=File volume=FileVolume001 Pool=Default
+label storage=DiskChanger volume=ChangerVolume001 slot=1 Pool=Full drive=0
+label storage=DiskChanger volume=ChangerVolume002 slot=2 Pool=Full drive=0
+@# run one backup job (it will then be migrated)
+status client
+run job=$JobName fileset=Plugins yes
+wait
+messages
+list jobs
+list volumes
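+@# Dump the RestoreObject, metadata and object catalog records of the
+@# backup job (jobid 1) in JSON before the migration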
+@output $tmp/log-ro1.out
+.jlist restoreobjects jobid=1
+@output $tmp/log-meta1.out
+.jlist metadata type=email tenant=xxxx owner=xxxx jobid=1
+@output $tmp/log-object1.out
+.jlist object jobid=1
+@$out ${cwd}/tmp/log1.out
+@#setdebug level=100 dir
+@# should migrate jobid 1 to the Full pool
+@#setdebug level=51 storage=DiskChanger
+@#setdebug level=100 storage=File tags=dedup,asx,network options=h
+run job=migrate-job jobid=1 yes
+wait
+messages
+@#purge volume=FileVolume001
+list jobs 
+list volumes
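+@# Run the same catalog queries after the migration, both for the original
+@# jobid 1 and for the new job created by the migration (jobid 3)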
+@output $tmp/log-ro1-after.out
+.jlist restoreobjects jobid=1
+@output $tmp/log-meta1-after.out
+.jlist metadata type=email tenant=xxxx owner=xxxx jobid=1
+@output $tmp/log-object1-after.out
+.jlist object jobid=1
+@output $tmp/log-ro3.out
+.jlist restoreobjects jobid=3
+@output $tmp/log-meta3.out
+.jlist metadata type=email tenant=xxxx owner=xxxx jobid=3
+@output $tmp/log-object3.out
+.jlist object jobid=3
+@$out ${cwd}/tmp/log3.out
+@# 
+@# now do a restore
+@#
+@$out ${cwd}/tmp/log2.out
+list volumes 
+@#setdebug level=15 director
+@#setdebug level=150 storage=DiskChanger
+@# Note, here we are restoring from the migrated job (jobid 3),
+@#  so the data now resides on the DiskChanger volumes
+restore where=${cwd}/tmp/bacula-restores fileset=Plugins jobid=3 select
+unmark *
+mark *
+done
+yes
+list volumes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bacula
+check_for_zombie_jobs storage=File
+stop_bacula
+
+check_two_logs
+
+
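+# The backup job (jobid 1, captured before the migration) and the migrated
+# job (jobid 3) must both report the plugin records in the catalog: exactly
+# 1 RestoreObject, 2 metadata and 3 object entries.  Each perl one-liner
+# exits with status 1 when the JSON "data" array returned by .jlist has the
+# expected size.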
+for i in 1 3; do
+    grep '{' $tmp/log-ro$i.out | perl -MJSON -e '$l = <>; $j = JSON::decode_json($l) ; exit (scalar(@{$j->{data}}) == 1);'
+    if [ $? != 1 ]; then
+        estat=2
+        print_debug "ERROR: Should find exactly 1 RestoreObject record for job $i in $tmp/log-ro$i.out"
+    fi
+
+    grep '{' $tmp/log-meta$i.out | perl -MJSON -e '$l = <>; $j = JSON::decode_json($l) ; exit (scalar(@{$j->{data}}) == 2);'
+    if [ $? != 1 ]; then
+        estat=2
+        print_debug "ERROR: Should find exactly 2 Metadata records for job $i in $tmp/log-meta$i.out"
+    fi
+
+    grep '{' $tmp/log-object$i.out | perl -MJSON -e '$l = <>; $j = JSON::decode_json($l) ; exit (scalar(@{$j->{data}}) == 3);'
+    if [ $? != 1 ]; then
+        estat=2
+        print_debug "ERROR: Should find exactly 3 Object records for job $i in $tmp/log-object$i.out"
+    fi
+done
+#grep 'Backup Level:' tmp/log3.out  | grep Incremental > /dev/null
+#if [ $? != 0 ]; then
+#    bstat=2
+#    print_debug "The incremental job must use copied jobs"
+#fi
+
+end_test