]> git.ipfire.org Git - thirdparty/bacula.git/commitdiff
BEE Backport regress/tests/cloud-xfer-test
Author: Norbert Bizet <norbert.bizet@baculasystems.com>
Wed, 2 Sep 2020 13:20:57 +0000 (15:20 +0200)
Committer: Eric Bollengier <eric@baculasystems.com>
Tue, 1 Mar 2022 14:36:18 +0000 (15:36 +0100)
This commit is the result of the squash of the following main commits:

Author: Eric Bollengier <eric@baculasystems.com>
Date:   Tue Jul 21 10:28:13 2020 +0200

    regress: Add copyright to regress scripts

Author: Eric Bollengier <eric@baculasystems.com>
Date:   Tue Sep 3 10:56:14 2019 +0200

    regress: tweak cloud-xfer-test

Author: Norbert Bizet <norbert.bizet@baculasystems.com>
Date:   Mon Sep 2 15:03:35 2019 -0400

    regress: Add a test to reproduce cloud download issue MT5373

regress/tests/cloud-xfer-test [new file with mode: 0755]

diff --git a/regress/tests/cloud-xfer-test b/regress/tests/cloud-xfer-test
new file mode 100755 (executable)
index 0000000..1ca04f3
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/sh
+#
+# Copyright (C) 2000-2020 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Run a cloud backup of the Bacula build directory
+# with a very high number of parts and try to restore it
+#
+# The test should reproduce the issue #5373. It was a string
+# edition problem, and we force it with a very long JobId
+# and a large number of parts.
+#
+TestName="cloud-xfer-test"
+JobName=backup
+# Pull in the regress helper functions (start_test, run_bacula, ...)
+. scripts/functions
+
+# Skip this test unless a cloud backend is configured for regress
+require_cloud
+
+scripts/cleanup
+scripts/copy-confs
+
+#
+# Zap out any schedule in default conf file so that
+#  it doesn't start during our test
+#
+outf="$tmp/sed_tmp"
+echo "s%  Schedule =%# Schedule =%g" >${outf}
+cp $scripts/bacula-dir.conf $tmp/1
+sed -f ${outf} $tmp/1 >$scripts/bacula-dir.conf
+
+change_jobname BackupClient1 $JobName
+start_test
+
+# Use a tiny part size (1000 bytes) so the backup produces a very
+# large number of cloud parts -- one of the two triggers for #5373
+$bperl -e 'add_attribute("$conf/bacula-sd.conf", "MaximumPartSize", "1000", "Device")'
+
+
+cat <<END_OF_DATA >$tmp/bconcmds
+@output /dev/null
+messages
+@$out $tmp/log1.out
+@# Force a very long (10-digit) JobId, the second trigger for #5373.
+@# NOTE(review): pg_catalog.setval is PostgreSQL-specific -- this test
+@# presumably requires a PostgreSQL catalog; confirm before running elsewhere.
+sql
+SELECT pg_catalog.setval('public.job_jobid_seq', 1000000000, true);
+
+label volume=TestVolume001 storage=File1 pool=File slot=1 drive=0
+run job=$JobName yes
+run job=$JobName yes
+wait
+run job=$JobName yes
+wait
+messages
+@# 
+@# now do a restore and truncate the volume first
+@#
+@$out $tmp/log2.out  
+setdebug level=50 tags=cloud trace=1 storage=File1
+@# Truncate the local cache so the restore must download parts back
+@# from the cloud, exercising the faulty download path
+cloud truncate storage=File1 volume=TestVolume001
+@exec "ls -1 $tmp/TestVolume001"
+restore where=$tmp/bacula-restores select all done
+yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bacula
+check_for_zombie_jobs storage=File1
+stop_bacula
+
+# Compare backup/restore job logs and verify the restored tree
+# matches the original build directory
+check_two_logs
+check_restore_diff
+end_test