From: Norbert Bizet Date: Wed, 2 Sep 2020 13:20:57 +0000 (+0200) Subject: BEE Backport regress/tests/cloud-xfer-test X-Git-Tag: Release-11.3.2~1224 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6cefe542e1ff4521173cdd30bfb66681be66fd50;p=thirdparty%2Fbacula.git BEE Backport regress/tests/cloud-xfer-test This commit is the result of the squash of the following main commits: Author: Eric Bollengier Date: Tue Jul 21 10:28:13 2020 +0200 regress: Add copyright to regress scripts Author: Eric Bollengier Date: Tue Sep 3 10:56:14 2019 +0200 regress: tweak cloud-xfer-test Author: Norbert Bizet Date: Mon Sep 2 15:03:35 2019 -0400 regress: Add a test to reproduce cloud download issue MT5373 --- diff --git a/regress/tests/cloud-xfer-test b/regress/tests/cloud-xfer-test new file mode 100755 index 0000000000..1ca04f31c4 --- /dev/null +++ b/regress/tests/cloud-xfer-test @@ -0,0 +1,71 @@ +#!/bin/sh +# +# Copyright (C) 2000-2020 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# +# Run a cloud backup of the Bacula build directory +# with a very high number of parts and try to restore it +# +# The test should reproduce the issue #5373. It was a string +# edition problem, and we force it with a very long JobId +# and a large number of parts. +# +TestName="cloud-xfer-test" +JobName=backup +. 
scripts/functions

# Skip the test unless the regress setup provides a cloud storage backend.
require_cloud

scripts/cleanup
scripts/copy-confs

#
# Zap out any schedule in default conf file so that
# it doesn't start during our test
#
outf="$tmp/sed_tmp"
echo "s% Schedule =%# Schedule =%g" >${outf}
cp $scripts/bacula-dir.conf $tmp/1
sed -f ${outf} $tmp/1 >$scripts/bacula-dir.conf

change_jobname BackupClient1 $JobName
start_test

# Force a very small part size so the backup produces a large number of
# cloud parts (needed to reproduce the string-edition bug from MT5373).
$bperl -e 'add_attribute("$conf/bacula-sd.conf", "MaximumPartSize", "1000", "Device")'

# NOTE(review): the gitweb dump showed "cat <$tmp/bconcmds"; the heredoc
# redirection "<<END_OF_DATA >" was eaten by HTML mangling. Restored here to
# the standard regress pattern that writes the bconsole command file.
cat <<END_OF_DATA >$tmp/bconcmds
@output /dev/null
messages
@$out $tmp/log1.out
sql
SELECT pg_catalog.setval('public.job_jobid_seq', 1000000000, true);

label volume=TestVolume001 storage=File1 pool=File slot=1 drive=0
run job=$JobName yes
run job=$JobName yes
wait
run job=$JobName yes
wait
messages
@#
@# now do a restore and truncate the volume first
@#
@$out $tmp/log2.out
setdebug level=50 tags=cloud trace=1 storage=File1
cloud truncate storage=File1 volume=TestVolume001
@exec "ls -1 $tmp/TestVolume001"
restore where=$tmp/bacula-restores select all done
yes
wait
messages
quit
END_OF_DATA

run_bacula
check_for_zombie_jobs storage=File1
stop_bacula

# Compare the backup and restore job logs, then diff the restored tree
# against the original build directory.
check_two_logs
check_restore_diff
end_test