From: Eric Bollengier
Date: Mon, 30 May 2022 09:00:31 +0000 (+0200)
Subject: regress: Add test for VirtualFull plugin support
X-Git-Tag: Beta-15.0.0~569
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=79cfa8586e2b31bffcba2f5b28f8f25d524176b7;p=thirdparty%2Fbacula.git

regress: Add test for VirtualFull plugin support
---

diff --git a/bacula/src/plugins/fd/test-plugin-fd.c b/bacula/src/plugins/fd/test-plugin-fd.c
index cca2b59c4..bbeb8000a 100644
--- a/bacula/src/plugins/fd/test-plugin-fd.c
+++ b/bacula/src/plugins/fd/test-plugin-fd.c
@@ -409,17 +409,60 @@ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
 {
    struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
    static int obj_uuid = 1;
+   int jobid;
+   bfuncs->getBaculaValue(ctx, bVarJobId, &jobid);
 
    if (!p_ctx) {
       return bRC_Error;
    }
    Dmsg1(0, "nb_obj = %d\n", p_ctx->nb_obj);
    if (p_ctx->job_level != 'F') {
-      if (p_ctx->nb_obj++ == 1) {
+      switch(p_ctx->nb_obj++) {
+      case 0:
+         sp->restore_obj.object_name = (char *)"james.xml";
+         sp->restore_obj.object = (char *)"This is test data for the restore object incr.";
+         sp->restore_obj.object_len = strlen(sp->restore_obj.object)+1;
+         sp->type = FT_RESTORE_FIRST;
+         break;
+      case 1:
+         // The file is needed to reference the plugin object
+         sp->type = FT_REG;
+         sp->link = sp->fname = (char *)NT_("/@testplugin/test.zero"); // Use the filename in argument
+         stat(p_ctx->reader, &sp->statp);
+         break;
+      case 2:
+         sp->plugin_obj.path = (char *)NT_("/@testplugin/test.zero");
+         sp->plugin_obj.plugin_name = (char *)NT_("Test Plugin");
+         sp->plugin_obj.object_category = (char *)NT_("Virtual Machine");
+         sp->plugin_obj.object_type = (char *)NT_("VMWare");
+         sp->plugin_obj.object_name = (char *)NT_("test vm");
+         sp->plugin_obj.object_source = (char *)NT_("test plugin source");
+         sp->plugin_obj.object_uuid = (char *)NT_("1234-abc-testplugin");
+         sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED;
+         sp->plugin_obj.count = 1;
+         sp->plugin_obj.object_size = jobid;
+         sp->type = FT_PLUGIN_OBJECT;
+         bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "Generating Plugin Object size=%lu\n", sp->plugin_obj.object_size);
+         break;
+      case 3:
+         // New version of a file present in full job
+         {
+            time_t now = time(NULL);
+            p_ctx->nb_obj++;
+            sp->type = FT_REG;
+            sp->fname = (char *)"/@size_update_file@";
+            sp->statp.st_mode = 0640;
+            sp->statp.st_ctime = now;
+            sp->statp.st_mtime = now;
+            sp->statp.st_atime = now;
+            sp->statp.st_size = -1;   /* Size is unknown at the beginning, should be updated in the next step */
+            sp->statp.st_blksize = 4096;
+            Dmsg0(0, "@size_update_file@ initial step\n");
+         }
+         break;
+
+      default:
          return bRC_Stop;
       }
-      sp->type = FT_REG;
-      sp->link = sp->fname = p_ctx->fname;
-      stat(p_ctx->reader, &sp->statp);
       return bRC_OK;
    }
@@ -681,7 +724,6 @@ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
       sp->plugin_obj.object_uuid = (char *)NT_("1234-abc-testplugin");
       sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED;
       sp->plugin_obj.count = 2;
-      sp->plugin_obj.object_size = obj_uuid++;
       sp->type = FT_PLUGIN_OBJECT;
       p_ctx->nb_obj++;
 
@@ -694,6 +736,7 @@ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
       sp->plugin_obj.object_name = (char *)NT_("test db");
       sp->plugin_obj.object_source = (char *)NT_("test plugin source");
       sp->plugin_obj.object_uuid = (char *)NT_("5678-abc-testplugin");
+      sp->plugin_obj.status = PLUG_OBJ_STATUS_ERROR;
       sp->plugin_obj.object_size = obj_uuid++;
       sp->type = FT_PLUGIN_OBJECT;
       p_ctx->nb_obj++;
@@ -707,6 +750,7 @@ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
       sp->plugin_obj.object_name = (char *)NT_("everything");
       sp->plugin_obj.object_source = (char *)NT_("test plugin source");
       sp->plugin_obj.object_uuid = (char *)NT_("5678-abc-testplugin");
+      sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED;
       sp->plugin_obj.object_size = obj_uuid++;
       sp->type = FT_PLUGIN_OBJECT;
       p_ctx->nb_obj++;
diff --git a/bacula/src/stored/append.c b/bacula/src/stored/append.c
index 236c40d14..4c9c76348 100644
--- a/bacula/src/stored/append.c
+++ b/bacula/src/stored/append.c
@@ -452,7 +452,7 @@ bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec)
    if (are_attributes_spooled(jcr)) {
       dir->set_spooling();
    }
-   Dmsg1(100, "Send attributes to dir. FI=%d\n", rec->FileIndex);
+   Dmsg1(850, "Send attributes to dir. FI=%d\n", rec->FileIndex);
    if (!dir_update_file_attributes(jcr->dcr, rec)) {
       Jmsg(jcr, M_FATAL, 0, _("Error updating file attributes. ERR=%s\n"),
          dir->bstrerror());
diff --git a/bacula/src/stored/vbackup.c b/bacula/src/stored/vbackup.c
index 56412094e..8d2f31a7c 100644
--- a/bacula/src/stored/vbackup.c
+++ b/bacula/src/stored/vbackup.c
@@ -308,7 +308,7 @@ static bool record_cb(DCR *dcr, DEV_RECORD *rec)
       goto bail_out;
    }
    jcr->JobBytes += rec->data_len;   /* increment bytes this job */
-   Dmsg5(200, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n",
+   Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n",
       jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId,
       stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len);
diff --git a/regress/tests/virtualfull-plugin-pvf-test b/regress/tests/virtualfull-plugin-pvf-test
index 865c622ae..0d21958f1 100755
--- a/regress/tests/virtualfull-plugin-pvf-test
+++ b/regress/tests/virtualfull-plugin-pvf-test
@@ -26,13 +26,9 @@ rm -f $cwd/build/inc1 $cwd/build/inc2 $cwd/build/diff1
 change_jobname NightlySave $JobName
 
 $bperl -e "add_attribute('$conf/bacula-dir.conf', 'DeleteConsolidatedJobs', 'yes', 'Job', 'Vbackup')"
-
 $bperl -e "add_attribute('$conf/bacula-dir.conf', 'BackupsToKeep', '3', 'Job', 'Vbackup')"
-
 $bperl -e "add_attribute('$conf/bacula-dir.conf', 'SpoolData', 'no', 'Job', 'Vbackup')"
-
 $bperl -e "add_attribute('$conf/bacula-dir.conf', 'NextPool', 'Full', 'Pool', 'Full')"
-
 $bperl -e "add_attribute('$conf/bacula-dir.conf', 'FileSet', 'PluginSet', 'Job', '$JobName')"
 
 #$bperl -e "add_attribute('$conf/bacula-dir.conf', 'runafterjob', '/tmp/regress/bin/vbackup.pl %i %l %p', 'Job', 'Vbackup')"
@@ -68,7 +64,9 @@ cat <${cwd}/tmp/bconcmds
 messages
 @$out ${cwd}/tmp/log1.out
 status client
-@#setdebug level=100 storage=File
+setdebug level=100 storage=File trace=1
+setdebug level=100 client trace=1
+setdebug level=100 tags=sql dir trace=1
 label storage=File volume=FileVolume001 Pool=Default
 label storage=DiskChanger volume=ChangerVolume001 slot=1 Pool=Full drive=0
 label storage=DiskChanger volume=ChangerVolume002 slot=2 Pool=Full drive=0
@@ -105,10 +103,18 @@ messages
 @$out $tmp/count.out
 list jobs
 .bvfs_get_jobids client=$HOST-fd count
+@$out $tmp/log123-ro.out
+.jlist pluginrestoreconf objecttype=all jobid=1,2,3
+@$out $tmp/log123-obj.out
+.jlist object jobid=1,2,3
 @$out $tmp/log5.out
 run job=$JobName level=VirtualFull yes
 wait
 messages
+@$out $tmp/log7-ro.out
+.jlist pluginrestoreconf objecttype=all jobid=7
+@$out $tmp/log7-obj.out
+.jlist object jobid=7
 @$out $tmp/count0.out
 list jobs
 .bvfs_get_jobids client=$HOST-fd count
@@ -121,6 +127,8 @@ messages
 @$out $tmp/count1.out
 list jobs
 .bvfs_get_jobids client=$HOST-fd count
+@$out $tmp/count74-obj.out
+.jlist object jobid=7,4
 @$out $tmp/log5.out
 run job=$JobName level=VirtualFull yes
 wait
@@ -130,6 +138,8 @@ list jobs
 .bvfs_get_jobids client=$HOST-fd count
 @$out $tmp/count3.out
 .bvfs_get_jobids jobid=8
+@$out $tmp/count9-obj.out
+.jlist object jobid=9
 @#
 @# now do a restore of the consolidated Full
 @#
@@ -181,6 +191,67 @@ if [ $? -ne 0 ]; then
     estat=1
 fi
 
+# TODO: Need to check object attributes as well
+cat $tmp/log123-ro.out $tmp/log7-ro.out | grep '{' | perl -MJSON -e '
+  $l = <>;
+  $j123 = JSON::decode_json($l);
+  $l = <>;
+  $j7 = JSON::decode_json($l);
+  exit (scalar(@{$j123->{data}}) == scalar(@{$j7->{data}}));
+'
+if [ $? != 1 ]; then
+    print_debug "ERROR: Should find same number of objects in $tmp/log123-ro.out and $tmp/log7-ro.out"
+    estat=1
+fi
+
+# Got 3 Objects from the Full and 1 from the Incremental
+cat $tmp/count74-obj.out | grep '{' | perl -MJSON -e '
+  $l = <>;
+  $j = JSON::decode_json($l);
+  exit (scalar(@{$j->{data}}) == 4);
+'
+if [ $? != 1 ]; then
+    print_debug "ERROR: Should find 4 objects in $tmp/count74-obj.out"
+    estat=1
+fi
+
+# Got 3 Objects from the Full and 1 from the Incremental => one overwritten by the VirtualFull
+cat $tmp/count9-obj.out | grep '{' | perl -MJSON -e '
+  $l = <>;
+  $j = JSON::decode_json($l);
+  ($d) = grep { $_->{objectcategory} eq "Virtual Machine" } @{$j->{data}};
+  exit 2 unless ($d->{objectsize} == 4);
+  exit 3 unless (scalar(@{$j->{data}}) == 3);
+  exit 1;
+'
+if [ $? != 1 ]; then
+    print_debug "ERROR: Should find 3 objects from jobid 9 in $tmp/count9-obj.out"
+    estat=1
+fi
+
+cat $tmp/log123-obj.out $tmp/log7-obj.out | grep '{' | perl -MJSON -e '
+  $l = <>;
+  $j123 = JSON::decode_json($l);
+  exit 1 unless (scalar(@{$j123->{data}}) == 5);        # Must find 5 Objects
+  exit 2 unless ($j123->{data}->[0]->{jobid} == 1);     # check the order
+  exit 3 unless ($j123->{data}->[1]->{jobid} == 1);     # check the order
+  exit 4 unless ($j123->{data}->[2]->{jobid} == 1);     # check the order
+  exit 5 unless ($j123->{data}->[3]->{jobid} == 2);     # check the order
+  exit 6 unless ($j123->{data}->[4]->{jobid} == 3);     # check the order
+  $l = <>;
+  $j7 = JSON::decode_json($l);
+  exit 7 unless (scalar(@{$j7->{data}}) == 3);          # Must find 3 Objects
+  exit 8 unless ($j7->{data}->[0]->{jobid} == 7);       # check the order
+  exit 9 unless ($j7->{data}->[1]->{jobid} == 7);       # check the order
+  exit 10 unless ($j7->{data}->[2]->{objectsize} == 3); # Objects in Jobs 1 and 2 are replaced
+  exit (0);
+'
+ret=$?
+if [ $ret != 0 ]; then
+    print_debug "ERROR: Got $ret in $tmp/log123-obj.out and $tmp/log7-obj.out analysis"
+    estat=1
+fi
+
 #
 # We only used one log so copy it to the second log
 # so that any restore errors will be picked up
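
Note (illustration only, not part of the patch): every check added above follows the same pattern, piping a single line of `.jlist` JSON into a short Perl program whose exit code selects the failed assertion, which the shell then compares against the expected value. Below is a minimal standalone sketch of that pattern. It assumes only the JSON shape visible in the test output ({"data":[{"jobid":...,"objectcategory":...,"objectsize":...},...]}); the script name check-objects.pl is hypothetical.

    #!/usr/bin/perl
    # Sketch only: count Plugin Objects per JobId from one line of `.jlist object`
    # output and verify that a "Virtual Machine" object is present. Field names
    # match the inline checks above; everything else is illustrative.
    use strict;
    use warnings;
    use JSON;

    my $line = <STDIN>;                 # one JSON document per line, as selected by grep '{'
    my $j = JSON::decode_json($line);
    my @objs = @{$j->{data}};

    my %per_job;
    $per_job{$_->{jobid}}++ for @objs;  # objects per JobId
    for my $jobid (sort { $a <=> $b } keys %per_job) {
       print "jobid=$jobid objects=$per_job{$jobid}\n";
    }

    # Exit code selects the failed assertion, as in the inline checks
    exit 1 unless @objs;                                                  # no object found at all
    my ($vm) = grep { $_->{objectcategory} eq "Virtual Machine" } @objs;  # VM Plugin Object
    exit 2 unless defined $vm;
    exit 0;

It would be driven the same way as the inline one-liners, for example: cat $tmp/count9-obj.out | grep '{' | perl check-objects.pl || estat=1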