-lbaccfg -lbac -lm $(DLIB) $(DB_LIBS) $(LIBS) \
$(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
+store_mngr_test: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) dird.c store_mngr.c
+ $(RMF) store_mngr.o
+ $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) store_mngr.c
+ $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SVROBJS) $(ZLIBS) \
+ -lbacfind -lbacsql -lbaccats -lbaccfg -lbac -lm $(DLIB) $(DB_LIBS) $(LIBS) \
+ $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
+
Makefile: $(srcdir)/Makefile.in $(topdir)/config.status
cd $(topdir) \
&& CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
wstore_group = true;
/* Apply policy for the write storage list */
- jcr->store_mngr->apply_write_policy();
+ jcr->store_mngr->apply_write_policy(jcr);
Dmsg2(100, "Configured storages: %s, source: %s\n",
jcr->store_mngr->print_origin_wlist(), jcr->store_mngr->get_wsource());
Dmsg2(100, "Possible storage choices after applying \"%s\" policy: %s\n",
{"CheckMalware", store_bool, ITEM(res_job.CheckMalware), 0, 0, 0},
{"Storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, 0, 0},
{"StorageGroupPolicy", store_storage_mngr, ITEM(res_job.storage_policy), 0, 0, 0},
+ {"StorageGroupPolicyThreshold", store_size64, ITEM(res_job.storage_policy_threshold), 0, 0, 0},
{"Pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
{"NextPool", store_res, ITEM(res_job.next_pool), R_POOL, 0, 0},
{"FullBackupPool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
{"NextPool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0},
{"Storage", store_alist_res, ITEM(res_pool.storage), R_STORAGE, 0, 0},
{"StorageGroupPolicy", store_storage_mngr, ITEM(res_pool.storage_policy), 0, 0, 0},
+ {"StorageGroupPolicyThreshold", store_size64, ITEM(res_pool.storage_policy_threshold), 0, 0, 0},
{"AutoPrune", store_bool, ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, true},
{"Recycle", store_bool, ITEM(res_pool.Recycle), 0, ITEM_DEFAULT, true},
{"RecyclePool", store_res, ITEM(res_pool.RecyclePool), R_POOL, 0, 0},
FILESET *fileset; /* What to backup -- Fileset */
alist *storage; /* Where is device -- list of Storage to be used */
char *storage_policy; /* Storage policy (e.g. listed order, least used...) */
+   uint64_t storage_policy_threshold; /* Storage policy threshold size value (optional) */
POOL *pool; /* Where is media -- Media Pool */
POOL *next_pool; /* Next Pool for Copy/Migrate/VirtualFull */
POOL *full_pool; /* Pool for Full backups */
POOL *NextPool; /* Next pool for migration */
alist *storage; /* Where is device -- list of Storage to be used */
char *storage_policy; /* Storage policy (e.g. listed order, least used...) */
+   uint64_t storage_policy_threshold; /* Storage policy threshold size value (optional) */
bool use_catalog; /* maintain catalog for media */
bool catalog_files; /* maintain file entries in catalog */
bool use_volume_once; /* write on volume only once */
static bool job_check_maxruntime(JCR *jcr);
static bool job_check_maxrunschedtime(JCR *jcr);
static void set_jcr_default_store(JCR *jcr, JOB *job);
-static void init_store_manager(JCR *jcr, const char *policy);
+static void init_store_manager(JCR *jcr, const char *policy, int64_t policy_threshold=0);
static const int dbglvl_store_mngr = 200;
/* Imported subroutines and variables */
goto bail_out;
}
- init_store_manager(jcr, StorageManager::get_default_policy());
+ init_store_manager(jcr, StorageManager::get_default_policy(), 0);
jcr->store_mngr->set_wstorage(store.store, store.store_source);
if (!cancel_sd_job(ua, "cancel", jcr)) {
ua->error_msg(_("Failed to cancel storage dameon job for JobId=%d\n"), jcr->JobId);
}
/* Init storage manager with specified storage group policy */
-static void init_store_manager(JCR *jcr, const char *policy)
+static void init_store_manager(JCR *jcr, const char *policy, int64_t policy_threshold)
{
if (jcr->store_mngr) {
if (strcmp(jcr->store_mngr->get_policy_name(), policy) == 0) {
} else if (strcmp(policy, "FreeSpace") == 0) {
Dmsg1(dbglvl_store_mngr, "Setting FreeSpace storage group policy for JobId: %d\n", jcr->JobId);
jcr->store_mngr = New(FreeSpaceStore());
+ } else if (strcmp(policy, "LastBackupedTo") == 0) {
+ Dmsg1(dbglvl_store_mngr, "Setting LastBackupedTo storage group policy for JobId: %d\n", jcr->JobId);
+ jcr->store_mngr = New(LastBackupedToStore());
+ } else if (strcmp(policy, "FreeSpaceLeastUsed") == 0) {
+ Dmsg1(dbglvl_store_mngr, "Setting FreeSpaceLeastUsed storage group policy for JobId: %d\n", jcr->JobId);
+ jcr->store_mngr = New(FreeSpaceLeastUsedStore(policy_threshold));
} else {
Dmsg1(dbglvl_store_mngr, "Invalid policy for JobId: %d, setting default (ListedOrder)\n", jcr->JobId);
jcr->store_mngr = New(ListedOrderStore());
* If no policy is defined, use the default one.
*/
const char *store_policy = StorageManager::get_default_policy();
+ uint64_t store_policy_threshold = 0;
if (job->pool->storage_policy) {
Dmsg1(dbglvl_store_mngr, "Using Storage Group Policy from the Pool resource for JobId: %d\n", jcr->JobId);
store_policy = job->pool->storage_policy;
+ store_policy_threshold = job->pool->storage_policy_threshold;
} else if (job->storage_policy) {
Dmsg1(dbglvl_store_mngr, "Using Storage Group Policy from the Job resource for JobId: %d\n", jcr->JobId);
store_policy = job->storage_policy;
+ store_policy_threshold = job->storage_policy_threshold;
}
- init_store_manager(jcr, store_policy);
+ init_store_manager(jcr, store_policy, store_policy_threshold);
/* Use storage definition from proper resource */
if (job->pool->storage) {
return ret;
}
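+/* In-place swap helpers used by the simple bubble sorts below */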
+static void swapit(uint32_t *v1, uint32_t *v2)
+{
+ uint32_t temp = *v1;
+ *v1 = *v2;
+ *v2 = temp;
+}
+
+static void swapit(int64_t *v1, int64_t *v2)
+{
+ int64_t temp = *v1;
+ *v1 = *v2;
+ *v2 = temp;
+}
+
void LeastUsedStore::apply_policy(bool write_store) {
alist *store = write_store ? wstore.get_list() : rstore.get_list();
alist tmp_list(10, not_owned_by_alist);
free(idx_arr);
}
-void LeastUsedStore::apply_write_policy() {
+void LeastUsedStore::apply_write_policy(JCR*) {
return apply_policy(true);
}
-void LeastUsedStore::apply_read_policy() {
+void LeastUsedStore::apply_read_policy(JCR*) {
return apply_policy(false);
}
+void LastBackupedToStore::apply_policy(bool) {
+ /* Do nothing for now */
+}
+
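+/*
+ * Reorder the write storage list according to when each storage last
+ * received a backup of this job (same job name and level).  Storages are
+ * sorted by ascending JobTDate, so a storage never yet used for this job
+ * (JobTDate = 0) or used least recently comes first.
+ */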
+void LastBackupedToStore::apply_write_policy(JCR *jcr)
+{
+ if (jcr)
+ {
+ alist *store = wstore.get_list();
+ alist tmp_list(10, not_owned_by_alist);
+ uint32_t store_count = store->size();
+ uint32_t i, j;
+
+      utime_t *tdate_arr = (utime_t *)malloc((store_count + 1) * sizeof(utime_t));
+      uint32_t *idx_arr = (uint32_t *)malloc((store_count + 1) * sizeof(uint32_t));
+
+      for (i = 0; i < store_count; i++)
+ {
+ tmp_list.append(store->get(i));
+ }
+
+ /* Reset list */
+ store->destroy();
+ store->init(10, not_owned_by_alist);
+
+ STORE *storage;
+ POOL_MEM buf;
+ foreach_alist_index(i, storage, &tmp_list)
+ {
+ db_int64_ctx nb;
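+         /* JobTDate of the most recent backup of this job/level written to this storage (0 if none) */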
+ Mmsg(buf, "SELECT Job.JobTDate FROM Job JOIN Storage on (WriteStorageId = StorageId) WHERE Job.Name='%s' AND Job.Level = '%c' AND Storage.Name = '%s' ORDER BY (Job.JobTDate) DESC LIMIT 1;",
+ jcr->job->name(),
+ jcr->getJobLevel(),
+ storage->name());
+ db_sql_query(jcr->db, buf.c_str(), db_int64_handler, &nb);
+
+ idx_arr[i] = i;
+         tdate_arr[i] = nb.value;
+ }
+
+      /* Simple bubble sort; 'i + 1 < store_count' avoids unsigned underflow on an empty list */
+      for (i = 0; i + 1 < store_count; i++)
+ {
+         for (j = 0; j + i + 1 < store_count; j++)
+ {
+            if (tdate_arr[j] > tdate_arr[j + 1])
+            {
+               swapit(&tdate_arr[j], &tdate_arr[j + 1]);
+               swapit(&idx_arr[j], &idx_arr[j + 1]);
+ }
+ }
+ }
+ for (i = 0; i < store_count; i++)
+ {
+ storage = (STORE *)tmp_list.get(idx_arr[i]);
+ store->append(storage);
+ }
+      free(tdate_arr);
+ free(idx_arr);
+ }
+}
+
+void LastBackupedToStore::apply_read_policy(JCR *) {
+ apply_policy(false);
+}
+
StorageManager::StorageManager(const char *policy) {
this->policy = bstrdup(policy);
rstore.set_rw(false);
void StorageManager::dec_unused_wstores() {
wstore.dec_unused_stores();
}
+
+/* FreeSpaceLeastUsedStore::query() orders d_list by descending free space.
+ * reorder_list() then tie-breaks the storages whose free space is within
+ * 'threshold' bytes of the largest by their number of concurrent jobs. */
+void FreeSpaceLeastUsedStore::reorder_list(alist *list, dlist *d_list) {
+
+ sm_ctx *ctx, *ctx2;
+
+ list->destroy();
+ list->init(10, not_owned_by_alist);
+
+ Dmsg0(dbglvl, "FreeSpaceLeastUsedStore. Sorted on store size\n");
+ int count = 0;
+ foreach_dlist(ctx, d_list)
+ {
+      Dmsg3(dbglvl, "list[%d] size=%lld num=%d\n", count, (long long)ctx->number, ctx->store->getNumConcurrentJobs());
+ count++;
+ }
+
+ if ( d_list && d_list->first() )
+ {
+ ctx = (sm_ctx *)d_list->first();
+      uint64_t max_size = (ctx->number > threshold) ? ctx->number - threshold : 0;
+      Dmsg2(dbglvl, "FreeSpaceLeastUsedStore. max_size=%lld threshold=%lld\n", (long long)max_size, (long long)threshold);
+      /* Count the nodes whose free space is within the threshold window */
+      int free_store_count = 0;
+ foreach_dlist(ctx, d_list)
+ {
+ if (ctx->number < max_size)
+ {
+ break;
+ }
+ free_store_count++;
+ }
+
+ Dmsg1(dbglvl, "FreeSpaceLeastUsedStore. free_store_count=%d\n", free_store_count);
+
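+      /* Bubble sort those first free_store_count nodes by ascending number of concurrent jobs */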
+ for (int i=0; i<free_store_count-1; ++i)
+ {
+ ctx = (sm_ctx *)d_list->first();
+ ctx2 = (sm_ctx *)d_list->next(ctx);
+ for (int j=0; j<free_store_count-i-1 && ctx && ctx2; j++) {
+ if (ctx->store->getNumConcurrentJobs() > ctx2->store->getNumConcurrentJobs()) {
+ /* swap : detach ctx*/
+ d_list->remove(ctx);
+ /* re-attach after */
+ d_list->insert_after(ctx,ctx2);
+            /* ctx has implicitly moved forward, update only ctx2 */
+ } else {
+ /* move forward ctx and ctx2 */
+ ctx = (sm_ctx *)d_list->next(ctx);
+ }
+ ctx2 = (sm_ctx *)d_list->next(ctx);
+ }
+ }
+ }
+
+   Dmsg0(dbglvl, "FreeSpaceLeastUsedStore. Sorted on store size AND number of concurrent jobs\n");
+ count = 0;
+ foreach_dlist(ctx, d_list)
+ {
+      Dmsg3(dbglvl, "list[%d] size=%lld num=%d\n", count, (long long)ctx->number, ctx->store->getNumConcurrentJobs());
+ count++;
+ }
+
+ foreach_dlist(ctx, d_list) {
+ list->append((STORE *)ctx->store);
+ }
+}
+
+
+#ifdef TEST_PROGRAM
+
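+/*
+ * Standalone check for FreeSpaceLeastUsedStore::reorder_list(): build a
+ * randomly-sized list of stores with random sizes and concurrent-job
+ * counts, then print the list before and after reordering.
+ */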
+int main()
+{
+   srand(time(NULL));           /* seed once; reseeding every iteration would repeat sequences */
+   int nbtests = 100;
+   for (int t = 0; t < nbtests; ++t) {
+
+ FreeSpaceLeastUsedStore *fslus = New(FreeSpaceLeastUsedStore(10000000));
+ alist *list = New(alist(10, not_owned_by_alist));
+
+ sm_ctx *context = 0;
+ dlist *d_list = New(dlist(context, &context->link));
+
+ /* random number of ctx between 10 and 100 */
+      int nbctx = rand() % 91 + 10;
+ int n=0;
+ for (; n<nbctx; ++n) {
+ STORE_GLOBALS *globals = new STORE_GLOBALS();
+ globals->NumConcurrentJobs = rand() % (10);
+ STORE *s = new STORE();
+ s->globals = globals;
+ context = New(sm_ctx(s));
+ int num = rand() % (10000000);
+ context->number = num;
+ d_list->prepend(context);
+ }
+ {
+ Pmsg0(0, " ORIGINAL\n");
+ sm_ctx * h = (sm_ctx *)d_list->first();
+ int count = 0;
+ while (h) {
+            Pmsg3(0, " array[%d] = %lld %d\n", count, (long long)h->number, h->store->getNumConcurrentJobs());
+ h = (sm_ctx *)d_list->next(h);
+ count++;
+ }
+ }
+
+ fslus->reorder_list(list, d_list);
+
+ {
+ Pmsg0(0, " FINAL\n");
+ sm_ctx * h = (sm_ctx *)d_list->first();
+ int count = 0;
+ while (h) {
+            Pmsg3(0, " array[%d] = %lld %d\n", count, (long long)h->number, h->store->getNumConcurrentJobs());
+ h = (sm_ctx *)d_list->next(h);
+ count++;
+ }
+ }
+
+      if (d_list) {
+         delete d_list;
+      }
+      delete list;
+      delete fslus;
+   }
+   return 0;
+}
+#endif
virtual void apply_policy(bool write_store) = 0;
public:
- virtual void apply_write_policy() = 0;
- virtual void apply_read_policy() = 0;
+ virtual void apply_write_policy(JCR*) = 0;
+ virtual void apply_read_policy(JCR*) = 0;
virtual ~StorageManager() {
reset_rwstorage();
private:
void apply_policy(bool write_store);
public:
- void apply_write_policy();
- void apply_read_policy();
+ void apply_write_policy(JCR*);
+ void apply_read_policy(JCR*);
LeastUsedStore() : StorageManager("LeastUsed") {
}
/* Do nothing for now */
}
public:
- void apply_write_policy() {
+ void apply_write_policy(JCR*) {
return apply_policy(true);
}
- void apply_read_policy() {
+ void apply_read_policy(JCR*) {
return apply_policy(false);
}
}
};
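+/* Prefer the storage that received a backup of this job (at the same
+ * level) least recently; see LastBackupedToStore::apply_write_policy() */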
+class LastBackupedToStore : public StorageManager {
+ private:
+ void apply_policy(bool write_store);
+ public:
+ void apply_write_policy(JCR* jcr);
+ void apply_read_policy(JCR* jcr);
+
+ LastBackupedToStore() : StorageManager("LastBackupedTo") {
+ }
+
+ ~LastBackupedToStore() {
+ }
+};
+
/* Context with per-policy specific data (as of now there's only a single uint64_t value available for each policy) */
class sm_ctx : public SMARTALLOC {
public:
virtual void reorder_list(alist *list, dlist *d_list) = 0;
public:
- void apply_policy(bool write_store);
+ virtual void apply_policy(bool write_store);
QueryStore (const char *policy="VirtualPolicy_QueryStore"): StorageManager(policy) {
}
dlink link;
};
- bool query(BSOCK *sd, dlist *d_list, sm_ctx *context);
-
/* Comparator for easy list ordering */
static int cmp(void *item1, void *item2) {
sm_ctx *ctx1 = (sm_ctx *) item1;
}
}
- void reorder_list(alist *list, dlist *d_list);
+ protected:
+ bool query(BSOCK *sd, dlist *d_list, sm_ctx *context);
+
+ virtual void reorder_list(alist *list, dlist *d_list);
public:
- void apply_write_policy() {
+ void apply_write_policy(JCR*) {
return apply_policy(true);
}
- void apply_read_policy() {
+ void apply_read_policy(JCR*) {
return apply_policy(false);
}
FreeSpaceStore(): QueryStore("FreeSpace") {
}
- ~FreeSpaceStore() {
+   FreeSpaceStore(const char *policy): QueryStore(policy) {
+ }
+
+ virtual ~FreeSpaceStore() {
}
};
-#endif // STORE_MNGR_H
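+/* FreeSpace policy with a concurrency tie-break: storages whose free space
+ * is within 'threshold' bytes of the largest are ordered by their number
+ * of concurrent jobs (fewest first) */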
+class FreeSpaceLeastUsedStore : public FreeSpaceStore {
+ private:
+ uint64_t threshold;
+
+ protected:
+ virtual void reorder_list(alist *list, dlist *d_list);
+
+ public:
+   FreeSpaceLeastUsedStore(uint64_t thres=0): FreeSpaceStore("FreeSpaceLeastUsed"), threshold(thres) {
+   }
+
+ virtual ~FreeSpaceLeastUsedStore() {
+ }
+};
+
+
+#endif // STORE_MNGR_H
\ No newline at end of file
"LeastUsed",
"ListedOrder",
"FreeSpace",
+ "LastBackupedTo",
+ "FreeSpaceLeastUsed",
NULL
};
--- /dev/null
+#!/bin/bash
+#
+# Copyright (C) 2000-2021 Kern Sibbald
+# Copyright (C) 2021-2022 Bacula Systems SA
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Simple test for the 'FreeSpaceLeastUsed' storage policy.
+# We create 8 devices of different sizes and assign them to a storage group.
+# Devices 5-8 are pre-filled so that the first devices hold the most free space.
+# Each single backup must use the device with the largest free space; later,
+# Disk2 is kept busy with concurrent jobs so that new jobs must prefer the
+# storages with fewer concurrent jobs.
+#
+
+TestName="store-mngr-freespace-test"
+. scripts/functions
+
+scripts/cleanup
+scripts/copy-confs
+
+# Cleanup of mounted dirs
+trap mount_cleanup err exit
+
+function mount_cleanup() {
+ sudo umount -ql $tmp/dev*
+}
+
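+# Return, via the nameref in $1, the 1-based index of the device with the
+# most free space as reported by df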
+function largest_free_drive() {
+ local -n largest=$1
+ a=($(df --output=avail $tmp/dev* | sed 1d))
+ echo ${a[@]}
+ max=0
+ max_idx=0
+ idx=0
+
+ for avail in ${a[@]}; do
+ if [[ $avail -gt $max ]]; then
+ max=$avail
+ max_idx=$idx
+ fi
+ idx=$((idx+1))
+ done
+
+ largest=$((max_idx+1))
+ return 0
+}
+
+PATH=${PATH}":/sbin"
+
+dev1="${tmp}/dev1"
+dev2="${tmp}/dev2"
+dev3="${tmp}/dev3"
+dev4="${tmp}/dev4"
+dev5="${tmp}/dev5"
+dev6="${tmp}/dev6"
+dev7="${tmp}/dev7"
+dev8="${tmp}/dev8"
+
+dd if=/dev/zero of=$tmp/disk1 bs=1M count=400 > /dev/null
+dd if=/dev/zero of=$tmp/disk2 bs=1M count=800 > /dev/null
+dd if=/dev/zero of=$tmp/disk3 bs=1M count=1000 > /dev/null
+dd if=/dev/zero of=$tmp/disk4 bs=1M count=400 > /dev/null
+dd if=/dev/zero of=$tmp/disk5 bs=1M count=800 > /dev/null
+dd if=/dev/zero of=$tmp/disk6 bs=1M count=1000 > /dev/null
+dd if=/dev/zero of=$tmp/disk7 bs=1M count=400 > /dev/null
+dd if=/dev/zero of=$tmp/disk8 bs=1M count=800 > /dev/null
+
+
+mkfs.ext4 -F $tmp/disk1 > /dev/null
+mkfs.ext4 -F $tmp/disk2 > /dev/null
+mkfs.ext4 -F $tmp/disk3 > /dev/null
+mkfs.ext4 -F $tmp/disk4 > /dev/null
+mkfs.ext4 -F $tmp/disk5 > /dev/null
+mkfs.ext4 -F $tmp/disk6 > /dev/null
+mkfs.ext4 -F $tmp/disk7 > /dev/null
+mkfs.ext4 -F $tmp/disk8 > /dev/null
+
+mkdir -p $dev1
+mkdir -p $dev2
+mkdir -p $dev3
+mkdir -p $dev4
+mkdir -p $dev5
+mkdir -p $dev6
+mkdir -p $dev7
+mkdir -p $dev8
+
+user=`whoami`
+
+sudo mount -o loop $tmp/disk1 $dev1
+sudo mount -o loop $tmp/disk2 $dev2
+sudo mount -o loop $tmp/disk3 $dev3
+sudo mount -o loop $tmp/disk4 $dev4
+sudo mount -o loop $tmp/disk5 $dev5
+sudo mount -o loop $tmp/disk6 $dev6
+sudo mount -o loop $tmp/disk7 $dev7
+sudo mount -o loop $tmp/disk8 $dev8
+#TODO add some err handling here
+
+sudo chown -R $user:$user $dev1
+sudo chown -R $user:$user $dev2
+sudo chown -R $user:$user $dev3
+sudo chown -R $user:$user $dev4
+sudo chown -R $user:$user $dev5
+sudo chown -R $user:$user $dev6
+sudo chown -R $user:$user $dev7
+sudo chown -R $user:$user $dev8
+
+# Fill devices 5->8
+
+dd if=/dev/urandom of=$dev5/data bs=1M count=600
+dd if=/dev/urandom of=$dev6/data bs=1M count=500
+dd if=/dev/urandom of=$dev7/data bs=1M count=200
+dd if=/dev/urandom of=$dev8/data bs=1M count=600
+
+# ... so devices 1-4 keep the most free space and should mostly be used
+
+
+# Get SD password
+sd_pass=`grep -i password ${bin}/bacula-sd.conf | head -n 1`
+
+# Add simple job with store group
+cat <<END_OF_DATA >> $bin/bacula-dir.conf
+Pool {
+ Name = FreeSpacePool
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365 days # one year
+ Maximum Volume Bytes = 50G # Limit Volume size to something reasonable
+ Maximum Volumes = 100 # Limit number of Volumes in Pool
+ Storage = Disk1, Disk2, Disk3, Disk4, Disk5, Disk6, Disk7, Disk8
+ Label Format = "Pool1-"
+  Storage Group Policy = FreeSpaceLeastUsed
+ StorageGroupPolicyThreshold = 250 MB
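+  # Storages whose free space is within 250 MB of the largest are
+  # tie-broken by their number of concurrent jobs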
+}
+
+Pool {
+ Name = Disk2Pool
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365 days # one year
+ Maximum Volume Bytes = 50G # Limit Volume size to something reasonable
+ Maximum Volumes = 100 # Limit number of Volumes in Pool
+ Storage = Disk2
+ Label Format = "Pool2-"
+}
+
+Job {
+ Name = "FreeSpaceJob"
+ Type = Backup
+ Messages = Standard
+ JobDefs = DefaultJob
+ FileSet = "Full Set"
+ Pool = FreeSpacePool
+ Maximum Concurrent Jobs = 10
+}
+
+Job {
+ Name = "Disk2Job"
+ Type = Backup
+ Messages = Standard
+ JobDefs = DefaultJob
+ FileSet = "Full Set"
+ Pool = Disk2Pool
+ Maximum Concurrent Jobs = 10
+ MaximumBandwidth = 700kB
+}
+
+Autochanger {
+ Name = Disk1
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk1
+ Media Type = Disk1
+ Autochanger = Disk1 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk2
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk2
+ Media Type = Disk2
+ Autochanger = Disk2 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk3
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk3
+ Media Type = Disk3
+ Autochanger = Disk3 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk4
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk4
+ Media Type = Disk4
+ Autochanger = Disk4 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk5
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk5
+ Media Type = Disk5
+ Autochanger = Disk5 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk6
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk6
+ Media Type = Disk6
+ Autochanger = Disk6 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk7
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk7
+ Media Type = Disk7
+ Autochanger = Disk7 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+
+Autochanger {
+ Name = Disk8
+  Address = $HOST                 # N.B. Use a fully qualified name here
+ SDPort = 8103
+${sd_pass}
+ Device = Disk8
+ Media Type = Disk8
+ Autochanger = Disk8 # point to ourself
+  Maximum Concurrent Jobs = 10    # run up to 10 jobs at the same time
+}
+END_OF_DATA
+
+cat <<END_OF_DATA >> $bin/bacula-sd.conf
+Autochanger {
+ Name = Disk1
+ Device = Disk1-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk1-Dev
+ Media Type = Disk1
+ Archive Device = ${dev1}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk2
+ Device = Disk2-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk2-Dev
+ Media Type = Disk2
+ Archive Device = ${dev2}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk3
+ Device = Disk3-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk3-Dev
+ Media Type = Disk3
+ Archive Device = ${dev3}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk4
+ Device = Disk4-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk4-Dev
+ Media Type = Disk4
+ Archive Device = ${dev4}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk5
+ Device = Disk5-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk5-Dev
+ Media Type = Disk5
+ Archive Device = ${dev5}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk6
+ Device = Disk6-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk6-Dev
+ Media Type = Disk6
+ Archive Device = ${dev6}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk7
+ Device = Disk7-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk7-Dev
+ Media Type = Disk7
+ Archive Device = ${dev7}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+
+Autochanger {
+ Name = Disk8
+ Device = Disk8-Dev
+ Changer Command = ""
+ Changer Device = /dev/null
+}
+
+Device {
+ Name = Disk8-Dev
+ Media Type = Disk8
+ Archive Device = ${dev8}
+ LabelMedia = yes; # lets Bacula label unlabeled media
+ Random Access = Yes;
+ AutomaticMount = yes; # when device opened, read it
+ RemovableMedia = no;
+ AlwaysOpen = no;
+ Maximum Concurrent Jobs = 10
+}
+END_OF_DATA
+
+$bperl -e 'add_attribute("$conf/bacula-dir.conf", "MaximumConcurrentJobs", "20", "Client")'
+
+touch ${cwd}/tmp/bconcmds
+
+start_test
+
+run_bacula
+
+# Jobs run one at a time here, so the per-drive concurrent-job count is not yet relevant
+
+l=0
+largest_free_drive l
+echo "LARGEST drive is Disk$l"
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log1.out
+setdebug level=200 trace=1 director
+run job=FreeSpaceJob level=Full yes
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+n_disk=`cat ${cwd}/tmp/log1.out | grep "Storage:" | tr -s ' ' | grep "Storage: \"Disk$l\"" | wc -l`
+if [ $n_disk -ne 1 ]; then
+ estat=1
+  echo "ERROR: Disk$l Storage should have been used for backup, see: ${tmp}/log1.out"
+fi
+
+l=0
+largest_free_drive l
+echo "LARGEST drive is Disk$l"
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log2.out
+run job=FreeSpaceJob level=Full yes
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+n_disk=`cat ${cwd}/tmp/log2.out | grep "Storage:" | tr -s ' ' | grep "Storage: \"Disk$l\"" | wc -l`
+if [ $n_disk -ne 1 ]; then
+ estat=1
+  echo "ERROR: Disk$l Storage should have been used for backup, see: ${tmp}/log2.out"
+fi
+
+l=0
+largest_free_drive l
+echo "LARGEST drive is Disk$l"
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log3.out
+run job=FreeSpaceJob level=Full yes
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+n_disk=`cat ${cwd}/tmp/log3.out | grep "Storage:" | tr -s ' ' | grep "Storage: \"Disk$l\"" | wc -l`
+if [ $n_disk -ne 1 ]; then
+ estat=1
+ echo "ERROR: Disk$l Storage should have been used for backup, see: ${tmp}/log3.out"
+fi
+
+# Spawn jobs 4-7 on one particular drive (Disk2)
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+messages
+setdebug level=200 trace=1 director
+run job=Disk2Job level=Full yes
+run job=Disk2Job level=Full yes
+run job=Disk2Job level=Full yes
+run job=Disk2Job level=Full yes
+END_OF_DATA
+run_bconsole
+
+# Spawn more jobs 8-11; none of them should run on Disk2
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+setdebug level=200 trace=1 director
+run job=FreeSpaceJob level=Full yes
+run job=FreeSpaceJob level=Full yes
+run job=FreeSpaceJob level=Full yes
+run job=FreeSpaceJob level=Full yes
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log47.out
+setdebug level=200 trace=1 director
+llist jobid=4
+llist jobid=5
+llist jobid=6
+llist jobid=7
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+n_disk=`cat ${cwd}/tmp/log47.out | grep "writestorage:" | tr -s ' ' | grep "Disk2" | wc -l`
+if [ $n_disk -ne 4 ]; then
+ estat=1
+ echo "ERROR: Disk2 Storage should have been used for backup, see: ${tmp}/log47.out"
+fi
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log811.out
+setdebug level=200 trace=1 director
+llist jobid=8
+llist jobid=9
+llist jobid=10
+llist jobid=11
+wait
+messages
+END_OF_DATA
+run_bconsole
+
+n_disk=`cat ${cwd}/tmp/log811.out | grep "writestorage:" | tr -s ' ' | grep "Disk2" | wc -l`
+if [ $n_disk -ne 0 ]; then
+ estat=1
+ echo "ERROR: Disk2 Storage should not have been used for backup, see: ${tmp}/log811.out"
+fi
+
+stop_bacula
+end_test
--- /dev/null
+#!/bin/sh
+#
+# Copyright (C) 2000-2021 Kern Sibbald
+# Copyright (C) 2022-2023 Bacula Systems SA
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+# Simple test to check if storage list is correctly handled for the LastBackupedTo policy
+#
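+# The bconsole output below is routed with @$out so that every job expected
+# to run on vDiskN lands in logN.out; the greps at the end verify that each
+# log mentions only its own vDisk.
+#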
+
+TestName="store-mngr-LastBackupedTo"
+. scripts/functions
+
+scripts/cleanup
+scripts/copy-test-confs
+
+# Create three virtual disk autochangers vDisk1, vDisk2, vDisk3
+$bperl -e 'add_virtual_changer("vDisk1", 5)'
+$bperl -e 'add_virtual_changer("vDisk2", 5)'
+$bperl -e 'add_virtual_changer("vDisk3", 5)'
+$bperl -e 'add_attribute("$conf/bacula-dir.conf", "LabelFormat", "Vol", "Pool")'
+$bperl -e 'add_attribute("$conf/bacula-dir.conf", "CommCompression", "no", "Director")'
+$bperl -e 'add_attribute("$conf/bacula-fd.conf", "CommCompression", "no", "FileDaemon")'
+$bperl -e 'add_attribute("$conf/bacula-sd.conf", "CommCompression", "no", "Storage")'
+$bperl -e 'add_attribute("$conf/bacula-dir.conf", "AllowCompression", "no", "Storage")'
+sed 's/sparse=yes;//' $conf/bacula-dir.conf > $tmp/1
+mv $tmp/1 $conf/bacula-dir.conf
+
+echo $tmp/f > $tmp/file-list
+dd if=/dev/zero of=$tmp/f count=10000
+
+# Add two simple jobs (one of them with store group)
+cat <<END_OF_DATA >> $bin/bacula-dir.conf
+
+Job {
+ Name = "SingleStoreJob"
+ Client = $HOST-fd
+ Messages = Standard
+ Type = Backup
+ Storage = vDisk1
+ Pool = Default
+ FileSet="Full Set"
+}
+Job {
+ Name = "StoreGroupJob"
+ Type = Backup
+ Client = $HOST-fd
+ Messages = Standard
+ Storage = vDisk1, vDisk2
+ Storage Group Policy = LastBackupedTo
+ Pool = Default
+ FileSet = "Full Set"
+}
+END_OF_DATA
+
+$bperl -e 'set_global_maximum_concurrent_jobs(10)'
+
+start_test
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output /dev/null
+messages
+@$out ${cwd}/tmp/log1.out
+setdebug level=0 trace=1 dir
+setbandwidth limit=500kb/s client
+label volume=TestVolume001 storage=vDisk1 pool=Default slot=1 drive=0
+label volume=TestVolume002 storage=vDisk2 pool=Default slot=1 drive=0
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+list jobs
+
+quit
+END_OF_DATA
+
+echo "*********** Run some backups with vDisk1 and vDisk2 *************************"
+run_bacula
+
+stop_bacula
+
+# add a new vDisk Storage
+sed -i 's/vDisk1, vDisk2/vDisk1, vDisk2, vDisk3/g' $bin/bacula-dir.conf
+
+echo "*********** Run some backups after adding vDisk3 *************************"
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output /dev/null
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log3.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log1.out
+messages
+
+run job=StoreGroupJob level=Full yes
+wait
+@$out ${cwd}/tmp/log2.out
+messages
+
+run job=StoreGroupJob yes
+wait
+@$out ${cwd}/tmp/log3.out
+
+@output /dev/null
+list jobs
+messages
+quit
+END_OF_DATA
+
+run_bacula
+
+stop_bacula
+
+# log1.out should contain only vDisk1
+grep -l vDisk1 ${cwd}/tmp/log1.out
+if [ $? -ne 0 ]; then
+ print_debug "Could not find vDisk1 in log1"
+ estat=1
+fi
+grep -L vDisk2 ${cwd}/tmp/log1.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk2 in log1. Should not be the case"
+ estat=1
+fi
+grep -L vDisk3 ${cwd}/tmp/log1.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk3 in log1. Should not be the case"
+ estat=1
+fi
+
+# log2.out should contain only vDisk2
+grep -L vDisk1 ${cwd}/tmp/log2.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk1 in log2. Should not be the case"
+ estat=1
+fi
+
+grep -l vDisk2 ${cwd}/tmp/log2.out
+if [ $? -ne 0 ]; then
+ print_debug "Could not find vDisk2 in log2"
+ estat=1
+fi
+grep -L vDisk3 ${cwd}/tmp/log2.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk3 in log2. Should not be the case"
+ estat=1
+fi
+
+# log3.out should contain only vDisk3
+grep -L vDisk1 ${cwd}/tmp/log3.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk1 in log3. Should not be the case"
+ estat=1
+fi
+grep -L vDisk2 ${cwd}/tmp/log3.out
+if [ $? -ne 1 ]; then
+ print_debug "vDisk2 in log3. Should not be the case"
+ estat=1
+fi
+grep -l vDisk3 ${cwd}/tmp/log3.out
+if [ $? -ne 0 ]; then
+ print_debug "Could not find vDisk3 in log3"
+ estat=1
+fi
+
+end_test