/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
Bacula(R) is a registered trademark of Kern Sibbald.
*/
+
+/*
+ * Generic routines for creating Cloud compatible Volumes.
+ * NOTE!!! This cloud device is not compatible with
+ * any disk-changer script for changing Volumes.
+ * It does however work with Bacula Virtual autochangers.
+ *
+ * Written by Kern Sibbald, May MMXVI
+ *
+ */
+
+#include "bacula.h"
+#include "stored.h"
+#include "cloud_dev.h"
+#include "s3_driver.h"
+#include "file_driver.h"
+#include "cloud_parts.h"
+#include "math.h"
+
+static const int dbglvl = 450;
+
+#define ASYNC_TRANSFER 1
+
+/* Debug only: Enable to introduce random transfer delays*/
+/* #define RANDOM_WAIT_ENABLE*/
+#define RANDOM_WAIT_MIN 2 /* minimum delay time*/
+#define RANDOM_WAIT_MAX 12 /* maximum delay time*/
+
+#define XFER_TMP_NAME "xfer"
+#include <fcntl.h>
+
+#if defined(HAVE_WIN32)
+ #define lseek _lseeki64
+#endif
+
+/* standard dcr cancel callback function */
+bool DCR_cancel_cb(void* arg)
+{
+ /* arg is a DCR*; report whether the attached job has been canceled.
+ * Returns false when there is no DCR/JCR to query. */
+ DCR *dcr = (DCR*)arg;
+ if (dcr && dcr->jcr) {
+ return dcr->jcr->is_canceled();
+ }
+ return false;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+DEVICE *BaculaSDdriver(JCR *jcr, DEVRES *device)
+{
+ /* Factory entry point: build a cloud_dev for the given Device resource.
+ * A Cloud resource must be configured; returns NULL (after a fatal Jmsg)
+ * when it is missing. The device advertises CAP_LSEEK. */
+ DEVICE *dev;
+ if (!device->cloud) {
+ Jmsg0(jcr, M_FATAL, 0, _("A Cloud resource is required for the Cloud driver, but is missing.\n"));
+ return NULL;
+ }
+ dev = New(cloud_dev(jcr, device));
+ dev->capabilities |= CAP_LSEEK;
+ return dev;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+transfer_manager cloud_dev::download_mgr(transfer_manager(0));
+transfer_manager cloud_dev::upload_mgr(transfer_manager(0));
+
+/* Imported functions */
+const char *mode_to_str(int mode);
+int breaddir(DIR *dirp, POOLMEM *&dname);
+
+/* Forward referenced functions */
+bool makedir(JCR *jcr, char *path, mode_t mode);
+
+/* Const and Static definitions */
+
+/* Address manipulations:
+ *
+ * The current idea is internally use part and offset-in-part
+ * However for sending back JobMedia, we need to use
+ * get_full_addr() which puts the file in the top 20 bits.
+ */
+
+static boffset_t get_offset(boffset_t ls_offset)
+{
+ /* Strip the part number from a full address, keeping only the
+ * offset-in-part bits (see off_mask/off_bits). */
+ boffset_t pos = ls_offset & off_mask;
+ return pos;
+}
+
+static boffset_t make_addr(uint32_t my_part, boffset_t my_offset)
+{
+ /* Combine a part number and an offset-in-part into one full address:
+ * part in the high bits, offset in the low off_bits bits.
+ * my_offset is assumed to already fit in off_mask. */
+ return (boffset_t)(((uint64_t)my_part)<<off_bits) | my_offset;
+}
+
+/* From lst (a transfer alist), retrieve the first transfer matching VolumeName and part idx
+ */
+transfer *get_list_transfer(alist *lst, const char* VolumeName, uint32_t upart)
+{
+ /* Linear scan of a transfer alist for the entry matching both the
+ * Volume name and the part index; NULL when not found. */
+ transfer *t;
+ foreach_alist(t, lst) {
+ if (t && bstrcmp(VolumeName, t->m_volume_name) && (upart == t->m_part)) {
+ return t;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * This upload_engine is called by workq in a worker thread.
+ */
+void *upload_engine(transfer *tpkt)
+{
+ /* Worker-thread entry: push one cache part file to the cloud.
+ * Returns tpkt on failure and NULL on success (workq convention). */
+#ifdef RANDOM_WAIT_ENABLE
+ srand(time(NULL));
+ /* wait between 2 and 12 seconds */
+ int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN);
+ bmicrosleep(s_time, 0);
+#endif
+ if (tpkt && tpkt->m_driver) {
+ /* call the driver method async */
+ Dmsg4(dbglvl, "Upload start %s-%d JobId : %d driver :%p\n",
+ tpkt->m_volume_name, tpkt->m_part, tpkt->m_dcr->jcr->JobId, tpkt->m_driver);
+ if (!tpkt->m_driver->copy_cache_part_to_cloud(tpkt)) {
+ /* Error message already sent by Qmsg() */
+ Dmsg4(dbglvl, "Upload error!! JobId=%d part=%d Vol=%s cache=%s\n",
+ tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname);
+ POOL_MEM dmsg(PM_MESSAGE);
+ tpkt->append_status(dmsg);
+ Dmsg1(dbglvl, "%s\n",dmsg.c_str());
+ return tpkt;
+ }
+ Dmsg2(dbglvl, "Upload end JobId : %d driver :%p\n",
+ tpkt->m_dcr->jcr->JobId, tpkt->m_driver);
+
+ /* Optional cache truncation after a successful upload; part.1 (the
+ * volume label) is deliberately never removed from the cache. */
+ if (tpkt->m_do_cache_truncate && tpkt->m_part!=1) {
+ if (unlink(tpkt->m_cache_fname) != 0) {
+ berrno be;
+ Dmsg2(dbglvl, "Truncate cache option after upload. Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror());
+ } else {
+ Dmsg1(dbglvl, "Truncate cache option after upload. Unlink file %s\n", tpkt->m_cache_fname);
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * This download_engine is called by workq in a worker thread.
+ */
+void *download_engine(transfer *tpkt)
+{
+ /* Worker-thread entry: fetch one cloud part into a temporary cache file
+ * ("xfer_<pid>.<part>") then rename it to its final "part.<part>" name.
+ * Returns tpkt on failure and NULL on success (workq convention). */
+#ifdef RANDOM_WAIT_ENABLE
+ srand(time(NULL));
+ /* wait between 2 and 12 seconds */
+ int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN);
+ bmicrosleep(s_time, 0);
+#endif
+ if (tpkt && tpkt->m_driver) {
+ /* call the driver method async */
+ Dmsg4(dbglvl, "Download starts %s-%d : job : %d driver :%p\n",
+ tpkt->m_volume_name, tpkt->m_part, tpkt->m_dcr->jcr->JobId, tpkt->m_driver);
+ if (!tpkt->m_driver->copy_cloud_part_to_cache(tpkt)) {
+ Dmsg4(dbglvl, "Download error!! JobId=%d part=%d Vol=%s cache=%s\n",
+ tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname);
+ POOL_MEM dmsg(PM_MESSAGE);
+ tpkt->append_status(dmsg);
+ Dmsg1(dbglvl, "%s\n",dmsg.c_str());
+
+ /* download failed -> remove the temp xfer file */
+ if (unlink(tpkt->m_cache_fname) != 0) {
+ berrno be;
+ Dmsg2(dbglvl, "Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror());
+ } else {
+ Dmsg1(dbglvl, "Unlink file %s\n", tpkt->m_cache_fname);
+ }
+
+ return tpkt;
+ }
+ else {
+ /* success: build the final part name by replacing the XFER_TMP_NAME
+ * marker in a copy of the temp path, then rename over it */
+ POOLMEM *cache_fname = get_pool_memory(PM_FNAME);
+ pm_strcpy(cache_fname, tpkt->m_cache_fname);
+ char *p = strstr(cache_fname, XFER_TMP_NAME);
+ if (!p) {
+ /* Should never happen: m_cache_fname was built with XFER_TMP_NAME.
+ * Guard anyway -- the old code passed a NULL here to strcpy(). */
+ Dmsg1(dbglvl, "Download error!! No xfer marker in temp cache name %s\n", tpkt->m_cache_fname);
+ free_pool_memory(cache_fname);
+ return tpkt;
+ }
+ char partnumber[20];
+ bsnprintf(partnumber, sizeof(partnumber), "part.%d", tpkt->m_part);
+ strcpy(p,partnumber);
+ if (rename(tpkt->m_cache_fname, cache_fname) != 0) {
+ Dmsg5(dbglvl, "Download copy error!! JobId=%d part=%d Vol=%s temp cache=%s cache=%s\n",
+ tpkt->m_dcr->jcr->JobId, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname, cache_fname);
+ free_pool_memory(cache_fname);
+ return tpkt;
+ }
+ free_pool_memory(cache_fname);
+ }
+ Dmsg2(dbglvl, "Download end JobId : %d driver :%p\n",
+ tpkt->m_dcr->jcr->JobId, tpkt->m_driver);
+ }
+ return NULL;
+}
+
+/*
+ * Upload the given part to the cloud
+ */
+bool cloud_dev::upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t upart)
+{
+ /* Schedule (or queue immediately, per upload_opt) the upload of cache
+ * part 'upart' of VolumeName to the cloud.
+ * Returns true when the part was newly scheduled (or is empty/UPLOAD_NO),
+ * false when upart==0 or an upload for this part is already pending.
+ * NOTE(review): when upload_mgr.find() reports an existing manager-side
+ * transfer, ret stays false yet the transfer is still appended/queued
+ * below -- presumably intentional sharing via get_xfer(); confirm. */
+ if (upload_opt == UPLOAD_NO) {
+ /* lets pretend everything is OK */
+ return true;
+ }
+ bool ret=false;
+ if (upart == 0 || get_list_transfer(dcr->uploads, VolumeName, upart)) {
+ return ret;
+ }
+
+ uint64_t file_size=0;
+ POOLMEM *cache_fname = get_pool_memory(PM_FNAME);
+ make_cache_filename(cache_fname, VolumeName, upart);
+
+ /* part is valid and no upload for the same part is scheduled */
+ if (!upload_mgr.find(VolumeName,upart)) {
+ Enter(dbglvl);
+
+ /* statistics require the size to transfer */
+ struct stat statbuf;
+ if (lstat(cache_fname, &statbuf) < 0) {
+ berrno be;
+ Mmsg2(errmsg, "Failed to find cache part file %s. ERR=%s\n",
+ cache_fname, be.bstrerror());
+ Dmsg1(dbglvl, "%s", errmsg);
+ free_pool_memory(cache_fname);
+ return false;
+ }
+ file_size = statbuf.st_size;
+
+ /* Nothing to do with this empty part */
+ if (file_size == 0) {
+ free_pool_memory(cache_fname);
+ return true; /* consider the transfer OK */
+ }
+
+ ret=true;
+ }
+
+ Dmsg1(dbglvl, "upload_part_to_cloud: %s\n", cache_fname);
+ /* get_xfer either returns a new transfer or a similar one if it already exists.
+ * in this case, the transfer is shared and ref_count is incremented. The caller should only care to release()
+ * the transfer eventually. The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/
+ transfer *item = upload_mgr.get_xfer(file_size,
+ upload_engine,
+ cache_fname,/* cache_fname is duplicated in the transfer constructor*/
+ VolumeName, /* VolumeName is duplicated in the transfer constructor*/
+ upart,
+ driver,
+ dcr,
+ cloud_prox);
+ dcr->uploads->append(item);
+ /* transfer are queued manually, so the caller has control on when the transfer is scheduled
+ * this should come handy for upload_opt */
+ item->set_do_cache_truncate(trunc_opt == TRUNC_AFTER_UPLOAD);
+ if (upload_opt == UPLOAD_EACHPART) {
+ /* in each part upload option, queue right away */
+ item->queue();
+ }
+ free_pool_memory(cache_fname);
+
+ if (ret) {
+ /* Update the Media information */
+ if (upart >= VolCatInfo.VolCatParts) {
+ VolCatInfo.VolCatParts = upart;
+ VolCatInfo.VolLastPartBytes = file_size;
+ }
+ /* We do not call dir_update_volume_info() because the part is not yet
+ * uploaded, but we may call it to update VolCatParts or VolLastPartBytes.
+ */
+ }
+
+ return ret;
+}
+
+/* Small helper to get the size of a given part index from a cache parts list */
+static int64_t part_get_size(ilist *cachep, int index)
+{
+ /* Return the recorded size of part 'index' in the cache parts list,
+ * or 0 when the index is out of range or the slot is empty. */
+ if (index > cachep->last_index()) {
+ return 0;
+ }
+ cloud_part *cp = (cloud_part *)cachep->get(index);
+ return cp ? cp->size : 0;
+}
+
+/*
+ * Download the part_idx part to the cloud. The result is store in the DCR context
+ * The caller should use free_transfer()
+ */
+transfer *cloud_dev::download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t dpart)
+{
+ /* Schedule the download of cloud part 'dpart' into the cache, unless the
+ * cached copy is already at least as big as the cloud one.
+ * Returns the (possibly shared) transfer, or NULL when nothing needs to
+ * be downloaded or on error; callers must eventually release() it. */
+ if (dpart == 0) {
+ return NULL;
+ }
+
+ /* if item's already in the dcr list, it's already in the download_mgr, we don't need any duplication*/
+ transfer *item = get_list_transfer(dcr->downloads, VolumeName, dpart);
+ if (!item) {
+ POOLMEM *cache_fname = get_pool_memory(PM_FNAME);
+ pm_strcpy(cache_fname, dev_name);
+ /* create a uniq xfer file name with XFER_TMP_NAME and the pid */
+ char xferbuf[32];
+ bsnprintf(xferbuf, sizeof(xferbuf), "%s_%d", XFER_TMP_NAME, (int)getpid());
+ add_vol_and_part(cache_fname, VolumeName, xferbuf, dpart);
+
+ /* use the cloud proxy to retrieve the transfer size */
+ uint64_t cloud_size = cloud_prox->get_size(VolumeName, dpart);
+
+ /* check if the part is already in the cache and if it's bigger or equal to the cloud conterpart*/
+ ilist cachep;
+ if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) {
+ free_pool_memory(cache_fname);
+ return NULL;
+ }
+ uint64_t cache_size = part_get_size(&cachep, dpart);
+
+ Dmsg3(dbglvl, "download_part_to_cache: %s. cache_size=%d cloud_size=%d\n", cache_fname, cache_size, cloud_size);
+
+ if (cache_size >= cloud_size) {
+ /* We could/should use mtime */
+ /* cache is "better" than cloud, no need to download */
+ Dmsg2(dbglvl, "part %ld is up-to-date in the cache %lld\n", (int32_t)dpart, cache_size);
+ free_pool_memory(cache_fname);
+ return NULL;
+ }
+
+ /* Unlikely, but still possible : the xfer cache file already exists */
+ struct stat statbuf;
+ if (lstat(cache_fname, &statbuf) == 0) {
+ Dmsg1(dbglvl, "download_part_to_cache: %s already exists: remove it.", cache_fname);
+ if (unlink(cache_fname) < 0) {
+ berrno be;
+ Dmsg2(dbglvl, "download_part_to_cache: failed to remove file %s. ERR: %s\n",cache_fname, be.bstrerror());
+ } else {
+ Dmsg1(dbglvl, "=== unlinked: %s\n", cache_fname);
+ }
+ }
+
+ /* get_xfer either returns a new transfer or a similar one if it already exists.
+ * in this case, the transfer is shared and ref_count is incremented. The caller should only care to release()
+ * the transfer eventually. The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/
+ item = download_mgr.get_xfer(cloud_size,
+ download_engine,
+ cache_fname,/* cache_fname is duplicated in the transfer constructor*/
+ VolumeName, /* VolumeName is duplicated in the transfer constructor*/
+ dpart,
+ driver,
+ dcr,
+ NULL); // no proxy on download to cache
+ dcr->downloads->append(item);
+ /* transfer are queued manually, so the caller has control on when the transfer is scheduled */
+ item->queue();
+
+ free_pool_memory(cache_fname);
+ }
+ return item;
+}
+
+/*
+ * Note, we might want to make a new download_first_part_to_read()
+ * where it waits for the first part, then this routine
+ * can simply start the other downloads that will be needed, and
+ * we can wait for them in each new open().
+ */
+bool cloud_dev::download_parts_to_read(DCR *dcr, alist* parts)
+{
+ /* For a restore: queue downloads for every part in 'parts' (a list of
+ * part numbers stored as intptr_t) that is missing from the cache, then
+ * block only until part.1 (the label) is available.
+ * Returns false when the driver or the cache parts list is unavailable. */
+ intptr_t part;
+ transfer *part_1=NULL, *item;
+ ilist cachep;
+ int64_t size;
+
+ /* Find and download any missing parts for read */
+ if (!driver) {
+ return false;
+ }
+
+ if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) {
+ return false;
+ }
+
+ foreach_alist(part, parts) {
+ /* TODO: get_cache_sizes is called before; should be an argument */
+ size = part_get_size(&cachep, part);
+ if (size == 0) {
+ item = download_part_to_cache(dcr, getVolCatName(), (int32_t)part);
+ if (part == 1) {
+ part_1 = item; /* Keep it, we continue only if the part1 is downloaded */
+ }
+ } else {
+ Dmsg2(dbglvl, "part %ld is already in the cache %lld\n", (int32_t)part, size);
+ }
+ }
+
+ /* wait for the part.1 */
+ if (part_1) {
+ wait_end_of_transfer(dcr, part_1);
+ }
+ return true;
+}
+
+uint32_t cloud_dev::get_part(boffset_t ls_offset)
+{
+ /* Extract the part number from a full address (high bits above off_bits) */
+ return (uint32_t)(ls_offset>>off_bits);
+}
+
+DEVICE *cloud_dev::get_dev(DCR */*dcr*/)
+{
+ /* A cloud device is its own DEVICE (no per-DCR sub-device) */
+ return this;
+}
+
+uint32_t cloud_dev::get_hi_addr()
+{
+ /* High 32 bits of the current file address */
+ return (uint32_t)(file_addr >> 32);
+}
+
+uint32_t cloud_dev::get_low_addr()
+{
+ /* Low 32 bits of the current file address */
+ return (uint32_t)(file_addr);
+}
+
+uint64_t cloud_dev::get_full_addr()
+{
+ /* Current position as a full address: current part in the high bits,
+ * offset-in-part (from file_addr) in the low bits. */
+ uint64_t pos;
+ pos = make_addr(part, get_offset(file_addr));
+ return pos;
+}
+
+uint64_t cloud_dev::get_full_addr(boffset_t addr)
+{
+ /* Combine the current part with the offset portion of 'addr' into a
+ * full address. */
+ uint64_t pos;
+ pos = make_addr(part, get_offset(addr));
+ return pos;
+}
+
+
+
+#ifdef is_loadable_driver
+/* Main entry point when loaded */
+/* NOTE(review): loadable-driver build path only; unlike the static entry
+ * point above, this constructs cloud_dev without jcr/device -- confirm the
+ * default constructor is valid before enabling this path. */
+extern "C" cloud_dev *BaculaSDdriver(JCR *jcr, DEVRES *device)
+{
+ Enter(dbglvl);
+ cloud_dev *dev = New(cloud_dev);
+ return dev;
+}
+#endif
+
+#if 0
+/* Dead code (compiled out): older lookup over dcr->transfers, kept for
+ * reference. get_list_transfer() above provides the same service. */
+static transfer* find_transfer(DCR *dcr, const char *VolumeName, uint32_t part)
+{
+ transfer *item;
+ foreach_alist(item, dcr->transfers) {
+ if (part == item->m_part && strcmp(item->m_volume_name, VolumeName) == 0) {
+ return item;
+ }
+ }
+ return NULL;
+}
+#endif
+
+/*
+ * Make a list of cache sizes and count num_cache_parts
+ */
+bool cloud_dev::get_cache_sizes(DCR *dcr, const char *VolumeName)
+{
+ /* Scan the cache directory of VolumeName for "part.N" files and rebuild
+ * the cache_sizes array, num_cache_parts and max_cache_part.
+ * Returns false on error or job cancellation.
+ * Note: the unused dirent buffer formerly allocated here was removed --
+ * breaddir() manages its own storage. */
+ DIR* dp = NULL;
+ struct stat statbuf;
+ POOLMEM *vol_dir = get_pool_memory(PM_NAME);
+ POOLMEM *fname = get_pool_memory(PM_NAME);
+ uint32_t cpart;
+ bool ok = false;
+
+ POOL_MEM dname(PM_FNAME);
+ int status = 0;
+
+ /*
+ * **FIXME**** do this only once for each Volume. Currently,
+ * it is done for each part that is opened.
+ * NB : this should be substituted with get_cache_volume_parts_list
+ */
+ Enter(dbglvl);
+ max_cache_size = 100;
+ if (cache_sizes) {
+ free(cache_sizes);
+ }
+ cache_sizes = (uint64_t *)malloc(max_cache_size * sizeof(uint64_t));
+ memset(cache_sizes, 0, max_cache_size * sizeof(uint64_t));
+ num_cache_parts = 0;
+ max_cache_part = 0;
+
+ make_cache_volume_name(vol_dir, VolumeName);
+ if (!(dp = opendir(vol_dir))) {
+ berrno be;
+ Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume=%s does not exist. ERR=%s\n",
+ vol_dir, be.bstrerror());
+ Dmsg1(dbglvl, "%s", errmsg);
+ goto get_out;
+ }
+
+ for ( ;; ) {
+ if (dcr->jcr->is_canceled()) {
+ goto get_out;
+ }
+ errno = 0;
+ status = breaddir(dp, dname.addr());
+ if (status == -1) {
+ break;
+ } else if (status > 0) {
+ /* status is an errno-style code; the old code printed it with %s
+ * (passing an int where a char* was expected -- undefined behavior) */
+ berrno be;
+ Mmsg1(errmsg, "breaddir failed: ERR=%s", be.bstrerror(status));
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ }
+ /* Always ignore . and .. */
+ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) {
+ continue;
+ }
+
+ /* Look only for part files */
+ if (strncmp("part.", dname.c_str(), 5) != 0) {
+ continue;
+ }
+
+ /* Get size of part */
+ Mmsg(fname, "%s/%s", vol_dir, dname.c_str());
+ if (lstat(fname, &statbuf) == -1) {
+ berrno be;
+ Mmsg2(errmsg, "Failed to stat file %s: %s\n", fname, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ }
+
+ cpart = (int)str_to_int64((char *)&(dname.c_str()[5]));
+ Dmsg2(dbglvl, "part=%d file=%s\n", cpart, dname.c_str());
+ if (cpart > max_cache_part) {
+ max_cache_part = cpart;
+ }
+ if (cpart >= max_cache_size) {
+ /* Grow the array and zero the whole newly-allocated tail. The old
+ * code zeroed only from cpart, leaving [old_size, cpart) with
+ * indeterminate values from realloc(). */
+ uint32_t old_size = max_cache_size;
+ max_cache_size = cpart + 100;
+ cache_sizes = (uint64_t *)realloc(cache_sizes, max_cache_size * sizeof(uint64_t));
+ for (uint32_t i=old_size; i<max_cache_size; i++) cache_sizes[i] = 0;
+ }
+ num_cache_parts++;
+ cache_sizes[cpart] = (uint64_t)statbuf.st_size;
+ Dmsg2(dbglvl, "found part=%d size=%llu\n", cpart, cache_sizes[cpart]);
+ }
+
+ if (chk_dbglvl(dbglvl)) {
+ Pmsg1(0, "Cache objects Vol=%s:\n", VolumeName);
+ for (int i=1; i <= (int)max_cache_part; i++) {
+ Pmsg2(0, " part num=%d size=%llu\n", i, cache_sizes[i]);
+ }
+ Pmsg2(0, "End cache obj list: nparts=%d max_cache_part=%d\n",
+ num_cache_parts, max_cache_part);
+ }
+ ok = true;
+
+get_out:
+ if (dp) {
+ closedir(dp);
+ }
+ free_pool_memory(vol_dir);
+ free_pool_memory(fname);
+ return ok;
+}
+
+
+/* Utility routines */
+
+void cloud_dev::add_vol_and_part(POOLMEM *&filename,
+ const char *VolumeName, const char *name, uint32_t apart)
+{
+ /* Append "/VolumeName/<name>.<apart>" to 'filename', inserting a path
+ * separator first if one is not already present. */
+ Enter(dbglvl);
+ char partnumber[20];
+ int len = strlen(filename);
+
+ if (len > 0 && !IsPathSeparator((filename)[len-1])) {
+ pm_strcat(filename, "/");
+ }
+
+ pm_strcat(filename, VolumeName);
+ bsnprintf(partnumber, sizeof(partnumber), "/%s.%d", name, apart);
+ pm_strcat(filename, partnumber);
+}
+
+void cloud_dev::make_cache_filename(POOLMEM *&filename,
+ const char *VolumeName, uint32_t upart)
+{
+ /* Build the full cache path of part 'upart':
+ * "<dev_name>/<VolumeName>/part.<upart>" */
+ Enter(dbglvl);
+
+ pm_strcpy(filename, dev_name);
+ add_vol_and_part(filename, VolumeName, "part", upart);
+}
+
+void cloud_dev::make_cache_volume_name(POOLMEM *&volname,
+ const char *VolumeName)
+{
+ /* Build the cache directory path of the Volume:
+ * "<dev_name>/<VolumeName>" */
+ Enter(dbglvl);
+ POOL_MEM archive_name(PM_FNAME);
+
+ pm_strcpy(archive_name, dev_name);
+ if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) {
+ pm_strcat(archive_name, "/");
+ }
+ pm_strcat(archive_name, VolumeName);
+ pm_strcpy(volname, archive_name.c_str());
+}
+
+/*
+ * DEVICE virtual functions that we redefine.
+ */
+cloud_dev::~cloud_dev()
+{
+ /* Release the shared cloud proxy, free the cache size table, terminate
+ * and delete the cloud driver, and close any open part file descriptor. */
+ Enter(dbglvl);
+
+ cloud_prox->release();
+
+ if (cache_sizes) {
+ free(cache_sizes);
+ cache_sizes = NULL;
+ }
+ if (driver) {
+ driver->term(NULL);
+ delete driver;
+ driver = NULL;
+ }
+ if (m_fd != -1) {
+ d_close(m_fd);
+ m_fd = -1;
+ }
+}
+
+cloud_dev::cloud_dev(JCR *jcr, DEVRES *device)
+{
+ /* Construct the cloud device: instantiate the configured cloud driver
+ * (S3 or file), copy bandwidth/truncate/upload options from the Cloud
+ * resource, size the shared upload/download worker queues, initialize
+ * the driver, and acquire the shared cloud proxy singleton.
+ * On driver instantiation failure a fatal Qmsg is emitted and the
+ * object is left with driver == NULL. */
+ Enter(dbglvl);
+ m_fd = -1;
+ capabilities |= CAP_LSEEK;
+
+ /* Initialize Cloud driver */
+ if (!driver) {
+ switch (device->cloud->driver_type) {
+#ifdef HAVE_LIBS3
+ case C_S3_DRIVER:
+ driver = New(s3_driver);
+ break;
+#endif
+ case C_FILE_DRIVER:
+ driver = New(file_driver);
+ break;
+ default:
+ break;
+ }
+ if (!driver) {
+ Qmsg2(jcr, M_FATAL, 0, _("Could not open Cloud driver type=%d for Device=%s.\n"),
+ device->cloud->driver_type, device->hdr.name);
+ return;
+ }
+ /* Make local copy in device */
+ if (device->cloud->upload_limit) {
+ driver->upload_limit.set_bwlimit(device->cloud->upload_limit);
+ }
+
+ if (device->cloud->download_limit) {
+ driver->download_limit.set_bwlimit(device->cloud->download_limit);
+ }
+
+ trunc_opt = device->cloud->trunc_opt;
+ upload_opt = device->cloud->upload_opt;
+ Dmsg2(dbglvl, "Trunc_opt=%d upload_opt=%d\n", trunc_opt, upload_opt);
+ /* NOTE(review): upload_mgr/download_mgr are class statics, so the last
+ * constructed device wins for max_workers -- confirm intended. */
+ if (device->cloud->max_concurrent_uploads) {
+ upload_mgr.m_wq.max_workers = device->cloud->max_concurrent_uploads;
+ }
+ if (device->cloud->max_concurrent_downloads) {
+ download_mgr.m_wq.max_workers = device->cloud->max_concurrent_downloads;
+ }
+
+ /* Initialize the driver */
+ driver->init(jcr, this, device);
+ }
+
+ /* the cloud proxy owns its cloud_parts, so we can 'set and forget' them */
+ cloud_prox = cloud_proxy::get_instance();
+
+}
+
+/*
+ * DEVICE virtuals that we redefine.
+ */
+
+static const char *seek_where(int whence)
+{
+ /* Map an lseek() whence code to its symbolic name for debug traces */
+ const char *name = "UNKNOWN";
+ switch (whence) {
+ case SEEK_SET:
+ name = "SEEK_SET";
+ break;
+ case SEEK_CUR:
+ name = "SEEK_CUR";
+ break;
+ case SEEK_END:
+ name = "SEEK_END";
+ break;
+ }
+ return name;
+}
+
+
+/*
+ * Note, we can enter with a full address containing a part number
+ * and an offset or with an offset. If the part number is zero
+ * at entry, we use the current part.
+ *
+ * This routine always returns a full address (part, offset).
+ *
+ */
+boffset_t cloud_dev::lseek(DCR *dcr, boffset_t ls_offset, int whence)
+{
+ /* Seek within the multi-part cloud volume. ls_offset may be a full
+ * address (part<<off_bits | offset) or a bare offset (part bits zero,
+ * in which case the current part is used). Switching parts closes the
+ * current part file and re-opens the new one. Always returns a full
+ * address on success, or -1/negative on error. */
+ boffset_t pos;
+ uint32_t new_part;
+ boffset_t new_offset;
+ char ed1[50];
+
+ if (!dcr) { /* can be NULL when called from rewind(NULL) */
+ return -1;
+ }
+
+ /* Convert input ls_offset into part and off */
+ if (ls_offset < 0) {
+ return -1;
+ }
+ new_part = get_part(ls_offset);
+ new_offset = get_offset(ls_offset);
+ if (new_part == 0) {
+ new_part = part;
+ if (new_part == 0) {
+ new_part = 1;
+ }
+ }
+ Dmsg6(dbglvl, "lseek(%d, %s, %s) part=%d nparts=%d off=%lld\n",
+ m_fd, print_addr(ed1, sizeof(ed1), ls_offset), seek_where(whence), part, num_cache_parts, new_offset);
+ /* SEEK_CUR stays in the current part by definition; otherwise switch
+ * part files when the target part differs. */
+ if (whence != SEEK_CUR && new_part != part) {
+ Dmsg2(dbglvl, "new_part=%d part=%d call close_part()\n", new_part, part);
+ close_part(dcr);
+ part = new_part;
+ Dmsg0(dbglvl, "now open_device()\n");
+ if (!open_device(dcr, openmode)) {
+ return -1;
+ }
+ ASSERT2(part==new_part, "Big problem part!=new_partn");
+ }
+
+ switch (whence) {
+ case SEEK_SET:
+ /* We are staying in the current part, just seek */
+ pos = ::lseek(m_fd, new_offset, SEEK_SET);
+ if (pos < 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ Dmsg1(000, "Seek error. ERR=%s\n", errmsg);
+ return pos;
+ }
+ Dmsg4(dbglvl, "lseek_set part=%d pos=%s fd=%d offset=%lld\n",
+ part, print_addr(ed1, sizeof(ed1), pos), m_fd, new_offset);
+ return get_full_addr(pos);
+
+ case SEEK_CUR:
+ pos = ::lseek(m_fd, 0, SEEK_CUR);
+ if (pos < 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ Dmsg1(000, "Seek error. ERR=%s\n", errmsg);
+ return pos;
+ }
+ Dmsg4(dbglvl, "lseek %s fd=%d offset=%lld whence=%s\n",
+ print_addr(ed1, sizeof(ed1)), m_fd, new_offset, seek_where(whence));
+ return get_full_addr(pos);
+
+ case SEEK_END:
+ /*
+ * Bacula does not use offsets for SEEK_END
+ * Also, Bacula uses seek_end only when it wants to
+ * append to the volume.
+ */
+ pos = ::lseek(m_fd, new_offset, SEEK_END);
+ if (pos < 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ Dmsg1(000, "Seek error. ERR=%s\n", errmsg);
+ return pos;
+ }
+ Dmsg4(dbglvl, "lseek_end part=%d pos=%lld fd=%d offset=%lld\n",
+ part, pos, m_fd, new_offset);
+ return get_full_addr(pos);
+
+ default:
+ Dmsg0(dbglvl, "Seek call error.\n");
+ errno = EINVAL;
+ return -1;
+ }
+}
+
+/* use this to track file usage */
+bool cloud_dev::update_pos(DCR *dcr)
+{
+ /* Thin wrapper over the file device position update; kept separate so
+ * the Enter() trace records file usage on the cloud device. */
+ Enter(dbglvl);
+ return file_dev::update_pos(dcr);
+}
+
+bool cloud_dev::rewind(DCR *dcr)
+{
+ /* Rewind to the start of the Volume: clear EOF/EOT state, switch back
+ * to part 1 if needed (closing the current part), and seek to offset 0.
+ * Fails when the device is not open. */
+ Enter(dbglvl);
+ Dmsg3(dbglvl, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name());
+ state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */
+ block_num = file = 0;
+ file_size = 0;
+ if (m_fd < 0) {
+ Mmsg1(errmsg, _("Rewind failed: device %s is not open.\n"), print_name());
+ return false;
+ }
+ if (part != 1) {
+ close_part(dcr);
+ part = 1;
+ if (!open_device(dcr, openmode)) {
+ return false;
+ }
+ }
+ if (lseek(dcr, (boffset_t)0, SEEK_SET) < 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ return false;
+ }
+ file_addr = 0;
+ return true;
+}
+
<br>
+bool cloud_dev::reposition(DCR *dcr, uint64_t raddr)
+{
+ /* Position the device at full address raddr (part + offset) for read;
+ * relies on lseek() to switch parts as needed. Device must be open. */
+ Enter(dbglvl);
+ char ed1[50];
+ Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts);
+ if (!is_open()) {
+ dev_errno = EBADF;
+ Mmsg0(errmsg, _("Bad call to reposition. Device not open\n"));
+ Qmsg0(dcr->jcr, M_FATAL, 0, errmsg);
+ return false;
+ }
+
+ if (lseek(dcr, (boffset_t)raddr, SEEK_SET) == (boffset_t)-1) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ return false;
+ }
+ file_addr = raddr;
+ Dmsg1(dbglvl, "=== reposition lseeked to %s\n", print_addr(ed1, sizeof(ed1)));
+ return true;
+}
+
+#define INTPTR(a) (void*)(intptr_t)(a)
+
+/* Small cloud scanner for the BSR list, we check if all parts are in the cache area. */
+class BSRPartScanner {
+private:
+ DCR *dcr;
+ cloud_dev *dev;
+ uint32_t lastPart; /* Last part used, mark the part for download only one time */
+ alist *parts; /* part numbers (stored as intptr_t) to download */
+
+public:
+ /* Scanner over a BSR list that collects the set of volume parts needed
+ * for a restore, so they can be downloaded from the cloud in advance. */
+ BSRPartScanner(DCR *adcr, cloud_dev *adev) {
+ dcr = adcr;
+ dev = adev;
+ lastPart = 0;
+ parts = New(alist(100, not_owned_by_alist));
+ };
+
+ ~BSRPartScanner() {
+ delete parts;
+ };
+
+ /* accessor for parts list*/
+ alist *get_parts_list() {
+ return parts;
+ };
+
+ /* We check if the current Parts in the voladdr list are needed
+ * The BSR structure should progress forward in the volume.
+ */
+ void get_parts(BSR_VOLUME *volume, BSR_VOLADDR *voladdr)
+ {
+ while (voladdr) {
+ /* each address range spans the parts between its start and end addr */
+ for (uint32_t part = dev->get_part(voladdr->saddr); part <=dev->get_part(voladdr->eaddr); ++part)
+ {
+ if (lastPart != part) {
+ lastPart = part;
+ parts->append(INTPTR(part));
+ }
+ }
+ voladdr = voladdr->next;
+ }
+ };
+
+ /* Get Volume/Parts that must be downloaded For each MediaType, we must find
+ * the right device and check if it's a Cloud device. If we have a cloud device,
+ * then we need to check all VolAddress to get the Part list that is associated.
+ *
+ * It's likely that we will always query the same MediaType and the same
+ * Volume.
+ */
+ void get_all_parts(BSR *bsr, const char *cur_volume)
+ {
+ bool done=false;
+ BSR_VOLUME *volume;
+ parts->destroy();
+ /* Always download the part.1 */
+ parts->append(INTPTR(1));
+
+ while (bsr) {
+ volume = bsr->volume;
+
+ if (strcmp(volume->VolumeName, cur_volume) == 0) {
+ get_parts(volume, bsr->voladdr);
+ done = true;
+
+ } else if (done) { /* TODO: We can stop when it's no longer the right volume */
+ break;
+ }
+
+ bsr = bsr->next;
+ }
+
+ intptr_t part;
+ if (chk_dbglvl(dbglvl)) {
+ Dmsg1(0, "Display list of parts to download for volume %s:\n", cur_volume);
+ foreach_alist(part, parts) {
+ Dmsg2(0, " Must download part %s/part.%lld\n", cur_volume, (int64_t)part);
+ }
+ }
+ };
+};
+
+/* Wait for the download of a particular part */
+bool cloud_dev::wait_one_transfer(DCR *dcr, char *VolName, uint32_t part)
+{
+ /* Schedule the download of 'part' (if needed) and block until it
+ * completes. Returns true when the part is available in the cache
+ * (including the already-up-to-date case where no transfer is created);
+ * false and a fatal Qmsg on download failure. */
+ dcr->jcr->setJobStatus(JS_CloudDownload);
+
+ transfer *item = download_part_to_cache(dcr, VolName, part);
+ if (item) {
+ bool ok = wait_end_of_transfer(dcr, item);
+ ok &= (item->m_state == TRANS_STATE_DONE);
+ dcr->jcr->setJobStatus(JS_Running);
+
+ if (!ok) {
+ Qmsg2(dcr->jcr, M_FATAL, 0,
+ _("Unable to download Volume=\"%s\"%s.\n"), VolName,
+ (part==1)?" label":"");
+ }
+ return ok;
+ }
+ /* no item to download -> up-to-date
+ * (the unreachable trailing "return false" of the old code was removed) */
+ return true;
+}
+
+bool cloud_dev::open_device(DCR *dcr, int omode)
+{
+ /* Open the current part file of the Volume in the cache, downloading it
+ * (and, on restore, all parts named by the BSR) from the cloud first when
+ * necessary. Creates the Volume cache directory on CREATE_READ_WRITE.
+ * Sets EOT when reading past the last known part. Returns true when the
+ * part file is open (m_fd >= 0). */
+ POOL_MEM archive_name(PM_FNAME);
+ POOL_MEM part_name(PM_FNAME);
+ struct stat sp;
+
+ Enter(dbglvl);
+ /* Call base class to define generic variables */
+ if (DEVICE::open_device(dcr, omode)) {
+ Dmsg2(dbglvl, "fd=%d device %s already open\n", m_fd, print_name());
+ Leave(dbglvl);
+ return true;
+ }
+ omode = openmode;
+
+ /* At this point, the device is closed, so we open it */
+
+ /* reset the cloud parts proxy for the current volume */
+ probe_cloud_proxy(dcr, getVolCatName());
+
+ /* Now Initialize the Cache part */
+ pm_strcpy(archive_name, dev_name);
+ if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) {
+ pm_strcat(archive_name, "/");
+ }
+ pm_strcat(archive_name, getVolCatName());
+
+ /* If create make directory with Volume name */
+ if (part <= 0 && omode == CREATE_READ_WRITE) {
+ Dmsg1(dbglvl, "=== makedir=%s\n", archive_name.c_str());
+ if (!makedir(dcr->jcr, archive_name.c_str(), 0740)) {
+ Dmsg0(dbglvl, "makedir failed.\n");
+ Leave(dbglvl);
+ return false;
+ }
+ }
+ if (part <= 0) {
+ part = 1; /* always start from 1 */
+ }
+ Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts);
+
+ /*
+ * If we are doing a restore, get the necessary parts
+ */
+ if (dcr->is_reading()) {
+ BSRPartScanner scanner(dcr, this);
+ scanner.get_all_parts(dcr->jcr->bsr, getVolCatName());
+ download_parts_to_read(dcr, scanner.get_parts_list());
+ }
+ get_cache_sizes(dcr, getVolCatName()); /* refresh with what may have downloaded */
+
+ /* We need to make sure the current part is loaded */
+ /* NOTE(review): the early returns below skip Leave(dbglvl) -- confirm
+ * whether the Enter/Leave tracing is meant to be balanced here. */
+ uint64_t cld_size = cloud_prox->get_size(getVolCatName(), 1);
+ if (cache_sizes[1] == 0 && cld_size != 0) {
+ if (!wait_one_transfer(dcr, getVolCatName(), 1)) {
+ return false;
+ }
+ }
+
+ /* TODO: Merge this part of the code with the previous section */
+ cld_size = cloud_prox->get_size(getVolCatName(), part);
+ if (dcr->is_reading() && part > 1 && cache_sizes[part] == 0
+ && cld_size != 0) {
+ if (!wait_one_transfer(dcr, getVolCatName(), part)) {
+ return false;
+ }
+ }
+
+ Mmsg(part_name, "/part.%d", part);
+ pm_strcat(archive_name, part_name.c_str());
+
+ set_mode(omode);
+ /* If creating file, give 0640 permissions */
+ Dmsg3(dbglvl, "open mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode),
+ archive_name.c_str(), mode);
+
+ /* Use system open() */
+ errmsg[0] = 0;
+ if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, 0640)) < 0) {
+ berrno be;
+ dev_errno = errno;
+ if (part == 1 && omode != CREATE_READ_WRITE) {
+ part = 0; /* Open failed, reset part number */
+ Mmsg3(errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"),
+ archive_name.c_str(), mode_to_str(omode), be.bstrerror());
+ Dmsg1(dbglvl, "open failed: %s", errmsg);
+ }
+ }
+ if (m_fd >= 0 && !get_cache_sizes(dcr, getVolCatName())) {
+ return false;
+ }
+ /* TODO: Make sure max_cache_part and max_cloud_part are up to date */
+ uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName());
+ if (can_read() && m_fd < 0 && part > MAX(max_cache_part, max_cloud_part)) {
+ Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n",
+ part, num_cache_parts, max_cache_part, max_cloud_part);
+ set_eot();
+ }
+ if (m_fd >= 0) {
+ if (omode == CREATE_READ_WRITE || omode == OPEN_READ_WRITE) {
+ set_append();
+ }
+ dev_errno = 0;
+ file = 0;
+ file_addr = 0;
+ if (part > num_cache_parts) {
+ num_cache_parts = part;
+ if (part > max_cache_part) {
+ max_cache_part = part;
+ }
+ }
+
+ /* Refresh the device id */
+ if (fstat(m_fd, &sp) == 0) {
+ devno = sp.st_dev;
+ }
+ } else if (dcr->jcr) {
+ pm_strcpy(dcr->jcr->errmsg, errmsg);
+ }
+ state |= preserve; /* reset any important state info */
+
+ Dmsg3(dbglvl, "fd=%d part=%d num_cache_parts=%d\n", m_fd, part, num_cache_parts);
+ Leave(dbglvl);
+ return m_fd >= 0;
+}
+
+bool cloud_dev::close(DCR *dcr)
+{
+ /* Close the device: close the current part fd, upload the last written
+ * part when the job was writing, then reset the device packet state so
+ * it can be re-opened. Returns false only on a d_close() error. */
+ Enter(dbglvl);
+ bool ok = true;
+
+ Dmsg6(dbglvl, "close_dev vol=%s part=%d fd=%d dev=%p adata=%d dev=%s\n",
+ VolHdr.VolumeName, part, m_fd, this, adata, print_name());
+
+ if (!is_open()) {
+ //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName);
+ Leave(dbglvl);
+ return true; /* already closed */
+ }
+
+ if (d_close(m_fd) != 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ ok = false;
+ }
+
+ unmount(1); /* do unmount if required */
+
+ /* Ensure the last written part is uploaded */
+ if ((part > 0) && dcr->is_writing()) {
+ if (!upload_part_to_cloud(dcr, VolHdr.VolumeName, part)) {
+ if (errmsg[0]) {
+ Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg);
+ }
+ }
+ }
+
+ /*
+ * Clean up device packet so it can be re-opened
+ *
+ */
+ state &= ~(ST_LABEL|ST_READ|ST_APPEND|ST_EOT|ST_WEOT|ST_EOF|
+ ST_NOSPACE|ST_MOUNTED|ST_MEDIA|ST_SHORT);
+ label_type = B_BACULA_LABEL;
+ clear_opened();
+ file = block_num = 0;
+ part = 0;
+ EndAddr = get_full_addr();
+ file_addr = 0;
+ EndFile = EndBlock = 0;
+ openmode = 0;
+ clear_volhdr();
+ memset(&VolCatInfo, 0, sizeof(VolCatInfo));
+ if (tid) {
+ stop_thread_timer(tid);
+ tid = 0;
+ }
+ Leave(dbglvl);
+ return ok;
+}
+
+/* When constructed, jcr_killable_lock captures the jcr current killable state and set it to false.
+ * The original state is re-applied at destruction
+ */
+class jcr_not_killable
+{
+ JCR *m_jcr;
+ bool m_killable;
+public:
+ jcr_not_killable(JCR* jcr) :
+ m_jcr(jcr),
+ m_killable(jcr->is_killable())
+ {
+ if (m_killable) {
+ m_jcr->set_killable(false);
+ }
+ }
+ ~jcr_not_killable()
+ {
+ /* reset killable state */
+ m_jcr->set_killable(m_killable);
+ }
+};
+
+/* update the cloud_proxy at VolName key. Only if necessary or if force-d */
+bool cloud_dev::probe_cloud_proxy(DCR *dcr,const char *VolName, bool force)
+{
+ /* check if the current volume is present in the proxy by probing the label (part.1)*/
+ if (!cloud_prox->volume_lookup(VolName)|| force) {
+ /* Make sure the Job thread will not be killed in this function */
+ jcr_not_killable jkl(dcr->jcr);
+ ilist cloud_parts(100, false); /* !! dont own the parts here */
+ /* first, retrieve the volume content within cloud_parts list*/
+ if (!driver->get_cloud_volume_parts_list(dcr, VolName, &cloud_parts, errmsg)) {
+ Dmsg2(dbglvl, "Cannot get cloud sizes for Volume=%s Err=%s\n", VolName, errmsg);
+ return false;
+ }
+
+ /* then, add the content of cloud_parts in the proxy table */
+ if (!cloud_prox->reset(VolName, &cloud_parts)) {
+ Dmsg1(dbglvl, "could not reset cloud proxy for Volume=%s\n", VolName);
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Truncate cache parts that are also in the cloud
+ * NOTE! We do not delete part.1 so that after this
+ * truncate cache command (really a sort of purge),
+ * the user can still do a restore.
+ */
+int cloud_dev::truncate_cache(DCR *dcr, const char *VolName, int64_t *size)
+{
+ int i, nbpart=0;
+ Enter(dbglvl);
+ ilist cache_parts;
+ /* init the dev error message */
+ errmsg[0] = 0;
+ POOLMEM *vol_dir = get_pool_memory(PM_NAME);
+ POOLMEM *fname = get_pool_memory(PM_NAME);
+
+ if (!probe_cloud_proxy(dcr, VolName)) {
+ if (errmsg[0] == 0) {
+ Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName);
+ }
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ nbpart = -1;
+ goto bail_out;
+ }
+
+ if (!get_cache_volume_parts_list(dcr, VolName, &cache_parts)) {
+ if (errmsg[0] == 0) {
+ Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName);
+ }
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ nbpart = -1;
+ goto bail_out;
+ }
+
+ make_cache_volume_name(vol_dir, VolName);
+
+ /*
+ * Remove every cache part that is also in the cloud
+ */
+ for (i=2; i <= (int)cache_parts.last_index(); i++) {
+ int64_t cache_size = part_get_size(&cache_parts, i);
+ int64_t cloud_size = cloud_prox->get_size(VolName, i);
+
+ /* remove cache parts that are empty or cache parts with matching cloud_part size*/
+ if (cache_size != 0 && cache_size != cloud_size) {
+ Dmsg3(dbglvl, "Skip truncate for part=%d scloud=%lld scache=%lld\n", i, cloud_size, cache_size);
+ continue;
+ }
+
+ /* Look in the transfer list if we have a download/upload for the current volume */
+ if (download_mgr.find(VolName, i)) {
+ Dmsg1(dbglvl, "Skip truncate for part=%d\n", i);
+ continue;
+ }
+
+ Mmsg(fname, "%s/part.%d", vol_dir, i);
+ if (unlink(fname) < 0) {
+ berrno be;
+ Mmsg2(errmsg, "Truncate cache failed to remove file %s. ERR: %s\n", fname, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ } else {
+ *size = *size + cache_size;
+ nbpart++;
+ Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname);
+ }
+ }
+bail_out:
+ free_pool_memory(vol_dir);
+ free_pool_memory(fname);
+ Leave(dbglvl);
+ return nbpart;
+}
+
+/*
+ * Truncate both cache and cloud
+ */
+bool cloud_dev::truncate(DCR *dcr)
+{
+ DIR* dp = NULL;
+ struct dirent *entry = NULL;
+ int name_max;
+ POOLMEM *vol_dir = get_pool_memory(PM_NAME);
+ POOLMEM *fname = get_pool_memory(PM_NAME);
+ bool ok = false;
+ POOL_MEM dname(PM_FNAME);
+ int status = 0;
+ ilist * iuploads = New(ilist(100,true)); /* owns the parts */
+ ilist *truncate_list = NULL;
+ FILE *fp;
+ errmsg[0] = 0;
+ Enter(dbglvl);
+
+ /* Make sure the Job thread will not be killed in this function */
+ jcr_not_killable jkl(dcr->jcr);
+
+ if (cache_sizes) {
+ free(cache_sizes);
+ cache_sizes = NULL;
+ }
+ num_cache_parts = 0;
+ max_cache_part = 0;
+ part = 0;
+ if (m_fd) {
+ ::close(m_fd);
+ m_fd = -1;
+ }
+
+ name_max = pathconf(".", _PC_NAME_MAX);
+ if (name_max < 1024) {
+ name_max = 1024;
+ }
+
+ make_cache_volume_name(vol_dir, getVolCatName());
+ Dmsg1(dbglvl, "===== truncate: %s\n", vol_dir);
+ if (!(dp = opendir(vol_dir))) {
+ berrno be;
+ Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume %s does not exist. ERR=%s\n",
+ vol_dir, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ }
+
+ entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000);
+ for ( ;; ) {
+ errno = 0;
+ status = breaddir(dp, dname.addr());
+ if (status == -1) {
+ break;
+ } else if (status > 0) {
+ Mmsg1(errmsg, "breaddir failed: status=%d", status);
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ }
+
+ /* Always ignore . and .. */
+ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) {
+ continue;
+ }
+
+ /* Look only for part files */
+ if (strncmp("part.", dname.c_str(), 5) != 0) {
+ continue;
+ }
+ Mmsg(fname, "%s/%s", vol_dir, dname.c_str());
+ if (unlink(fname) < 0) {
+ berrno be;
+ Mmsg2(errmsg, "Failed to remove file %s ERR: %s\n", fname, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ } else {
+ Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname);
+ }
+ }
+
+ /* All parts have been unlinked. Recreate an empty part.1
+ * FIX MT3450:Fatal error: Failed to re-open device after truncate on Cloud device */
+ Dmsg1(dbglvl, "Recreate empty part.1 for volume: %s\n", vol_dir);
+ Mmsg(fname, "%s/part.1", vol_dir);
+ fp = bfopen(fname, "a");
+ if (fp) {
+ fclose(fp);
+ } else {
+ berrno be;
+ Mmsg2(errmsg, "Failed to create empty file %s ERR: %s\n", fname,
+ be.bstrerror());
+ }
+
+ if (!dir_get_volume_info(dcr, getVolCatName(), GET_VOL_INFO_FOR_READ)) {
+ /* It may happen for label operation */
+ Dmsg2(100, "dir_get_vol_info failed for vol=%s: %s\n", getVolCatName(), dcr->jcr->errmsg);
+ goto get_out;
+ }
+
+ /* Update the Catalog information */
+ dcr->VolCatInfo.VolCatParts = 0;
+ dcr->VolCatInfo.VolLastPartBytes = 0;
+ dcr->VolCatInfo.VolCatCloudParts = 0;
+
+ openmode = CREATE_READ_WRITE;
+ if (!open_next_part(dcr)) {
+ goto get_out;
+ }
+
+ /* check if the current volume is present in the proxy */
+ if (!probe_cloud_proxy(dcr, getVolCatName())) {
+ goto get_out;
+ }
+
+ /* wrap the uploads in a parts ilist */
+ transfer *tpkt;
+ foreach_alist(tpkt, dcr->uploads) {
+ /* convert xfer into part when VolName match*/
+ if (strcmp(tpkt->m_volume_name,getVolCatName())!=0) {
+ continue;
+ }
+ cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part));
+ part->index = tpkt->m_part;
+ part->mtime = tpkt->m_res_mtime;
+ part->size = tpkt->m_res_size;
+ iuploads->put(part->index, part);
+ }
+ /* returns the list of items to truncate : cloud parts-uploads*/
+ truncate_list = cloud_prox->exclude(getVolCatName(), iuploads);
+ if (truncate_list && !driver->truncate_cloud_volume(dcr, getVolCatName(), truncate_list, errmsg)) {
+ Qmsg(dcr->jcr, M_ERROR, 0, "truncate_cloud_volume for %s: ERR=%s\n", getVolCatName(), errmsg);
+ goto get_out;
+ }
+ /* force proxy refresh (volume should be empty so it should be fast) */
+ /* another approach would be to reuse truncate_list to remove items */
+ if (!probe_cloud_proxy(dcr, getVolCatName(), true)) {
+ goto get_out;
+ }
+ /* check content of the list : only index should be available */
+ for(uint32_t index=1; index<=cloud_prox->last_index(getVolCatName()); index++ ) {
+ if (cloud_prox->get(getVolCatName(), index)) {
+ Dmsg2(0, "truncate_cloud_volume proxy for volume %s got part.%d should be empty\n", getVolCatName(), index);
+ Qmsg(dcr->jcr, M_WARNING, 0, "truncate_cloud_volume: %s/part.%d is still present\n", getVolCatName(), index);
+ }
+ }
+ ok = true;
+
+get_out:
+ if (dp) {
+ closedir(dp);
+ }
+ if (entry) {
+ free(entry);
+ }
+ free_pool_memory(vol_dir);
+ free_pool_memory(fname);
+
+ delete iuploads;
+ delete truncate_list;
+
+ Leave(dbglvl);
+ return ok;
+}
+
+
+int cloud_dev::read_dev_volume_label(DCR *dcr)
+{
+ int stat;
+ Enter(dbglvl);
+ Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts);
+ if (!is_open()) {
+ part = 0;
+ }
+ stat = file_dev::read_dev_volume_label(dcr);
+ Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts);
+ return stat;
+}
+
+const char *cloud_dev::print_type()
+{
+ return "Cloud";
+}
+
+
+/*
+ * makedir() is a lightly modified copy of the same function
+ * in findlib/mkpath.c
+ *
+ */
+bool makedir(JCR *jcr, char *path, mode_t mode)
+{
+ struct stat statp;
+
+ if (mkdir(path, mode) != 0) {
+ berrno be;
+ if (lstat(path, &statp) != 0) {
+ Qmsg2(jcr, M_ERROR, 0, _("Cannot create directory %s: ERR=%s\n"),
+ path, be.bstrerror());
+ return false;
+ } else if (!S_ISDIR(statp.st_mode)) {
+ Qmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path);
+ return false;
+ }
+ return true; /* directory exists */
+ }
+ return true;
+}
+
+/*
+ * This call closes the device, but it is used for part handling
+ * where we close one part and then open the next part. The
+ * difference between close_part() and close() is that close_part()
+ * saves the state information of the device (e.g. the Volume label,
+ * the Volume Catalog record, ... This permits opening and closing
+ * the Volume parts multiple times without losing track of what the
+ * main Volume parameters are.
+ */
+bool cloud_dev::close_part(DCR *dcr)
+{
+ bool ok = true;
+
+ Enter(dbglvl);
+ Dmsg5(dbglvl, "close_part vol=%s fd=%d dev=%p adata=%d dev=%s\n",
+ VolHdr.VolumeName, m_fd, this, adata, print_name());
+
+ if (!is_open()) {
+ //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName);
+ Leave(dbglvl);
+ return true; /* already closed */
+ }
+
+ if (d_close(m_fd) != 0) {
+ berrno be;
+ dev_errno = errno;
+ Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"),
+ print_name(), be.bstrerror());
+ ok = false;
+ }
+
+ m_fd = -1;
+ part = 0;
+ file_addr = 0;
+ Leave(dbglvl);
+ return ok;
+}
+
/*
 * Close the current part and open the next one.
 * When appending, a new empty part is created (and the part just closed
 * is queued for upload); when reading, the next existing part (cache or
 * cloud) is opened.  Returns false at end-of-data or on any error.
 */
bool cloud_dev::open_next_part(DCR *dcr)
{
   Enter(dbglvl);
   int save_part;
   char ed1[50];

   /* When appending, do not open a new part if the current is empty */
   if (can_append() && (part_size == 0)) {
      Dmsg2(dbglvl, "open next: part=%d num_cache_parts=%d\n", part, num_cache_parts);
      Leave(dbglvl);
      return true;
   }

   /* TODO: Get the last max_part */
   uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName());
   /* In read mode, reaching the highest known part (cache or cloud) is EOT */
   if (!can_append() && part >= MAX(max_cache_part, max_cloud_part)) {
      Dmsg3(dbglvl, "EOT: part=%d num_cache_parts=%d max_cloud_part=%d\n", part, num_cache_parts, max_cloud_part);
      Mmsg2(errmsg, "part=%d no more parts to read. addr=%s\n", part,
         print_addr(ed1, sizeof(ed1), EndAddr));
      Dmsg1(dbglvl, "%s", errmsg);
      part = 0;
      Leave(dbglvl);
      return false;
   }

   save_part = part;                /* close_part() resets part to 0 */
   if (!close_part(dcr)) {          /* close current part */
      Leave(dbglvl);
      Mmsg2(errmsg, "close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts);
      Dmsg1(dbglvl, "%s", errmsg);
      return false;
   }
   if (openmode == CREATE_READ_WRITE) {
      /* Writing: record the new part count in the catalog */
      VolCatInfo.VolCatParts = num_cache_parts;
      if (!dir_update_volume_info(dcr, false, false)) {
         /* NOTE(review): returns without Leave() -- trace imbalance only */
         Dmsg0(dbglvl, "Error from update_vol_info.\n");
         dev_errno = EIO;
         return false;
      }
      part_size = 0;
   }

   /* Restore part number */
   part = save_part;

   if (dcr->is_reading()) {
      /* Make sure the part we are about to read is in the cache */
      wait_one_transfer(dcr, getVolCatName(), part);
   }

   /* Write part to cloud */
   Dmsg2(dbglvl, "=== part=%d num_cache_parts=%d\n", part, num_cache_parts);
   if (dcr->is_writing()) {
      if (!upload_part_to_cloud(dcr, getVolCatName(), part)) {
         if (errmsg[0]) {
            Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg);
         }
      }
   }

   /* Try to open next part */
   part++;
   Dmsg2(dbglvl, "=== inc part: part=%d num_cache_parts=%d\n", part, num_cache_parts);
   if (can_append()) {
      Dmsg0(dbglvl, "Set openmode to CREATE_READ_WRITE\n");
      openmode = CREATE_READ_WRITE;
   }
   if (open_device(dcr, openmode)) {
      if (openmode == CREATE_READ_WRITE) {
         /* Fresh part: reset EOF/EOT and position at its start */
         set_append();
         clear_eof();
         clear_eot();
         file_addr = 0;
         /* NOTE(review): the 0 assignment above is immediately
          * overwritten by get_full_addr() -- dead store */
         file_addr = get_full_addr();
         if (lseek(dcr, file_addr, SEEK_SET) < 0) {
            berrno be;
            dev_errno = errno;
            Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"),
                  print_name(), be.bstrerror());
            Leave(dbglvl);
            return false;
         }
      }
   } else { /* open failed */
      /* TODO: Make sure max_cache_part and max_cloud_part are up to date */
      if (part > MAX(max_cache_part, max_cloud_part)) {
         Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n",
            part, num_cache_parts, max_cache_part, max_cloud_part);
         set_eof();
         set_eot();
      }
      Leave(dbglvl);
      Mmsg2(errmsg, "EOT: part=%d num_cache_parts=%d\n", part, num_cache_parts);
      Dmsg1(dbglvl, "%s", errmsg);
      return false;
   }

   set_labeled();                   /* all parts are labeled */

   Dmsg3(dbglvl, "opened next: append=%d part=%d num_cache_parts=%d\n", can_append(), part, num_cache_parts);
   Leave(dbglvl);
   return true;
}
+
+
+/* Print the object address */
+char *cloud_dev::print_addr(char *buf, int32_t buf_len)
+{
+ uint64_t full_addr = get_full_addr();
+ buf[0] = 0;
+ bsnprintf(buf, buf_len, "%lu:%llu", get_part(full_addr), get_offset(full_addr));
+ return buf;
+}
+
+char *cloud_dev::print_addr(char *buf, int32_t buf_len, boffset_t addr)
+{
+ buf[0] = 0;
+ bsnprintf(buf, buf_len, "%lu:%llu", get_part(addr), get_offset(addr));
+ return buf;
+}
+
+/*
+ * Check if the current position on the volume corresponds to
+ * what is in the catalog.
+ *
+ */
+bool cloud_dev::is_eod_valid(DCR *dcr)
+{
+ JCR *jcr = dcr->jcr;
+ ilist cache_parts;
+ bool do_update=false, ok=true;
+ POOL_MEM err, tmp;
+
+ /* We need up to date information for Cloud and Cache */
+ uint32_t max_cloud_part = cloud_prox->last_index(dcr->VolumeName);
+ uint64_t last_cloud_size = cloud_prox->get_size(dcr->VolumeName, max_cloud_part);
+
+ get_cache_volume_parts_list(dcr, dcr->VolumeName, &cache_parts);
+ uint32_t max_cache_part = cache_parts.last_index();
+ uint64_t last_cache_size = part_get_size(&cache_parts, max_cache_part);
+
+ /* When we open a new part, the actual size is 0, so we are not very interested */
+ if (last_cache_size == 0 && max_cache_part > 0) {
+ max_cache_part--;
+ last_cache_size = part_get_size(&cache_parts, max_cache_part);
+ }
+
+ uint32_t last_p = MAX(max_cloud_part, max_cache_part);
+ uint64_t last_s = MAX(last_cache_size, last_cloud_size);
+
+ Dmsg5(dbglvl, "vol=%s cache part=%ld size=%lld, cloud part=%ld size=%lld\n",
+ dcr->VolumeName, max_cache_part, last_cache_size, max_cloud_part, last_cloud_size);
+
+ /* If we have the same Part number in the cloud and in the cache. We check
+ * the size of the two parts. The cache part may be truncated (size=0).
+ */
+ if (max_cloud_part == max_cache_part) {
+ if (last_cache_size > 0 && last_cloud_size != last_cache_size) {
+ ok = false; /* Big consistency problem, which one do we take? Biggest one? */
+ Mmsg(tmp, "The last Part %ld size do not match between the Cache and the Cloud! Cache=%lld Cloud=%lld.\n",
+ max_cloud_part, last_cloud_size, last_cache_size);
+ pm_strcat(err, tmp.c_str());
+ }
+ }
+
+ /* The catalog should have the right LastPart */
+ if (VolCatInfo.VolCatParts != last_p) {
+ Mmsg(tmp, "The Parts do not match! Metadata Volume=%ld Catalog=%ld.\n",
+ last_p, VolCatInfo.VolCatParts);
+ VolCatInfo.VolCatParts = last_p;
+ VolCatInfo.VolLastPartBytes = last_s;
+ VolCatInfo.VolCatBytes = last_s;
+ pm_strcat(err, tmp.c_str());
+ do_update = true;
+
+ /* The catalog should have the right LastPartBytes */
+ } else if (VolCatInfo.VolLastPartBytes != last_s) {
+ Mmsg(tmp, "The Last Part Bytes %ld do not match! Metadata Volume=%lld Catalog=%lld.\n",
+ last_p, VolCatInfo.VolLastPartBytes, last_s);
+ VolCatInfo.VolLastPartBytes = last_s;
+ VolCatInfo.VolCatBytes = last_s;
+ pm_strcat(err, tmp.c_str());
+ do_update = true;
+ }
+ /* We also check that the last part uploaded in the cloud is correct */
+ if (VolCatInfo.VolCatCloudParts != max_cloud_part) {
+ Mmsg(tmp, "The Cloud Parts do not match! Metadata Volume=%ld Catalog=%ld.\n",
+ max_cloud_part, VolCatInfo.VolCatCloudParts);
+ pm_strcat(err, tmp.c_str());
+ do_update = true;
+ }
+ if (ok) {
+ if (do_update) {
+ Jmsg2(jcr, M_WARNING, 0, _("For Volume \"%s\":\n%s\nCorrecting Catalog\n"), dcr->VolumeName, err.c_str());
+ if (!dir_update_volume_info(dcr, false, true)) {
+ Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n"));
+ dcr->mark_volume_in_error();
+ return false;
+ }
+ }
+ } else {
+ Mmsg2(jcr->errmsg, _("Bacula cannot write on disk Volume \"%s\" because: %s"),
+ dcr->VolumeName, err.c_str());
+ Jmsg(jcr, M_ERROR, 0, jcr->errmsg);
+ Dmsg0(100, jcr->errmsg);
+ dcr->mark_volume_in_error();
+ return false;
+ }
+ return true;
+}
+
/*
 * We are called here when Bacula wants to append to a Volume:
 * position to end of data.  Find the highest part number known from
 * any source (cache, cloud proxy, catalog), open a brand-new part
 * after it, then let file_dev::eod() finish the positioning.
 */
bool cloud_dev::eod(DCR *dcr)
{
   bool ok;
   uint32_t max_part = 1;
   Enter(dbglvl);

   uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName());
   Dmsg5(dbglvl, "=== eod: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d vol_parts=%d\n",
      part, num_cache_parts, max_cache_part,
      max_cloud_part, VolCatInfo.VolCatParts);

   /* First find maximum part: take the largest value any source knows */
   if (max_part < max_cache_part) {
      max_part = max_cache_part;
   }
   if (max_part < max_cloud_part) {
      max_part = max_cloud_part;
   }
   if (max_part < VolCatInfo.VolCatParts) {
      max_part = VolCatInfo.VolCatParts;
   }
   if (max_part < VolCatInfo.VolCatCloudParts) {
      max_part = VolCatInfo.VolCatCloudParts;
   }
   /* If we are behind the last known part, skip ahead and create the
    * next one; otherwise the device is already positioned correctly */
   if (part < max_part) {
      if (!close_part(dcr)) {       /* close current part */
         Leave(dbglvl);
         Dmsg2(dbglvl, "close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts);
         return false;
      }
      /* Try to open next part */
      part = max_part;
      /* Create new part */
      part_size = 0;
      part++;
      openmode = CREATE_READ_WRITE;
      Dmsg2(dbglvl, "=== eod: set part=%d num_cache_parts=%d\n", part, num_cache_parts);
      if (!open_device(dcr, openmode)) {
         Leave(dbglvl);
         Dmsg2(dbglvl, "Fail open_device: part=%d num_cache_parts=%d\n", part, num_cache_parts);
         return false;
      }
   }
   ok = file_dev::eod(dcr);
   /* NOTE(review): no Leave() on this path -- trace imbalance only */
   return ok;
}
+
+bool cloud_dev::write_volume_label(DCR *dcr,
+ const char *VolName, const char *PoolName,
+ bool relabel, bool no_prelabel)
+{
+ bool ok = DEVICE::write_volume_label(dcr,
+ VolName, PoolName, relabel, no_prelabel);
+ if (!ok) {
+ Dmsg0(dbglvl, "write_volume_label failed.\n");
+ return false;
+ }
+ if (part != 1) {
+ Dmsg1(000, "Big problem!!! part=%d, but should be 1\n", part);
+ return false;
+ }
+ set_append();
+ return true;
+}
+
+bool cloud_dev::rewrite_volume_label(DCR *dcr, bool recycle)
+{
+ Enter(100);
+ bool ok = DEVICE::rewrite_volume_label(dcr, recycle);
+ /*
+ * Normally, at this point, the label has been written to disk
+ * but remains in the first part of the block, and will be
+ * "rewritten" when the full block is written.
+ * However, in the case of a cloud device the label has
+ * already been written to a part, so we must now clear
+ * the block of the label data.
+ */
+ empty_block(dcr->block);
+ if (!ok || !open_next_part(dcr)) {
+ ok = false;
+ }
+ Leave(100);
+ return ok;
+}
+
+bool cloud_dev::do_size_checks(DCR *dcr, DEV_BLOCK *block)
+{
+ if (!DEVICE::do_size_checks(dcr, block)) {
+ return false;
+ }
+
+ /*
+ * Do Cloud specific size checks
+ */
+ /* Limit maximum part size to value specified by user */
+ if (max_part_size > 0 && ((part_size + block->binbuf) >= max_part_size)) {
+ if (part < num_cache_parts) {
+ Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number"
+ " is less than the total number of parts (%d/%d, device=%s)\n"),
+ part, num_cache_parts, print_name());
+ dev_errno = EIO;
+ return false;
+ }
+
+ if (!open_next_part(dcr)) {
+ return false;
+ }
+ }
+
+ // static, so it's not calculated everytime
+ static uint64_t hard_max_part_size = ((uint64_t)1 << off_bits) -1;
+ static uint32_t hard_max_part_number = ((uint32_t)1 << part_bits) -1;
+
+ if (part_size >= hard_max_part_size) {
+ Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part size"
+ " is greater than the maximum part size (%d>%d, device=%s)\n"),
+ part_size, hard_max_part_size, print_name());
+ dev_errno = EIO;
+ return false;
+ }
+
+ if (part >= hard_max_part_number) {
+ Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number"
+ " is greater than the maximum part number (%d>%d, device=%s)\n"),
+ part, hard_max_part_number, print_name());
+ dev_errno = EIO;
+ return false;
+ }
+
+ return true;
+}
+
+bool cloud_dev::start_of_job(DCR *dcr)
+{
+ if (driver) {
+ driver->start_of_job(dcr);
+ }
+ return true;
+}
+
+
/* Two jobs can try to update the catalog information for a given cloud
 * volume. It might be avoided by converting the vol_info_mutex to a recursive
 * lock
*/
static pthread_mutex_t update_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Refresh the Media (catalog) record for the Volume of the given
 * transfer packet: bump VolCatParts/VolLastPartBytes when this upload
 * went further than the catalog knows, and VolCatCloudParts when the
 * upload completed successfully.  Serialized by update_mutex.
 */
static void update_volume_record(DCR *dcr, transfer *ppkt)
{
   lock_guard lg(update_mutex); /* automatically released at exit */
   bool do_update=false;
   /*
    * At this point ppkt should have the last part for the
    * previous volume, so update the Media record.
    */
   if (!dir_get_volume_info(dcr, ppkt->m_volume_name, GET_VOL_INFO_FOR_READ)) {
      /* It may happen for label operation */
      Dmsg2((ppkt->m_part == 1 ? 100 : 0) , "dir_get_vol_info failed for vol=%s: %s\n",
            ppkt->m_volume_name, dcr->jcr->errmsg);
      return;
   }

   /* Between the GET and the UPDATE, another job can call the same
    * function and put more up to date information. So we are protected
    * by the update_mutex
    */
   /* Update the Media information */
   if ((ppkt->m_part > dcr->VolCatInfo.VolCatParts) ||
       (ppkt->m_part == dcr->VolCatInfo.VolCatParts && dcr->VolCatInfo.VolLastPartBytes != ppkt->m_stat_size))
   {
      do_update=true;
      dcr->VolCatInfo.VolCatParts = ppkt->m_part;
      dcr->VolCatInfo.VolLastPartBytes = ppkt->m_stat_size;
   }
   /* We update the CloudParts in the catalog only if the current transfer is correct */
   if (ppkt->m_state == TRANS_STATE_DONE && ppkt->m_part > dcr->VolCatInfo.VolCatCloudParts && ppkt->m_stat_size > 0) {
      do_update = true;
      dcr->VolCatInfo.VolCatCloudParts = ppkt->m_part;
   }
   if (do_update) {
      dir_update_volume_info(dcr, false, true, true/*use_dcr*/);
   }
}
+
/*
 * End-of-job processing for the cloud device: queue deferred uploads,
 * wait for all of this DCR's transfers to finish, report their status,
 * optionally truncate uploaded cache parts, and bring the catalog
 * Media records up to date for every Volume that was uploaded.
 * Always returns true.
 */
bool cloud_dev::end_of_job(DCR *dcr)
{
   Enter(dbglvl);
   transfer *tpkt;              /* current packet */
   transfer *ppkt=NULL;         /* previous packet */
   const char *prefix = "";

   /* before waiting on transfers, we might have to launch the uploads */
   if (upload_opt == UPLOAD_AT_ENDOFJOB) {
      foreach_alist(tpkt, dcr->uploads) {
         tpkt->queue();
      }
   }

   /*
    * We wait for each of our uploads to complete
    * Note: we also want to update the cloud parts and cache parts for
    * each part uploaded. The deletes list contains transfer packet for
    * each part that was upload in the order of the parts as they were
    * created. Also, there may be multiple Volumes that were uploaded,
    * so for each volume, we search until the end of the list or a
    * different Volume is found in order to find the maximum part
    * number that was uploaded. Then we read the Media record for
    * that Volume, update it, and write it back to the catalog.
    */
   POOL_MEM msg(PM_MESSAGE);
   if (!dcr->downloads->empty()) {
      if (!dcr->jcr->is_internal_job()) {
         Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Download transfers:\n"));
      } else {
         prefix = "3000 Cloud Download: ";
      }
      Dmsg1(dbglvl, "%s", msg.c_str());
      foreach_alist(tpkt, dcr->downloads) {
         /* Do we really need to wait on downloads : if we didn't
          * wait for them until now, we basically didn't use them. And we
          * surely won't anymore. If the job is canceled we can cancel our
          * own downloads (do not touch downloads shared with other jobs).
          */
         wait_end_of_transfer(dcr, tpkt);
         POOL_MEM dmsg(PM_MESSAGE);
         tpkt->append_status(dmsg);
         Jmsg(dcr->jcr, M_INFO, 0, "%s%s", prefix, dmsg.c_str());
         download_mgr.release(tpkt);
      }
   }
   dcr->downloads->destroy();

   if (!dcr->uploads->empty()) {
      /* Show "CloudUpload" while we wait, restore the status afterwards */
      int oldstatus = dcr->jcr->JobStatus;
      dcr->jcr->sendJobStatus(JS_CloudUpload);
      if (!dcr->jcr->is_internal_job()) {
         Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Upload transfers:\n"));
      } else {
         prefix = "3000 Cloud Upload: ";
      }
      foreach_alist(tpkt, dcr->uploads) {
         wait_end_of_transfer(dcr, tpkt);
         POOL_MEM umsg(PM_MESSAGE);
         tpkt->append_status(umsg);
         Jmsg(dcr->jcr, (tpkt->m_state == TRANS_STATE_ERROR) ? M_ERROR : M_INFO, 0, "%s%s", prefix, umsg.c_str());
         Dmsg1(dbglvl, "%s", umsg.c_str());

         if (tpkt->m_state == TRANS_STATE_ERROR) {
            Mmsg(dcr->jcr->StatusErrMsg, _("Upload to Cloud failed"));
         } else if (trunc_opt == TRUNC_AT_ENDOFJOB && tpkt->m_part!=1) {
            /* else -> don't remove the cache file if the upload failed
             * (part.1 is always kept so a restore stays possible) */
            if (unlink(tpkt->m_cache_fname) != 0) {
               berrno be;
               Dmsg2(dbglvl, "Truncate cache option at end of job. Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror());
            } else {
               Dmsg1(dbglvl, "Truncate cache option at end of job. Unlink file %s\n", tpkt->m_cache_fname);
            }
         }

         /* Remember the packet; flush the catalog update each time the
          * Volume name changes */
         if (ppkt == NULL) {
            ppkt = tpkt;
            continue;
         }
         if (strcmp(ppkt->m_volume_name, tpkt->m_volume_name) == 0) {
            ppkt = tpkt;
            continue;
         }
         /* vol name changed so update media for previous transfer */
         update_volume_record(dcr, ppkt);
         ppkt = tpkt;
      }
      dcr->jcr->sendJobStatus(oldstatus);
   }

   /* Update the last (previous) one */
   if (ppkt) {
      Dmsg3(dbglvl, "== Last part=%d size=%lld Volume=%s\n", ppkt->m_part,
         ppkt->m_stat_size, ppkt->m_volume_name);
      update_volume_record(dcr, ppkt);
      Dmsg3(dbglvl, "=== Very Last part=%d size=%lld Volume=%s\n", ppkt->m_part,
         ppkt->m_stat_size, ppkt->m_volume_name);
   }

   /* Now, clear our list and the global one if needed */
   foreach_alist(tpkt, dcr->uploads) {
      upload_mgr.release(tpkt);
   }
   dcr->uploads->destroy();

   if (driver) {
      driver->end_of_job(dcr);
   }

   Leave(dbglvl);
   return true;
}
+
+bool cloud_dev::wait_end_of_transfer(DCR *dcr, transfer *elem)
+{
+ if (!elem) {
+ return false;
+ }
+
+ Enter(dbglvl);
+ struct timeval tv;
+ tv.tv_usec = 0;
+ tv.tv_sec = 30;
+
+ int stat = ETIMEDOUT;
+ while (stat == ETIMEDOUT) {
+
+ if (dcr->jcr->is_canceled()) {
+ elem->cancel();
+ break;
+ }
+
+ if (chk_dbglvl(dbglvl)) {
+ POOL_MEM status(PM_FNAME);
+ get_cloud_upload_transfer_status(status, false);
+ Dmsg1(0, "%s\n",status.addr());
+ get_cloud_download_transfer_status(status, false);
+ Dmsg1(0, "%s\n",status.addr());
+ }
+
+ stat = elem->timedwait(tv);
+ }
+
+ Leave(dbglvl);
+ return (stat == 0);
+}
+
+/* TODO: Add .api2 mode for the status message */
+/* format a status message of the cloud transfers. Verbose gives details on each transfer */
+uint32_t cloud_dev::get_cloud_upload_transfer_status(POOL_MEM& msg, bool verbose)
+{
+ upload_mgr.update_statistics();
+ uint32_t ret = 0;
+ ret = Mmsg(msg,_(" Uploads "));
+ ret += upload_mgr.append_status(msg, verbose);
+ return ret;
+}
+
+/* format a status message of the cloud transfers. Verbose gives details on each transfer */
+uint32_t cloud_dev::get_cloud_download_transfer_status(POOL_MEM& msg, bool verbose)
+{
+ download_mgr.update_statistics();
+ uint32_t ret = 0;
+ ret = Mmsg(msg,_(" Downloads "));
+ ret += download_mgr.append_status(msg, verbose);
+ return ret;
+}
+
+/* for a given volume VolumeName, return parts that is a list of the
+ * cache parts within the volume */
+bool cloud_dev::get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts)
+{
+ JCR *jcr = dcr->jcr;
+ Enter(dbglvl);
+
+ if (!parts || strlen(VolumeName) == 0) {
+ return false;
+ }
+
+ POOLMEM *vol_dir = get_pool_memory(PM_NAME);
+ /*NB : *** QUESTION *** : it works with examples but is archive_name() the kosher fct to call to get the cache path? */
+ pm_strcpy(vol_dir, archive_name());
+ if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) {
+ pm_strcat(vol_dir, "/");
+ }
+ pm_strcat(vol_dir, VolumeName);
+
+ DIR* dp = NULL;
+ struct dirent *entry = NULL;
+ struct stat statbuf;
+ int name_max;
+ bool ok = false;
+ POOL_MEM dname(PM_FNAME);
+ int status = 0;
+
+ Enter(dbglvl);
+
+ Dmsg1(dbglvl, "Searching for parts in: %s\n", VolumeName);
+
+ if (!(dp = opendir(vol_dir))) {
+ berrno be;
+ Mmsg2(errmsg, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s\n",
+ VolumeName, be.bstrerror());
+ Dmsg1(dbglvl, "%s", errmsg);
+ goto get_out;
+ }
+
+ name_max = pathconf(".", _PC_NAME_MAX);
+ if (name_max < 1024) {
+ name_max = 1024;
+ }
+
+ entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000);
+
+ for ( ;; ) {
+ if (jcr->is_canceled()) {
+ goto get_out;
+ }
+ errno = 0;
+ status = breaddir(dp, dname.addr());
+ if (status == -1) {
+ break;
+ } else if (status < 0) {
+ Mmsg1(errmsg, "breaddir failed: status=%d", status);
+ Dmsg1(dbglvl, "%s\n", errmsg);
+ goto get_out;
+ }
+ /* Always ignore . and .. */
+ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) {
+ continue;
+ }
+
+ /* Look only for part files */
+ if (strncmp("part.", dname.c_str(), 5) != 0) {
+ continue;
+ }
+ char *ext = strrchr (dname.c_str(), '.');
+ if (!ext || strlen(ext) < 2) {
+ continue;
+ }
+
+ cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part));
+ if (!part) {
+ berrno be;
+ Dmsg1(dbglvl, "Failed to create part structure: %s\n",
+ be.bstrerror());
+ goto get_out;
+ }
+
+ /* save extension (part number) to cloud_part struct index*/
+ part->index = atoi(&ext[1]);
+
+ /* Bummer : caller is responsible for freeing label */
+ POOLMEM *part_path = get_pool_memory(PM_NAME);
+ pm_strcpy(part_path, vol_dir);
+ if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) {
+ pm_strcat(part_path, "/");
+ }
+ pm_strcat(part_path, dname.c_str());
+
+ /* Get size of part */
+ if (lstat(part_path, &statbuf) == -1) {
+ berrno be;
+ Dmsg2(dbglvl, "Failed to stat file %s: %s\n",
+ part_path, be.bstrerror());
+ free_pool_memory(part_path);
+ free(part);
+ goto get_out;
+ }
+ free_pool_memory(part_path);
+
+ part->size = statbuf.st_size;
+ part->mtime = statbuf.st_mtime;
+ parts->put(part->index, part);
+ }
+
+ ok = true;
+
+get_out:
+ if (dp) {
+ closedir(dp);
+ }
+ if (entry) {
+ free(entry);
+ }
+ free_pool_memory(vol_dir);
+
+ return ok;
+}
+
/*
 * Upload cache parts that are not in the cloud (or whose cloud copy is
 * smaller than the cache copy).  Returns false if listing either side
 * failed or if any individual upload failed; other parts are still
 * attempted.
 */
bool cloud_dev::upload_cache(DCR *dcr, const char *VolumeName, POOLMEM *&err)
{
   int i;
   Enter(dbglvl);
   bool ret=true;
   ilist cloud_parts;
   ilist cache_parts;
   POOLMEM *vol_dir = get_pool_memory(PM_NAME);
   POOLMEM *fname = get_pool_memory(PM_NAME);

   if (!driver->get_cloud_volume_parts_list(dcr, VolumeName, &cloud_parts, err)) {
      Qmsg2(dcr->jcr, M_ERROR, 0, "Error while uploading parts for volume %s. %s\n", VolumeName, err);
      ret = false;
      goto bail_out;
   }

   if (!get_cache_volume_parts_list(dcr, VolumeName, &cache_parts)) {
      Qmsg1(dcr->jcr, M_ERROR, 0, "Error while listing cache parts for volume %s.\n", VolumeName);
      ret = false;
      goto bail_out;
   }

   make_cache_volume_name(vol_dir, VolumeName);

   /*
    * Upload every part where cache_size > cloud_size
    */
   for (i=1; i <= (int)cache_parts.last_index(); i++) {
      /* When the part index is within the cloud list, skip parts that
       * are missing/empty in the cache or already fully uploaded.
       * NOTE(review): when i exceeds the cloud list, the part is
       * uploaded without checking it exists in the cache -- presumably
       * upload_part_to_cloud() handles a missing file; confirm. */
      if (i <= (int)cloud_parts.last_index()) { /* not on the cloud, but exists in the cache */
         cloud_part *cachep = (cloud_part *)cache_parts[i];
         cloud_part *cloudp = (cloud_part *)cloud_parts[i];

         if (!cachep || cachep->size == 0) { /* Not in the current cache */
            continue;
         }
         if (cloudp && cloudp->size >= cachep->size) {
            continue; /* already uploaded */
         }
      }
      Mmsg(fname, "%s/part.%d", vol_dir, i);
      Dmsg1(dbglvl, "Do upload of %s\n", fname);
      if (!upload_part_to_cloud(dcr, VolumeName, i)) {
         if (errmsg[0]) {
            Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg);
         }
         ret = false;
      } else {
         Qmsg(dcr->jcr, M_INFO, 0, "Uploaded cache %s\n", fname);
      }
   }
bail_out:
   free_pool_memory(vol_dir);
   free_pool_memory(fname);
   Leave(dbglvl);
   return ret;
}
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
#ifndef _CLOUD_DEV_H_
#define _CLOUD_DEV_H_
+#define part_bits 20
+#define part_mask 0x7FFFFLL
+#define off_bits (64-part_bits)
+#define off_mask 0xFFFFFFFFFFFLL
+
#include "bacula.h"
#include "stored.h"
#include "cloud_driver.h"
+#include "cloud_transfer_mgr.h"
+#include "cloud_parts.h"
class cloud_dev: public file_dev {
public:
int64_t obj_len;
int status;
+ uint64_t *cache_sizes;
+ uint32_t num_cache_parts;
+ uint32_t max_cache_part;
+ uint32_t max_cache_size;
+
+ uint32_t trunc_opt;
+ uint32_t upload_opt;
+
+ cloud_driver *driver;
+
+ static transfer_manager download_mgr;
+ static transfer_manager upload_mgr;
+
+ cloud_proxy *cloud_prox;
+
+ void add_vol_and_part(POOLMEM *&filename, const char *VolumeName, const char *name, uint32_t part);
+
+private:
+ char *cache_directory;
+ bool download_parts_to_read(DCR *dcr, alist* parts);
+ bool upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t part);
+ transfer *download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t part);
+ void make_cache_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part);
+ void make_cache_volume_name(POOLMEM *&full_volname, const char *VolumeName);
+ bool get_cache_sizes(DCR *dcr, const char *VolumeName);
+ bool wait_end_of_transfer(DCR *dcr, transfer *elem);
+ bool get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts);
+ bool wait_one_transfer(DCR *dcr, char *VolName, uint32_t part);
+ bool probe_cloud_proxy(DCR *dcr, const char* VolName, bool force=false);
+
public:
cloud_dev(JCR *jcr, DEVRES *device);
~cloud_dev();
- cloud_driver *driver;
+ bool close_part(DCR *dcr);
+ uint32_t get_part(boffset_t ls_offset);
/* DEVICE virtual interfaces that we redefine */
boffset_t lseek(DCR *dcr, off_t offset, int whence);
uint32_t get_cloud_download_transfer_status(POOL_MEM &msg, bool verbose);
};
+/* Exported subroutines */
+bool makedir(JCR *jcr, char *path, mode_t mode);
+
#endif /* _CLOUD_DEV_H_ */
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
#include "bacula.h"
#include "stored.h"
+#include "cloud_parts.h"
+#include "cloud_transfer_mgr.h"
+#include "lib/bwlimit.h"
#ifndef _CLOUD_DRIVER_H_
#define _CLOUD_DRIVER_H_
cloud_driver() : max_upload_retries(NUM_UPLOAD_RETRIES) {};
virtual ~cloud_driver() {};
+ virtual bool copy_cache_part_to_cloud(transfer *xfer) = 0;
+ virtual bool copy_cloud_part_to_cache(transfer *xfer) = 0;
virtual bool truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err) = 0;
virtual bool init(JCR *jcr, cloud_dev *dev, DEVRES *device) = 0;
virtual bool term(DCR *dcr) = 0;
virtual bool get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err) = 0;
virtual bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) = 0; /* TODO: Adapt the prototype to have a handler instead */
+ bwlimit upload_limit;
+ bwlimit download_limit;
uint32_t max_upload_retries;
};
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
Bacula(R) is a registered trademark of Kern Sibbald.
*/
+/*
+ * Routines for writing Cloud drivers
+ *
+ * Written by Kern Sibbald, May MMXVI
+ */
+
+#include "cloud_parts.h"
+
+/* Full comparison: two parts are equal when index, mtime and size match */
+bool operator==(const cloud_part& lhs, const cloud_part& rhs)
+{
+   if (lhs.index != rhs.index) return false;
+   if (lhs.mtime != rhs.mtime) return false;
+   return lhs.size == rhs.size;
+}
+
+/* Negation of the full comparison above */
+bool operator!=(const cloud_part& lhs, const cloud_part& rhs)
+{
+   return !(lhs == rhs);
+}
+
+/* Index-only comparison: a part "equals" an integer when indexes match */
+bool operator==(const cloud_part& lhs, const uint32_t& rhs)
+{
+   return rhs == lhs.index;
+}
+
+/* Negation of the index-only comparison above */
+bool operator!=(const cloud_part& lhs, const uint32_t& rhs)
+{
+   return !(lhs == rhs);
+}
+
+/* Compare the whole cloud_part, according to operator==() above */
+bool list_contains_part(ilist *parts, cloud_part *p)
+{
+   if (!parts || !p) {
+      return false;
+   }
+   cloud_part *found = (cloud_part *)parts->get(p->index);
+   return found && *found == *p;
+}
+
+/* Only check that a part with index part_idx exists in the parts list */
+bool list_contains_part(ilist *parts, uint32_t part_idx)
+{
+   return parts && part_idx > 0 && parts->get(part_idx) != NULL;
+}
+
+/* Return true when both lists hold exactly the same parts
+ * (same indexes, and equal index/mtime/size at each index).
+ * Returns false when either list pointer is NULL. */
+bool identical_lists(ilist *parts1, ilist *parts2)
+{
+   if (!parts1 || !parts2) {
+      return false;
+   }
+   /* Using an indexed ilist forces us to walk indexes (foreach not usable) */
+   int max_size = parts1->last_index();
+   if (parts2->last_index() > max_size) {
+      max_size = parts2->last_index();
+   }
+   for (int index = 0; index <= max_size; index++) {
+      cloud_part *p1 = (cloud_part *)parts1->get(index);
+      cloud_part *p2 = (cloud_part *)parts2->get(index);
+      /* Simplified: the original re-tested p1/p2 for NULL inside branches
+       * where they were already known non-NULL */
+      if ((p1 == NULL) != (p2 == NULL)) {
+         return false;          /* part present on one side only */
+      }
+      if (p1 && *p1 != *p2) {
+         return false;          /* same index, different content */
+      }
+   }
+   return true;
+}
+
+/* cloud_parts present in source but not in dest are appended to diff.
+ * There's no cloud_part copy made.
+ * Diff only holds references and shouldn't own them.
+ * Returns false only when one of the list pointers is NULL.
+ * NOTE(review): when source has no part at an index but dest does,
+ * a NULL is put() into diff at that index — presumably intentional
+ * (marks "missing on source side"); confirm callers expect this. */
+bool diff_lists(ilist *source, ilist *dest, ilist *diff)
+{
+ if (source && dest && diff) {
+ /* Using indexed list forces us to treat it differently (foreach not working b.e.) */
+ int max_size = source->last_index();
+ if (dest->last_index() > source->last_index()) {
+ max_size = dest->last_index();
+ }
+ for(int index=0; index<=max_size; index++ ) {
+ cloud_part *p1 = (cloud_part *)source->get(index);
+ cloud_part *p2 = (cloud_part *)dest->get(index);
+ if (!p1) {
+ if (p2) diff->put(index, p1); /* stores NULL: see NOTE above */
+ } else if (!p2) {
+ if (p1) diff->put(index, p1); /* in source only: reference it */
+ } else if (*p1 != *p2) {
+ diff->put(index, p1); /* differs: reference the source part */
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+/*=================================================
+ * cloud_proxy definitions
+ ================================================= */
+
+cloud_proxy * cloud_proxy::m_pinstance=NULL;
+uint64_t cloud_proxy::m_count=0;
+
+/* hash table node structure: one node per Volume */
+typedef struct {
+ hlink hlnk; /* htable link (address used by htable for chaining) */
+ ilist *parts_lst; /* cloud_parts indexed by part number */
+ char *key_name; /* bstrdup'ed Volume name used as the hash key */
+} VolHashItem;
+
+/* constructor
+ * size: the default hash size
+ * owns: determines if the ilists own the cloud_parts or not */
+cloud_proxy::cloud_proxy(uint32_t size, bool owns)
+{
+ pthread_mutex_init(&m_mutex, 0);
+ VolHashItem *hitem=NULL;
+ /* hitem is NULL here: only its hlnk member offset is used by htable() */
+ m_hash = New(htable(hitem, &hitem->hlnk, size));
+ m_owns = owns;
+}
+
+/* destructor
+ * we need to go thru each htable node and manually delete
+ * the associated ilist before deleting the htable itself.
+ * (The VolHashItem nodes were hash_malloc'ed, so the htable
+ * destructor releases the node memory itself.) */
+cloud_proxy::~cloud_proxy()
+{
+ VolHashItem *hitem;
+ foreach_htable(hitem, m_hash) {
+ delete hitem->parts_lst; /* frees the cloud_parts too when m_owns */
+ free (hitem->key_name); /* bstrdup'ed in set()/reset() */
+ }
+ delete m_hash;
+ pthread_mutex_destroy(&m_mutex);
+}
+
+/* Insert the given cloud_part into the proxy.
+ * The per-volume ilist is created on demand by the overload below. */
+bool cloud_proxy::set(const char *volume, cloud_part *part)
+{
+   if (!part) {
+      return false;
+   }
+   return set(volume, part->index, part->mtime, part->size);
+}
+
+/* Insert a part (described by index/mtime/size) into the proxy,
+ * creating the per-volume list if necessary.
+ * Returns false on bad arguments or when the hash insert fails. */
+bool cloud_proxy::set(const char *volume, uint32_t index, utime_t mtime, uint64_t size)
+{
+   if (!volume || index < 1) {
+      return false;
+   }
+   lock_guard lg(m_mutex);
+   /* allocate a new part and fill it with the transfer result info */
+   cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part));
+   part->index = index;
+   part->mtime = mtime;
+   part->size = size;
+
+   VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast<char*>(volume));
+   if (hitem) { /* when the node already exists, put the cloud_part into the vol list */
+      /* free any previously stored part at this index */
+      cloud_part *prev = (cloud_part *)hitem->parts_lst->get(index);
+      if (prev) {
+         free(prev);
+      }
+      hitem->parts_lst->put(index, part);
+      return true;
+   }
+   /* the node doesn't exist for this key: create it */
+   ilist *new_lst = New(ilist(100,m_owns));
+   new_lst->put(part->index, part);
+   /* use hashtable helper malloc (node memory is owned by the htable) */
+   VolHashItem *new_hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem));
+   new_hitem->parts_lst = new_lst;
+   new_hitem->key_name = bstrdup(volume);
+   if (!m_hash->insert(new_hitem->key_name, new_hitem)) {
+      /* Fixed: the original leaked part, new_lst and key_name here */
+      if (!m_owns) {
+         free(part);            /* the list would not free it for us */
+      }
+      delete new_lst;           /* frees part too when m_owns */
+      free(new_hitem->key_name);
+      return false;
+   }
+   return true;
+}
+
+/* Retrieve the proxied cloud_part of 'volume' at index 'index'.
+ * Returns NULL when the volume or the part is unknown. */
+cloud_part *cloud_proxy::get(const char *volume, uint32_t index)
+{
+   lock_guard lg(m_mutex);
+   if (!volume) {
+      return NULL;
+   }
+   VolHashItem *hitem = (VolHashItem *)m_hash->lookup(const_cast<char*>(volume));
+   if (!hitem || !hitem->parts_lst) {
+      return NULL;
+   }
+   return (cloud_part *)hitem->parts_lst->get(index);
+}
+
+/* Direct access to the proxied part size (0 when the part is unknown) */
+uint64_t cloud_proxy::get_size(const char *volume, uint32_t part_idx)
+{
+   cloud_part *p = get(volume, part_idx);
+   if (!p) {
+      return 0;
+   }
+   return p->size;
+}
+
+/* Check if the volume entry exists and return true if it's the case */
+bool cloud_proxy::volume_lookup(const char *volume)
+{
+   lock_guard lg(m_mutex);
+   if (!volume) {
+      return false;
+   }
+   return m_hash->lookup(const_cast<char*>(volume)) != NULL;
+}
+
+/* reset the volume list content with the content of part_list.
+ * The previous per-volume list is deleted and rebuilt from part_list.
+ * NOTE(review): the cloud_part pointers are shared with part_list, not
+ * copied — if both ilists own their parts they would be freed twice;
+ * callers appear to pass non-owning lists, confirm. */
+bool cloud_proxy::reset(const char *volume, ilist *part_list)
+{
+ lock_guard lg(m_mutex);
+ if (volume && part_list) {
+ VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast<char*>(volume));
+ if (hitem) { /* when the node already exist, recycle it */
+ delete hitem->parts_lst;
+ } else { /* create the node */
+ hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem));
+ hitem->key_name = bstrdup(volume);
+ if (!m_hash->insert(hitem->key_name, hitem)) {
+ return false;
+ }
+ }
+ /* re-create the volume list */
+ hitem->parts_lst = New(ilist(100, m_owns));
+ /* feed it with cloud_part elements (index 0 is never used) */
+ for(int index=1; index<=part_list->last_index(); index++ ) {
+ cloud_part *part = (cloud_part *)part_list->get(index);
+ if (part) {
+ hitem->parts_lst->put(index, part);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+/* Current last (max) part index known for the given volume, 0 when unknown */
+uint32_t cloud_proxy::last_index(const char *volume)
+{
+   lock_guard lg(m_mutex);
+   if (!volume) {
+      return 0;
+   }
+   VolHashItem *hitem = (VolHashItem *)m_hash->lookup(const_cast<char*>(volume));
+   return (hitem && hitem->parts_lst) ? hitem->parts_lst->last_index() : 0;
+}
+
+/* Return a new (non-owning) ilist of parts present in the proxy for
+ * 'volume' but not in exclusion_lst, or NULL when the volume is unknown.
+ * Caller owns (deletes) the returned list.
+ * Fixed: now takes m_mutex like every other accessor, and no longer
+ * leaks the result list when diff_lists() fails. */
+ilist *cloud_proxy::exclude(const char *volume, ilist *exclusion_lst)
+{
+   lock_guard lg(m_mutex);
+   if (volume && exclusion_lst) {
+      VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast<char*>(volume));
+      if (hitem) {
+         ilist *res_lst = New(ilist(100, false));
+         if (diff_lists(hitem->parts_lst, exclusion_lst, res_lst)) {
+            return res_lst;
+         }
+         delete res_lst;        /* diff failed: don't leak the list */
+      }
+   }
+   return NULL;
+}
+/* Refcounted singleton accessor.
+ * NOTE(review): not thread-safe — two threads calling get_instance()
+ * concurrently could both see m_pinstance==NULL and allocate twice;
+ * confirm that callers serialize the first call (e.g. at startup). */
+cloud_proxy *cloud_proxy::get_instance()
+{
+ if (!m_pinstance) {
+ m_pinstance = New(cloud_proxy());
+ }
+ ++m_count;
+ return m_pinstance;
+}
+
+/* Drop one reference; delete the singleton when the last user is gone.
+ * NOTE(review): a release() without a matching get_instance() makes the
+ * uint64_t m_count wrap around instead of reaching 0 — confirm call
+ * sites are strictly paired. */
+void cloud_proxy::release()
+{
+ if (--m_count == 0) {
+ delete m_pinstance;
+ m_pinstance = NULL;
+ }
+}
+
+/* Debug helper: print every volume and each part index it holds */
+void cloud_proxy::dump()
+{
+   VolHashItem *hitem;
+   foreach_htable(hitem, m_hash) {
+      Dmsg2(0, "proxy (%d) Volume:%s\n", m_hash->size(), hitem->hlnk.key.key);
+      for (int idx = 0; idx <= hitem->parts_lst->last_index(); idx++) {
+         cloud_part *part = (cloud_part *)hitem->parts_lst->get(idx);
+         if (part) {
+            Dmsg1(0, "part.%d\n", part->index);
+         }
+      }
+   }
+}
+
+//=================================================
+#ifdef TEST_PROGRAM
+/* Unit tests for the cloud_parts list helpers and the cloud_proxy */
+int main (int argc, char *argv[])
+{
+   pthread_attr_t attr;
+
+   void * start_heap = sbrk(0);
+   (void)start_heap;
+
+   setlocale(LC_ALL, "");
+   bindtextdomain("bacula", LOCALEDIR);
+   textdomain("bacula");
+   init_stack_dump();
+   my_name_is(argc, argv, "cloud_parts_test");
+   init_msg(NULL, NULL);
+   daemon_start_time = time(NULL);
+   set_thread_concurrency(150);
+   lmgr_init_thread(); /* initialize the lockmanager stack */
+   pthread_attr_init(&attr);
+   berrno be;
+
+   printf("Test0\n");
+   {
+      cloud_part p1, p2, p3, p4;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      p4.index = 4;
+      p4.mtime = 4000;
+      p4.size  = 4040;
+
+      ilist l(10,false);
+      l.put(p1.index,&p1);
+      l.put(p2.index,&p2);
+      l.put(p3.index,&p3);
+
+      ASSERT(list_contains_part(&l, &p1));
+      ASSERT(list_contains_part(&l, &p2));
+      ASSERT(list_contains_part(&l, &p3));
+      ASSERT(!list_contains_part(&l, &p4));
+
+      ASSERT(list_contains_part(&l, 3));
+      ASSERT(list_contains_part(&l, 1));
+      ASSERT(list_contains_part(&l, 2));
+      ASSERT(!list_contains_part(&l, 4));
+   }
+
+   printf("Test1\n");
+   {
+      cloud_part p1, p2, p3;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      ilist cloud(10,false);
+      cloud.put(p1.index, &p1);
+      cloud.put(p2.index, &p2);
+
+      ilist cache(10,false);
+      cache.put(p3.index, &p3);
+
+      ASSERT(!identical_lists(&cloud, &cache));
+
+      cache.put(p1.index, &p1);
+      ASSERT(!identical_lists(&cloud, &cache));
+   }
+
+   printf("Test2\n");
+   {
+      cloud_part p1, p2, p3, p4;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      p4.index = 4;
+      p4.mtime = 4000;
+      p4.size  = 4040;
+
+      ilist cloud(10,false);
+      cloud.put(p1.index, &p1);
+      cloud.put(p2.index, &p2);
+
+      ilist cache(10,false);
+      /* Fixed: p3 and p4 must go into the cache list (the original put
+       * them into cloud, leaving cache empty and duplicating Test1) */
+      cache.put(p3.index, &p3);
+      cache.put(p4.index, &p4);
+
+      ASSERT(!identical_lists(&cloud, &cache));
+
+      cache.put(p1.index, &p1);
+      ASSERT(!identical_lists(&cloud, &cache));
+   }
+
+   printf("Test3\n");
+   {
+      cloud_part p1, p2, p3;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      ilist cloud(10,false);
+      cloud.put(p1.index, &p1);
+      cloud.put(p2.index, &p2);
+      cloud.put(p3.index, &p3);
+
+      ilist cache(10,false);
+      cache.put(p3.index, &p3);
+      cache.put(p1.index, &p1);
+      cache.put(p2.index, &p2);
+
+      ASSERT(identical_lists(&cloud, &cache));
+   }
+
+   printf("Test4\n");
+   {
+      cloud_part p1, p2, p3;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      ilist cloud(10,false);
+      cloud.put(p1.index, &p1);
+      cloud.put(p2.index, &p2);
+      cloud.put(p3.index, &p3);
+
+      ilist cache(10,false);
+      cache.put(p2.index, &p2);
+      cache.put(p1.index, &p1);
+
+      ASSERT(!identical_lists(&cloud, &cache));
+      ilist diff(10,false);
+      ASSERT(diff_lists(&cloud, &cache, &diff));
+      ASSERT(diff.size() == 1);
+      cloud_part *dp = (cloud_part *)diff.get(3);
+      ASSERT(*dp == p3);
+   }
+
+   printf("Test proxy set\\get\n");
+   {
+      cloud_part p1, p2, p3;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      cloud_proxy *prox = cloud_proxy::get_instance();
+
+      /* add to the cloud proxy with no error */
+      /* in volume1 */
+      ASSERT(prox->set("volume1", &p1));
+      ASSERT(prox->set("volume1", &p2));
+      /* in volume2 */
+      ASSERT(prox->set("volume2", &p3));
+
+      /* retrieve the correct elements */
+      ASSERT(prox->get("volume1", 1) != NULL);
+      ASSERT(prox->get("volume1", 1)->mtime == 1000);
+      ASSERT(prox->get("volume1", 1)->size == 1000);
+      ASSERT(prox->get("volume1", 2) != NULL);
+      ASSERT(prox->get("volume1", 2)->mtime == 2000);
+      ASSERT(prox->get("volume1", 2)->size == 2020);
+      /* part3 is in volume2, not in volume1 */
+      ASSERT(prox->get("volume1", 3) == NULL);
+      ASSERT(prox->get("volume2", 3) != NULL);
+      ASSERT(prox->get("volume2", 3)->mtime == 3000);
+      ASSERT(prox->get("volume2", 3)->size == 3030);
+      /* there's no volume3 */
+      ASSERT(prox->get("volume3", 1) == NULL);
+      /* there's no volume3 nor part4 */
+      ASSERT(prox->get("volume3", 4) == NULL);
+   }
+   printf("Test proxy reset\n");
+   {
+      cloud_part p1, p2, p3, p4, p5;
+
+      p1.index = 1;
+      p1.mtime = 1000;
+      p1.size  = 1000;
+
+      p2.index = 2;
+      p2.mtime = 2000;
+      p2.size  = 2020;
+
+      p3.index = 3;
+      p3.mtime = 3000;
+      p3.size  = 3030;
+
+      cloud_proxy *prox = cloud_proxy::get_instance();
+
+      /* add to the cloud proxy with no error */
+      /* in volume1 */
+      ASSERT(prox->set("volume1", &p1));
+      ASSERT(prox->set("volume1", &p2));
+      /* in volume2 */
+      ASSERT(prox->set("volume2", &p3));
+
+      p4.index = 3;
+      p4.mtime = 4000;
+      p4.size  = 4040;
+
+      p5.index = 50;
+      p5.mtime = 5000;
+      p5.size  = 5050;
+
+      ilist part_list(10,false);
+      part_list.put(p4.index, &p4);
+      part_list.put(p5.index, &p5);
+
+      /* reset volume 1 */
+      prox->reset("volume1", &part_list);
+      /* old elements are gone */
+      ASSERT(prox->get("volume1", 1) == NULL);
+      ASSERT(prox->get("volume1", 2) == NULL);
+      /* new elements are at the correct index */
+      ASSERT(prox->get("volume1", 3) != NULL);
+      ASSERT(prox->get("volume1", 3)->mtime == 4000);
+      ASSERT(prox->get("volume1", 3)->size == 4040);
+      ASSERT(prox->get("volume1", 50) != NULL);
+      ASSERT(prox->get("volume1", 50)->mtime == 5000);
+      ASSERT(prox->get("volume1", 50)->size == 5050);
+      /* part3 is still in volume2 */
+      ASSERT(prox->get("volume2", 3) != NULL);
+      ASSERT(prox->get("volume2", 3)->mtime == 3000);
+      ASSERT(prox->get("volume2", 3)->size == 3030);
+      /* there's no volume3 */
+      ASSERT(prox->get("volume3", 1) == NULL);
+      /* there's no volume3 nor part.index 4 */
+      ASSERT(prox->get("volume3", 4) == NULL);
+      prox->dump();
+   }
+
+
+   return 0;
+
+}
+
+#endif /* TEST_PROGRAM */
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
*
* Written by Norbert Bizet, May MMXVI
*/
+#ifndef _CLOUD_PARTS_H_
+#define _CLOUD_PARTS_H_
+
+#include "bacula.h"
+#include "stored.h"
+
+
+/* Description of a single cloud part (one object of a cloud volume) */
+struct cloud_part
+{
+ uint32_t index; /* part number within the volume ("part.<index>") */
+ utime_t mtime; /* last modification time reported for the part */
+ uint64_t size; /* part size in bytes */
+};
+
+/* equality operators for cloud_part structure */
+bool operator==(const cloud_part& lhs, const cloud_part& rhs);
+bool operator!=(const cloud_part& lhs, const cloud_part& rhs);
+/* more equality operators: when compared to int, we match only index */
+bool operator==(const cloud_part& lhs, const uint32_t& rhs);
+bool operator!=(const cloud_part& lhs, const uint32_t& rhs);
+
+/* Check if a part p is contained in a parts list */
+bool list_contains_part(ilist *parts, cloud_part *p);
+/* Check if a part index is contained in a parts list */
+bool list_contains_part(ilist *parts, uint32_t part_idx);
+/* if parts1 and parts2 are synced, return true. false otherwise */
+bool identical_lists(ilist *parts1, ilist *parts2);
+/* cloud_parts present in source but not in dest are appended to diff.
+ * There's no cloud_part copy made.
+ * Diff only holds references and shouldn't own them */
+bool diff_lists(ilist *source, ilist *dest, ilist *diff);
+
+
+/* A proxy view of the cloud, providing existing parts
+ * index/size/date of modification without accessing the cloud itself.
+ * The basic proxy structure is a hash table of ilists:
+ root
+ |
+ -[volume001]-----ilist
+ | |
+ | [01]-->cloud_part
+ | [03]-->cloud_part
+ | |
+ |
+ -[volume002]-----ilist
+ | |
+ | [01]-->cloud_part
+ [02]-->cloud_part
+ |
+ */
+class cloud_proxy : public SMARTALLOC
+{
+private:
+ htable *m_hash; /* the root htable */
+ bool m_owns; /* determines if ilist own the cloud_parts */
+ pthread_mutex_t m_mutex; /* protect access*/
+ static cloud_proxy *m_pinstance; /* singleton instance */
+ static uint64_t m_count; /* static refcount */
+
+ /* private on purpose: the refcounted singleton must be disposed of
+ * via release(), never with delete */
+ ~cloud_proxy();
+
+public:
+ /* size: the default hash size
+ * owns: determines if the ilists own the cloud_parts or not */
+ cloud_proxy(uint32_t size=100, bool owns=true);
+
+ /* each time a part is added to the cloud, the corresponding cloud_part
+ * should be set here */
+ /* either using a part ptr (part can be disposed afterward)... */
+ bool set(const char *volume, cloud_part *part);
+ /* ...or by passing basic part parameters (part is constructed internally) */
+ bool set(const char *volume, uint32_t index, utime_t mtime, uint64_t size);
+
+ /* one can retrieve the proxied cloud_part using the get method */
+ cloud_part *get(const char *volume, uint32_t part_idx);
+ /* direct access to part size */
+ uint64_t get_size(const char *volume, uint32_t part_idx);
+
+ /* Check if the volume entry exists and return true if it's the case */
+ bool volume_lookup(const char *volume);
+
+ /* reset the volume list content with the content of part_list */
+ bool reset(const char *volume, ilist *part_list);
+
+ /* get the current last (max) index for a given volume */
+ uint32_t last_index(const char *volume);
+
+ /* returns a ilist of elements present in the proxy but not in the exclusion list */
+ ilist *exclude(const char* volume, ilist *exclusion_lst);
+
+ /* refcounted singleton */
+ static cloud_proxy *get_instance();
+ /* instead of deleting, release the cloud_proxy*/
+ void release();
+
+ void dump();
+};
+
+#endif /* _CLOUD_PARTS_H_ */
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
Bacula(R) is a registered trademark of Kern Sibbald.
*/
+#include "bacula.h"
+#include "../stored/stored.h"
+
+extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code);
+
+static CONFIG *config;
+
+void *start_heap;
+#define CONFIG_FILE "bacula-sd.conf"
+char *configfile = NULL;
+bool detect_errors = false;
+int errors = 0;
+
+/* Print the command line help text and exit with status 1 */
+static void usage()
+{
+ fprintf(stderr, _(
+PROG_COPYRIGHT
+"\n%sVersion: %s (%s)\n\n"
+"Usage: cloud_test [options] <device-name>\n"
+" -b <file> specify a bootstrap file\n"
+" -c <file> specify a Storage configuration file\n"
+" -d <nn> set debug level to <nn>\n"
+" -dt print timestamp in debug output\n"
+" -v be verbose\n"
+" -V specify Volume names (separated by |)\n"
+" -? print this message\n\n"), 2000, "", VERSION, BDATE);
+ exit(1);
+}
+
+/* Decode a label record into sessrec and describe it.
+ * Side effects: clears then fills *sessrec; sets jcr->JobId from a
+ * Begin Job Session label; prints the description when verbose. */
+static void get_session_record(JCR *jcr, DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec)
+{
+ const char *rtype;
+ memset(sessrec, 0, sizeof(SESSION_LABEL));
+ jcr->JobId = 0;
+ switch (rec->FileIndex) {
+ case PRE_LABEL:
+ rtype = _("Fresh Volume Label");
+ break;
+ case VOL_LABEL:
+ rtype = _("Volume Label");
+ unser_volume_label(dev, rec);
+ break;
+ case SOS_LABEL:
+ rtype = _("Begin Job Session");
+ unser_session_label(sessrec, rec);
+ jcr->JobId = sessrec->JobId;
+ break;
+ case EOS_LABEL:
+ rtype = _("End Job Session");
+ break;
+ case 0:
+ case EOM_LABEL:
+ rtype = _("End of Medium");
+ break;
+ case EOT_LABEL:
+ rtype = _("End of Physical Medium");
+ break;
+ case SOB_LABEL:
+ rtype = _("Start of object");
+ break;
+ case EOB_LABEL:
+ rtype = _("End of object");
+ break;
+ default:
+ rtype = _("Unknown");
+ Dmsg1(10, "FI rtype=%d unknown\n", rec->FileIndex);
+ break;
+ }
+ /* NOTE(review): rec->Stream is printed under the "JobId=" label —
+ * presumably Stream carries the JobId for label records; confirm. */
+ Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n",
+ rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len);
+ if (verbose) {
+ Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"),
+ rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len);
+ }
+}
+
+/* List just block information.
+ * Reads every block of the current volume, printing block number and
+ * length; mounts the next read volume at EOM and counts I/O errors in
+ * the global 'errors'. */
+static void do_blocks(JCR *jcr, DCR *dcr)
+{
+ DEV_BLOCK *block = dcr->block;
+ DEVICE *dev = dcr->dev;
+ char buf1[100], buf2[100];
+ DEV_RECORD *rec = new_record();
+ for ( ;; ) {
+ if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) {
+ Dmsg1(100, "!read_block(): ERR=%s\n", dev->print_errmsg());
+ if (dev->at_eot()) {
+ /* End of medium: try to switch to the next read volume */
+ if (!mount_next_read_volume(dcr)) {
+ Jmsg(jcr, M_INFO, 0, _("Got EOM at file %u on device %s, Volume \"%s\"\n"),
+ dev->file, dev->print_name(), dcr->VolumeName);
+ break;
+ }
+ /* Read and discard Volume label */
+ DEV_RECORD *record;
+ SESSION_LABEL sessrec;
+ record = new_record();
+ dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK);
+ read_record_from_block(dcr, record);
+ get_session_record(jcr, dev, record, &sessrec);
+ free_record(record);
+ Jmsg(jcr, M_INFO, 0, _("Mounted Volume \"%s\".\n"), dcr->VolumeName);
+ } else if (dev->at_eof()) {
+ /* NOTE(review): this prints dev->part while the EOM branch above
+ * prints dev->file for the same "%u" — confirm intended. */
+ Jmsg(jcr, M_INFO, 0, _("End of file %u on device %s, Volume \"%s\"\n"),
+ dev->part, dev->print_name(), dcr->VolumeName);
+ Dmsg0(20, "read_record got eof. try again\n");
+ continue;
+ } else if (dev->is_short_block()) {
+ Jmsg(jcr, M_INFO, 0, "%s", dev->print_errmsg());
+ continue;
+ } else {
+ /* I/O error */
+ errors++;
+ display_tape_error_status(jcr, dev);
+ break;
+ }
+ }
+ read_record_from_block(dcr, rec);
+ printf("Block: %d size=%d\n", block->BlockNumber, block->block_len);
+ }
+ free_record(rec);
+ return;
+}
+
+/* cloud_test entry point: parse options, load the SD configuration,
+ * mount the named archive as a cloud device, list its blocks, then run
+ * low-level driver tests (currently: truncate the cache). */
+int main (int argc, char *argv[])
+{
+ int ch;
+ DEVICE *dev;
+ cloud_dev *cdev;
+ cloud_driver *driver;
+ char *VolumeName=NULL;
+ JCR *jcr=NULL;
+ BSR *bsr = NULL;
+ char *bsrName = NULL;
+ BtoolsAskDirHandler askdir_handler;
+
+ init_askdir_handler(&askdir_handler);
+ setlocale(LC_ALL, "");
+ bindtextdomain("bacula", LOCALEDIR);
+ textdomain("bacula");
+ init_stack_dump();
+ lmgr_init_thread();
+
+ working_directory = "/tmp";
+ my_name_is(argc, argv, "cloud_test");
+ init_msg(NULL, NULL); /* initialize message handler */
+
+ OSDependentInit();
+
+
+ while ((ch = getopt(argc, argv, "b:c:d:vV:?")) != -1) {
+ switch (ch) {
+ case 'c': /* specify config file */
+ if (configfile != NULL) {
+ free(configfile);
+ }
+ configfile = bstrdup(optarg);
+ break;
+
+ case 'b': /* bootstrap file */
+ bsrName = optarg;
+ break;
+
+ case 'd': /* debug level */
+ if (*optarg == 't') {
+ dbg_timestamp = true;
+ } else {
+ char *p;
+ /* We probably find a tag list -d 10,sql,bvfs */
+ if ((p = strchr(optarg, ',')) != NULL) {
+ *p = 0;
+ }
+ debug_level = atoi(optarg);
+ if (debug_level <= 0) {
+ debug_level = 1;
+ }
+ if (p) {
+ debug_parse_tags(p+1, &debug_level_tags);
+ }
+ }
+ break;
+
+ case 'v':
+ verbose++;
+ break;
+
+ case 'V': /* Volume name */
+ VolumeName = optarg;
+ break;
+
+ case '?':
+ default:
+ usage();
+
+ } /* end switch */
+ } /* end while */
+ argc -= optind;
+ argv += optind;
+
+ if (!argc) {
+ Pmsg0(0, _("No archive name specified\n"));
+ usage();
+ }
+
+ if (configfile == NULL) {
+ configfile = bstrdup(CONFIG_FILE);
+ }
+
+ config = New(CONFIG());
+ parse_sd_config(config, configfile, M_ERROR_TERM);
+ setup_me();
+ load_sd_plugins(me->plugin_directory);
+ if (bsrName) {
+ bsr = parse_bsr(NULL, bsrName);
+ }
+ jcr = setup_jcr("cloud_test", argv[0], bsr, VolumeName, SD_READ);
+ dev = jcr->dcr->dev;
+ if (!dev || dev->dev_type != B_CLOUD_DEV) {
+ Pmsg0(0, "Bad device\n");
+ exit(1);
+ }
+ do_blocks(jcr, jcr->dcr);
+ /* Start low level tests */
+ cdev = (cloud_dev *)dev;
+ driver = cdev->driver;
+
+ /* TODO: Put here low level tests for all drivers */
+ if (!cdev->truncate_cache(jcr->dcr)) {
+ Pmsg1(0, "Unable to truncate the cache ERR=%s\n", cdev->errmsg);
+ }
+
+ if (jcr) {
+ release_device(jcr->dcr);
+ free_jcr(jcr);
+ /* NOTE(review): dev is used after free_jcr() — presumably the device
+ * is not owned by the JCR; confirm term() ordering is intended. */
+ dev->term(NULL);
+ }
+ return 0;
+}
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
* Written by Norbert Bizet, May MMXVI
*
*/
+#include "cloud_transfer_mgr.h"
+#include "stored.h"
+
+/* constructor
+ * size : the size in bytes of the transfer
+ * funct : function to process (returns NULL on success)
+ * cache_fname : cache file name is duplicated in the transfer constructor
+ * volume_name : volume name is duplicated in the transfer constructor
+ * part : part index
+ * driver : pointer to the cloud_driver
+ * dcr : pointer to DCR
+ * proxy : cloud_proxy updated with the result on successful completion
+ * NOTE(review): m_res_size/m_res_mtime are not initialized here but are
+ * read in transition(TRANS_STATE_DONE) -- confirm every driver fills them.
+*/
+transfer::transfer(uint64_t size,
+                   void * (*funct)(transfer*),
+                   const char *cache_fname,
+                   const char *volume_name,
+                   uint32_t part,
+                   cloud_driver *driver,
+                   DCR *dcr,
+                   cloud_proxy *proxy) :
+   m_stat_size(size),
+   m_stat_start(0),
+   m_stat_duration(0),
+   m_stat_eta(0),
+   m_message(NULL),
+   m_state(TRANS_STATE_CREATED),
+   m_mgr(NULL),
+   m_funct(funct),
+   m_cache_fname(bstrdup(cache_fname)), /* cache fname is duplicated*/
+   m_volume_name(bstrdup(volume_name)), /* volume name is duplicated*/
+   m_part(part),
+   m_driver(driver),
+   m_dcr(dcr),
+   m_proxy(proxy),
+   m_workq_elem(NULL),
+   m_use_count(0),
+   m_cancel(false),
+   m_do_cache_truncate(false)
+{
+   pthread_mutex_init(&m_stat_mutex, 0);
+   pthread_mutex_init(&m_mutex, 0);
+   pthread_cond_init(&m_done, NULL);
+
+   /* allocate an empty status message buffer */
+   m_message = get_pool_memory(PM_MESSAGE);
+   *m_message = 0;
+}
+
+/* destructor
+ * Frees the message buffer, the synchronization primitives and the
+ * names duplicated in the constructor. */
+transfer::~transfer()
+{
+   free_pool_memory(m_message);
+   pthread_cond_destroy(&m_done);
+   pthread_mutex_destroy(&m_mutex);
+   pthread_mutex_destroy(&m_stat_mutex);
+
+   free(m_volume_name);
+   free(m_cache_fname);
+   if (m_use_count > 0) {
+      /* destroying a still-referenced transfer is a logic error.
+       * NOTE(review): when ASSERT aborts, the Dmsg below never runs --
+       * confirm the intended order of these two statements */
+      ASSERT(FALSE);
+      Dmsg1(0, "!!!m_use_count = %d\n", m_use_count);
+   }
+}
+
+/* Queue this transfer for processing in the manager workq.
+ * ret : true when the transition to TRANS_STATE_QUEUED succeeded */
+bool transfer::queue()
+{
+   return transition(TRANS_STATE_QUEUED);
+}
+
+
+/* Opaque callback run by a workq worker: executes m_funct(this).
+ * m_funct returns NULL on success, non-NULL on failure; the state
+ * moves accordingly to TRANS_STATE_DONE or TRANS_STATE_ERROR.
+ */
+void transfer::proceed()
+{
+   if (transition(TRANS_STATE_PROCESSED)) {
+      if (m_funct(this)) {
+         transition(TRANS_STATE_ERROR);
+      } else {
+         transition(TRANS_STATE_DONE);
+      }
+   } else {
+      /* fixed: message referred to the non-existent state name
+       * TRANS_STATE_PROCESS; the target state is TRANS_STATE_PROCESSED */
+      Mmsg(m_message, _("wrong transition to TRANS_STATE_PROCESSED in proceed\n"));
+   }
+}
+
+/* Block until the transfer reaches a final state (DONE or ERROR).
+ * m_done is broadcast from transition() when either state is reached.
+ * ret : 0 on success, a pthread error code otherwise */
+int transfer::wait()
+{
+   lock_guard lg(m_mutex);
+
+   int stat = 0;
+   while (m_state != TRANS_STATE_DONE &&
+          m_state != TRANS_STATE_ERROR) {
+
+      if ((stat = pthread_cond_wait(&m_done, &m_mutex)) != 0) {
+         return stat;
+      }
+   }
+   return stat;
+}
+
+/* Wait with a relative timeout tv for the transfer to complete.
+ * ret : 0 on completion, ETIMEDOUT or another errno code otherwise */
+int transfer::timedwait(const timeval& tv)
+{
+   lock_guard lg(m_mutex);
+   struct timespec timeout;
+   struct timeval ttv;
+   struct timezone tz;
+   int stat = 0;
+
+   /* Compute the absolute deadline ONCE, before the wait loop.
+    * The previous code re-added the current time to the timeout on every
+    * spurious wakeup, pushing the deadline further away each iteration,
+    * and could leave tv_nsec >= 1e9, which makes pthread_cond_timedwait()
+    * fail with EINVAL. */
+   gettimeofday(&ttv, &tz);
+   timeout.tv_sec = ttv.tv_sec + tv.tv_sec;
+   timeout.tv_nsec = (ttv.tv_usec + tv.tv_usec) * 1000;
+   while (timeout.tv_nsec >= 1000000000L) {
+      timeout.tv_nsec -= 1000000000L;
+      timeout.tv_sec++;
+   }
+
+   while (m_state != TRANS_STATE_DONE &&
+          m_state != TRANS_STATE_ERROR) {
+
+      if ((stat = pthread_cond_timedwait(&m_done, &m_mutex, &timeout)) != 0) {
+         return stat;
+      }
+   }
+   return stat;
+}
+
+/* Raise the cancel flag and wait until processing is done.
+ * ret : true when the transfer reached a final state, false on wait error.
+ * NOTE(review): a transfer still in TRANS_STATE_CREATED (never queued)
+ * will block here forever -- confirm callers only cancel queued or
+ * processed transfers. */
+bool transfer::cancel()
+{
+   {
+      lock_guard lg(m_mutex);
+      m_cancel = true;
+   }
+   /* fixed: wait() returns 0 on success; returning it directly inverted
+    * the documented boolean meaning (true == cancel done) */
+   return wait() == 0;
+}
+
+/* Check the cancel flag without locking: a single bool read; polling
+ * workers tolerate a briefly stale value */
+bool transfer::is_cancelled() const
+{
+   return m_cancel;
+}
+
+/* Append a one-line status for this transfer into msg.
+ * Completed transfers report the measured duration, pending ones the ETA.
+ * ret : number of characters formatted */
+uint32_t transfer::append_status(POOL_MEM& msg)
+{
+   POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE);
+   char ec[30];
+   uint32_t ret=0;
+   static const char *state[] = {"created", "queued", "process", "done", "error"};
+
+   if (m_state > TRANS_STATE_PROCESSED) {
+      /* utime_t is 64 bit: cast explicitly for the %lld specifier
+       * (the old %d mismatched the vararg width) */
+      ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s size=%sB duration=%llds%s%s\n"),
+         m_volume_name, m_part,
+         state[m_state],
+         edit_uint64_with_suffix(m_stat_size, ec),
+         (long long)m_stat_duration,
+         (strlen(m_message) != 0)?" msg=":"",
+         (strlen(m_message) != 0)?m_message:"");
+      pm_strcat(msg, tmp_msg);
+   } else {
+      /* fixed: "eta=%dss" printed a doubled unit suffix ("12ss"), and the
+       * stray comma made this line inconsistent with the branch above */
+      ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s size=%sB eta=%llds%s%s\n"),
+         m_volume_name, m_part,
+         state[m_state],
+         edit_uint64_with_suffix(m_stat_size, ec),
+         (long long)m_stat_eta,
+         (strlen(m_message) != 0)?" msg=":"",
+         (strlen(m_message) != 0)?m_message:"");
+      pm_strcat(msg, tmp_msg);
+   }
+   free_pool_memory(tmp_msg);
+   return ret;
+}
+
+
+/* The manager registers itself through this function when the transfer
+ * is inserted into its list (see transfer_manager::get_xfer()) */
+void transfer::set_manager(transfer_manager *mgr)
+{
+   lock_guard lg(m_mutex);
+   m_mgr = mgr;
+}
+
+/* change the state
+ * Guarded state-machine transition; also updates the owning manager's
+ * statistics counters under m_mgr->m_stat_mutex.
+ * ret : true if the transition is legal, false otherwise.
+ * NOTE(review): lock order here is m_mutex then m_mgr->m_mutex, while
+ * transfer_manager::get_xfer() takes m_mgr->m_mutex then (via
+ * inc_use_count) m_mutex -- opposite order; confirm these code paths
+ * cannot contend on the same transfer. */
+bool transfer::transition(transfer_state state)
+{
+   /* lock state mutex*/
+   lock_guard lg(m_mutex);
+
+   /* transition from current state (m_state) to target state (state)*/
+   bool ret = false; /*impossible transition*/
+   switch(m_state)
+   {
+   case TRANS_STATE_CREATED:
+      /* CREATED -> QUEUED */
+      if (state == TRANS_STATE_QUEUED) {
+         /* valid transition*/
+         ret = true;
+         if (m_mgr) {
+            /*lock manager statistics */
+            P(m_mgr->m_stat_mutex);
+            /*increment the number of queued transfer*/
+            m_mgr->m_stat_nb_transfer_queued++;
+            /*add the current size into manager queued size*/
+            m_mgr->m_stat_size_queued += m_stat_size;
+            /*unlock manager statistics */
+            V(m_mgr->m_stat_mutex);
+
+            P(m_mgr->m_mutex);
+            /* m_mutex is already held: take the workq reference directly */
+            ++m_use_count;
+            m_mgr->add_work(this);
+            V(m_mgr->m_mutex);
+         }
+      }
+      break;
+
+   case TRANS_STATE_QUEUED:
+      /* QUEUED -> CREATED : back to initial state*/
+      if (state == TRANS_STATE_CREATED) {
+         /* valid transition*/
+         ret = true;
+         if (m_mgr) {
+            /*lock manager statistics */
+            P(m_mgr->m_stat_mutex);
+            /*decrement the number of queued transfer*/
+            m_mgr->m_stat_nb_transfer_queued--;
+            /*remove the current size from the manager queued size*/
+            m_mgr->m_stat_size_queued -= m_stat_size;
+            /*unlock manager statistics */
+            V(m_mgr->m_stat_mutex);
+
+            P(m_mgr->m_mutex);
+            m_mgr->remove_work(m_workq_elem);
+            /* drop the workq reference taken when queuing */
+            --m_use_count;
+            V(m_mgr->m_mutex);
+         }
+      }
+      /* QUEUED -> PROCESSED : a worker acquired the transfer*/
+      if (state == TRANS_STATE_PROCESSED) {
+         /*valid transition*/
+         ret = true;
+         if (m_mgr) {
+            /*lock manager statistics */
+            P(m_mgr->m_stat_mutex);
+            /*decrement the number of queued transfer*/
+            m_mgr->m_stat_nb_transfer_queued--;
+            /*increment the number of processed transfer*/
+            m_mgr->m_stat_nb_transfer_processed++;
+            /*remove the current size from the manager queued size*/
+            m_mgr->m_stat_size_queued -= m_stat_size;
+            /*... and add it to the manager processed size*/
+            m_mgr->m_stat_size_processed += m_stat_size;
+            /*unlock manager statistics */
+            V(m_mgr->m_stat_mutex);
+
+            /*transfer starts now*/
+            P(m_stat_mutex);
+            m_stat_start = (utime_t)time(NULL);
+            V(m_stat_mutex);
+         }
+      }
+      break;
+
+   case TRANS_STATE_PROCESSED:
+      /* PROCESSED -> DONE : Success! */
+      if (state == TRANS_STATE_DONE) {
+         /*valid transition*/
+         ret = true;
+         /*transfer stops now : compute transfer duration*/
+         P(m_stat_mutex);
+         m_stat_duration = (utime_t)time(NULL)-m_stat_start;
+         V(m_stat_mutex);
+
+         if (m_mgr) {
+            /*lock manager statistics */
+            P(m_mgr->m_stat_mutex);
+            /* ... from processed to done*/
+            m_mgr->m_stat_nb_transfer_processed--;
+            m_mgr->m_stat_nb_transfer_done++;
+            m_mgr->m_stat_size_processed -= m_stat_size;
+            m_mgr->m_stat_size_done += m_stat_size;
+            /*add local duration to manager duration */
+            m_mgr->m_stat_duration_done += m_stat_duration;
+            /*reprocess the manager average rate with it*/
+            if (m_mgr->m_stat_duration_done != 0) {
+               m_mgr->m_stat_average_rate =
+                  m_mgr->m_stat_size_done /
+                  m_mgr->m_stat_duration_done;
+            }
+            /*unlock manager statistics */
+            V(m_mgr->m_stat_mutex);
+
+            /* process is completed, unref the workq reference */
+            --m_use_count;
+         }
+
+         if (m_proxy) {
+            /* publish the result; m_res_mtime/m_res_size are filled by the
+             * driver during processing -- see note in the constructor */
+            m_proxy->set(m_volume_name, m_part, m_res_mtime, m_res_size);
+         }
+
+         /* in both cases, success or failure, life keeps going on */
+         pthread_cond_broadcast(&m_done);
+      }
+      /* PROCESSED -> ERROR : Failure! */
+      if (state == TRANS_STATE_ERROR) {
+         /*valid transition*/
+         ret = true;
+         /*transfer stops now, even if in error*/
+         P(m_stat_mutex);
+         m_stat_duration = (utime_t)time(NULL)-m_stat_start;
+         V(m_stat_mutex);
+
+         if (m_mgr) {
+            /*lock manager statistics */
+            P(m_mgr->m_stat_mutex);
+            /* ... from processed to error*/
+            m_mgr->m_stat_nb_transfer_processed--;
+            m_mgr->m_stat_nb_transfer_error++;
+            m_mgr->m_stat_size_processed -= m_stat_size;
+            m_mgr->m_stat_size_error += m_stat_size;
+            /*unlock manager statistics */
+            V(m_mgr->m_stat_mutex);
+
+            /* process is completed, unref the workq reference */
+            --m_use_count;
+         }
+         /* in both cases, success or failure, life keeps going on */
+         pthread_cond_broadcast(&m_done);
+      }
+      break;
+
+   case TRANS_STATE_DONE:
+   case TRANS_STATE_ERROR:
+   default:
+      /* final states: no transition out of them */
+      ret = false;
+      break;
+   }
+
+   /* update state when transition is valid*/
+   if (ret) {
+      m_state = state;
+   }
+
+   return ret;
+}
+
+/* Request (or clear) truncation of the cache file once the upload
+ * transfer has completed */
+void transfer::set_do_cache_truncate(bool do_cache_truncate)
+{
+   m_do_cache_truncate=do_cache_truncate;
+}
+
+/* Take a reference on the transfer; m_mutex protects the counter.
+ * ret : the new use count */
+int transfer::inc_use_count()
+{
+   lock_guard lg(m_mutex);
+   return ++m_use_count;
+}
+
+/* Drop a reference; m_mutex protects the counter.
+ * Does not delete the transfer itself: deletion is handled by
+ * transfer_manager::release().
+ * ret : the new use count */
+int transfer::dec_use_count()
+{
+   lock_guard lg(m_mutex);
+   return --m_use_count;
+}
+
+/* workq engine entry point: unwrap the transfer and run it */
+void *transfer_launcher(void *arg)
+{
+   transfer *xfer = (transfer *)arg;
+   if (xfer == NULL) {
+      return NULL;
+   }
+   xfer->proceed();
+   return NULL;
+}
+
+/* -----------------------------------------------------------
+ transfer manager declarations
+ -----------------------------------------------------------
+ */
+
+/* constructor
+ * n : intended worker count. NOTE(review): currently ignored, the workq
+ *     is always started with a single worker -- confirm before relying
+ *     on n.
+ * All statistics members are now explicitly zeroed: they are incremented
+ * from transfer::transition() and printed by append_status(), and were
+ * previously left uninitialized (garbage counters in status output). */
+transfer_manager::transfer_manager(uint32_t n)
+{
+   transfer *item=NULL;
+   /* offset-of-link idiom used by the Bacula dlist */
+   m_transfer_list.init(item, &item->link);
+   pthread_mutex_init(&m_stat_mutex, 0);
+   pthread_mutex_init(&m_mutex, 0);
+   m_stat_nb_workers = 0;
+   m_stat_nb_transfer_queued = 0;
+   m_stat_nb_transfer_processed = 0;
+   m_stat_nb_transfer_done = 0;
+   m_stat_nb_transfer_error = 0;
+   m_stat_size_queued = 0;
+   m_stat_size_processed = 0;
+   m_stat_size_done = 0;
+   m_stat_size_error = 0;
+   m_stat_duration_done = 0;
+   m_stat_average_rate = 0;
+   m_stat_eta = 0;
+   m_message = NULL;
+   m_state = 0;
+   workq_init(&m_wq, 1, transfer_launcher);
+}
+
+/* destructor
+ * Waits for the workq to drain before destroying the mutexes.
+ * NOTE(review): transfers still present in m_transfer_list are not
+ * deleted here -- confirm all callers release() their transfers first */
+transfer_manager::~transfer_manager()
+{
+   workq_wait_idle(&m_wq);
+   pthread_mutex_destroy(&m_mutex);
+   pthread_mutex_destroy(&m_stat_mutex);
+}
+
+/* create a new or inc-reference a similar transfer. (factory)
+ * "similar" means same volume name and same part index.
+ * ret: transfer* is ref_counted and must be kept, used
+ * and eventually released by caller with release() */
+transfer *transfer_manager::get_xfer(uint64_t size,
+                                     transfer_engine *funct,
+                                     POOLMEM *cache_fname,
+                                     const char *volume_name,
+                                     uint32_t part,
+                                     cloud_driver *driver,
+                                     DCR *dcr,
+                                     cloud_proxy *proxy)
+{
+   lock_guard lg (m_mutex);
+
+   /* do we have a similar transfer on tap? */
+   transfer *item;
+   foreach_dlist(item, (&m_transfer_list)) {
+      /* this is where "similar transfer" is defined:
+       * same volume_name, same part idx */
+      if (strcmp(item->m_volume_name, volume_name) == 0 && item->m_part == part) {
+         /* one more reference for this caller */
+         item->inc_use_count();
+         return item;
+      }
+   }
+   /* no existing transfer: create a new one */
+   item = New(transfer(size,
+                       funct,
+                       cache_fname,/* cache_fname is duplicated in the transfer constructor*/
+                       volume_name, /* volume_name is duplicated in the transfer constructor*/
+                       part,
+                       driver,
+                       dcr,
+                       proxy));
+
+   ASSERT(item->m_state == TRANS_STATE_CREATED);
+   item->set_manager(this);
+   /* inc use_count once for m_transfer_list insertion */
+   item->inc_use_count();
+   m_transfer_list.append(item);
+   /* inc use_count once for caller ref counting */
+   item->inc_use_count();
+   return item;
+}
+
+/* does the xfer belong to us?
+ * Linear scan of m_transfer_list under m_mutex; used by the accessors
+ * below to assert the manager/transfer pairing */
+bool transfer_manager::owns(transfer *xfer)
+{
+   lock_guard lg(m_mutex);
+   transfer *item;
+   foreach_dlist(item, (&m_transfer_list)) {
+      /* same address */
+      if (item == xfer) {
+         return true;
+      }
+   }
+   return false;
+}
+
+/* un-ref transfer and free if ref count goes to zero
+ * caller must NOT use xfer anymore after this has been called.
+ * NOTE(review): the dec_use_count()==1 test and the list removal are not
+ * one atomic step -- confirm two threads cannot release the same
+ * transfer concurrently */
+void transfer_manager::release(transfer *xfer)
+{
+   if (xfer) {
+      ASSERTD(owns(xfer), "Wrong Manager");
+      /* wait should have been done already by caller,
+       * but we cannot afford deleting the transfer while it's not completed */
+      wait(xfer);
+      /* decrement the caller reference */
+      if (xfer->dec_use_count() == 1) {
+         /* the only ref left is the one from m_transfer_list
+          * time for deletion */
+         lock_guard lg(m_mutex);
+         m_transfer_list.remove(xfer);
+         xfer->dec_use_count();
+         delete xfer;
+      }
+   }
+}
+
+/* accessor to xfer->queue() with ownership check */
+bool transfer_manager::queue(transfer *xfer)
+{
+   if (!xfer) {
+      return false;
+   }
+   ASSERTD(owns(xfer), "Wrong Manager");
+   return xfer->queue();
+}
+
+/* accessor to xfer->wait() with ownership check; 0 on NULL xfer */
+int transfer_manager::wait(transfer *xfer)
+{
+   if (!xfer) {
+      return 0;
+   }
+   ASSERTD(owns(xfer), "Wrong Manager");
+   return xfer->wait();
+}
+
+/* accessor to xfer->timedwait() with ownership check; 0 on NULL xfer */
+int transfer_manager::timedwait(transfer *xfer, const timeval& tv)
+{
+   if (!xfer) {
+      return 0;
+   }
+   ASSERTD(owns(xfer), "Wrong Manager");
+   return xfer->timedwait(tv);
+}
+
+/* accessor to xfer->cancel() with ownership check */
+bool transfer_manager::cancel(transfer *xfer)
+{
+   if (!xfer) {
+      return false;
+   }
+   ASSERTD(owns(xfer), "Wrong Manager");
+   return xfer->cancel();
+}
+
+/* append a transfer object to this manager's workq
+ * (called from transfer::transition() with the manager mutex held)
+ * ret : workq_add() status, 0 on success */
+int transfer_manager::add_work(transfer* t)
+{
+   return workq_add(&m_wq, t, t ? &t->m_workq_elem : NULL, 0);
+}
+
+/* remove associated workq_ele_t from this manager workq
+ * ret : workq_remove() status, 0 on success */
+int transfer_manager::remove_work(workq_ele_t *elem)
+{
+   return workq_remove(&m_wq, elem);
+}
+/* search the transfer list for a similar transfer (same volume name and
+ * same part index)
+ * ret : true when a transfer for this part is already known */
+bool transfer_manager::find(const char *VolName, uint32_t index)
+{
+   /* Look in the transfer list if we have a download/upload for the current volume */
+   lock_guard lg(m_mutex);
+   transfer *item;
+   foreach_dlist(item, (&m_transfer_list)) {
+      if (strcmp(item->m_volume_name, VolName) == 0 && item->m_part == index) {
+         return true;
+      }
+   }
+   return false;
+}
+
+/* Call to this function just before displaying global statistics.
+ * Recomputes each pending transfer's ETA and the manager's global ETA
+ * from the running average rate.
+ * Lock order: m_stat_mutex, then m_mutex, then the workq mutex. */
+void transfer_manager::update_statistics()
+{
+   /* lock the manager stats */
+   P(m_stat_mutex);
+
+   /* ETA naive calculation for each element in the queue =
+    * (accumulator(previous elements size) / average_rate) / num_workers;
+    */
+   uint64_t accumulator=0;
+
+   /* lock the queue so order and chaining cannot be modified */
+   P(m_mutex);
+   P(m_wq.mutex);
+   m_stat_nb_workers = m_wq.max_workers;
+
+   /* parse the queued and processed transfers */
+   transfer *t;
+   foreach_dlist(t, &m_transfer_list) {
+      if ( (t->m_state == TRANS_STATE_QUEUED) ||
+           (t->m_state == TRANS_STATE_PROCESSED)) {
+         accumulator+=t->m_stat_size;
+         P(t->m_stat_mutex);
+         /* only meaningful once at least one transfer completed */
+         if ((m_stat_average_rate != 0) && (m_stat_nb_workers != 0)) {
+            /*update eta for each transfer*/
+            t->m_stat_eta = (accumulator / m_stat_average_rate) / m_stat_nb_workers;
+         }
+         V(t->m_stat_mutex);
+      }
+   }
+
+   /* the manager ETA is the ETA of the last transfer in its workq */
+   if (m_wq.last) {
+      transfer *t = (transfer *)m_wq.last->data;
+      if (t) {
+         m_stat_eta = t->m_stat_eta;
+      }
+   }
+
+   V(m_wq.mutex);
+   V(m_mutex);
+   V(m_stat_mutex);
+}
+
+/* short status of the transfers
+ * Appends one global summary line, plus one line per transfer when
+ * verbose is set.
+ * ret : number of characters appended */
+uint32_t transfer_manager::append_status(POOL_MEM& msg, bool verbose)
+{
+   update_statistics();
+   char ec0[30],ec1[30],ec2[30],ec3[30],ec4[30];
+   POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE);
+   /* fixed: the counters are uint64_t and the ETA is utime_t (64 bit);
+    * passing them directly to %d mismatched the vararg width, so cast
+    * each one explicitly to int (counts and ETA are small in practice) */
+   uint32_t ret = Mmsg(tmp_msg, _("(%sB/s) (ETA %d s) "
+      "Queued=%d %sB, Processed=%d %sB, Done=%d %sB, Failed=%d %sB\n"),
+      edit_uint64_with_suffix(m_stat_average_rate, ec0), (int)m_stat_eta,
+      (int)m_stat_nb_transfer_queued, edit_uint64_with_suffix(m_stat_size_queued, ec1),
+      (int)m_stat_nb_transfer_processed, edit_uint64_with_suffix(m_stat_size_processed, ec2),
+      (int)m_stat_nb_transfer_done, edit_uint64_with_suffix(m_stat_size_done, ec3),
+      (int)m_stat_nb_transfer_error, edit_uint64_with_suffix(m_stat_size_error, ec4));
+   pm_strcat(msg, tmp_msg);
+
+   if (verbose) {
+      P(m_mutex);
+      if (!m_transfer_list.empty()) {
+         ret += Mmsg(tmp_msg, _("------------------------------------------------------------ details ------------------------------------------------------------\n"));
+         pm_strcat(msg, tmp_msg);
+      }
+      transfer *tpkt;
+      foreach_dlist(tpkt, &m_transfer_list) {
+         ret += tpkt->append_status(msg);
+      }
+      V(m_mutex);
+   }
+   free_pool_memory(tmp_msg);
+   return ret;
+}
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
* Written by Norbert Bizet, May MMXVI
*
*/
+
+#ifndef BCLOUD_TRANSFER_MANAGER_H
+#define BCLOUD_TRANSFER_MANAGER_H
+
+#include "bacula.h"
+#include "lib/workq.h"
+
+/* forward declarations */
+struct transfer_manager;
+class cloud_driver;
+class DCR;
+class transfer;
+class cloud_proxy;
+
+typedef void *(transfer_engine)(transfer *);
+
+
+/* possible states of a transfer object */
+typedef enum {
+/* initial state */
+ /* object has been created but not queued yet*/
+ TRANS_STATE_CREATED = 0,
+/* in the workq states */
+ /* object is queued*/
+ TRANS_STATE_QUEUED,
+ /* object is processed*/
+ TRANS_STATE_PROCESSED,
+/* completed states */
+ /* object processing has completed ok*/
+ TRANS_STATE_DONE,
+ /* object processing has completed but failed*/
+ TRANS_STATE_ERROR,
+/* number of states */
+ NUM_TRANS_STATE
+} transfer_state;
+
+/* each cloud transfer (download, upload, etc.)
+ is wrapped into a transfer object */
+class transfer : public SMARTALLOC
+{
+public:
+ dlink link; /* Used in global manager dlist */
+
+/* m_stat prefixed statistics variables : */
+ /* protect access to statistics resources*/
+ pthread_mutex_t m_stat_mutex;
+ /* size of the transfer: should be filled asap */
+ uint64_t m_stat_size;
+ /* time when process started */
+ utime_t m_stat_start;
+ /* duration of the transfer : automatically filled when transfer is completed*/
+ utime_t m_stat_duration;
+   /* estimated time of arrival : best-guess prediction of the transfer completion time*/
+ utime_t m_stat_eta;
+
+/* status variables :*/
+ /* protect status changes*/
+ pthread_mutex_t m_mutex;
+ /* cond variable to broadcast transfer completion*/
+ pthread_cond_t m_done;
+ /* status message */
+ POOLMEM *m_message;
+ /* current transfer state*/
+ transfer_state m_state;
+
+/* other variables :*/
+ /* the manager that holds this element */
+ transfer_manager *m_mgr;
+ /* the function processed by this transfer: contrary to the workq, it can be different for each transfer */
+ transfer_engine *m_funct;
+
+ /* variables */
+ const char *m_cache_fname;
+ const char *m_volume_name;
+ uint32_t m_part;
+ cloud_driver *m_driver;
+ DCR *m_dcr;
+ cloud_proxy *m_proxy;
+ /* size of the transfer result : filled by the processor (driver) */
+ uint64_t m_res_size;
+ /* last modification time of the transfer result : filled by the processor (driver) */
+ utime_t m_res_mtime;
+
+ /* the associated workq element */
+ workq_ele_t *m_workq_elem;
+
+ /* reference counter */
+ int m_use_count;
+
+ /* cancel flag */
+ bool m_cancel;
+
+ /* truncate cache once transfer is completed (upload)*/
+ bool m_do_cache_truncate;
+/* methods :*/
+ /* constructor
+ * size : the size in bytes of the transfer
+ * funct : function to process
+ * cache_fname : cache file name is duplicated in the transfer constructor
+ * volume_name : volume name is duplicated in the transfer constructor
+ * part : part index
+ * driver : pointer to the cloud_driver
+ * dcr : pointer to DCR
+ */
+ transfer(uint64_t size,
+ transfer_engine *funct,
+ const char *cache_fname,
+ const char *volume_name,
+ uint32_t part,
+ cloud_driver *driver,
+ DCR *dcr,
+ cloud_proxy *proxy
+ );
+
+ /* destructor*/
+ ~transfer();
+
+ /* opaque function that will process m_funct with m_arg as parameter. Called back from the workq.
+ * depending on m_funct return value, changes m_state to TRANS_STATE_DONE or TRANS_STATE_ERROR */
+ void proceed();
+
+ /* waits for the asynchronous computation to finish (including cancel()ed computations).
+ * ret: 0:Ok, errorcode otherwise */
+ int wait(); /* no timeout */
+ int timedwait(const timeval& tv); /* with timeout */
+
+ /* queue this transfer for processing in the manager workq
+ * ret :true if queuing is successful */
+ bool queue();
+
+ /* cancel processing
+ * ret: true cancel done, false cancel failed */
+ bool cancel();
+
+ /* callback fct that checks if transfer has been cancelled */
+ bool is_cancelled() const;
+
+ /* append a status message into msg*/
+ uint32_t append_status(POOL_MEM& msgs);
+
+ void set_do_cache_truncate(bool do_cache_truncate);
+
+protected:
+friend class transfer_manager;
+
+   /* the manager registers itself through this function*/
+ void set_manager(transfer_manager *mgr);
+
+ /* change the state
+ * ret : true if transition is legal, false otherwise */
+ bool transition(transfer_state state);
+
+ /* ref counting must lock the element prior to use */
+ int inc_use_count();
+ /* !!dec use count can delete the transfer */
+ int dec_use_count();
+};
+
+/*
+ The transfer_manager wraps around the work queue and holds the transfer(s)
+*/
+class transfer_manager : public SMARTALLOC
+{
+public:
+
+/* m_stat prefixed statistics variables global for this manager: */
+ /* protect access to statistics resources*/
+ pthread_mutex_t m_stat_mutex;
+ /* number of workers*/
+ uint32_t m_stat_nb_workers;
+ /* current number of transfers in TRANS_STATE_QUEUED state in this manager*/
+ uint64_t m_stat_nb_transfer_queued;
+ /* current number of transfers in TRANS_STATE_PROCESSED state in this manager*/
+ uint64_t m_stat_nb_transfer_processed;
+ /* current number of transfers in TRANS_STATE_DONE state in this manager*/
+ uint64_t m_stat_nb_transfer_done;
+ /* current number of transfers in TRANS_STATE_ERROR state in this manager*/
+ uint64_t m_stat_nb_transfer_error;
+
+ /* size in bytes of transfers in TRANS_STATE_QUEUED state in this manager*/
+ uint64_t m_stat_size_queued;
+ /* size in bytes of transfers in TRANS_STATE_PROCESSED state in this manager*/
+ uint64_t m_stat_size_processed;
+ /* size in bytes of transfers in TRANS_STATE_DONE state in this manager*/
+ uint64_t m_stat_size_done;
+ /* size in bytes of transfers in TRANS_STATE_ERROR state in this manager*/
+ uint64_t m_stat_size_error;
+ /* duration of transfers in TRANS_STATE_DONE state in this manager*/
+ utime_t m_stat_duration_done;
+ /* computed bytes/sec transfer rate */
+ uint64_t m_stat_average_rate;
+ /* computed Estimate Time to Arrival */
+ utime_t m_stat_eta;
+
+
+/* status variables global for this manager: */
+ /* protect status access*/
+ pthread_mutex_t m_mutex;
+ /* status message for this manager TBD*/
+ POOLMEM *m_message;
+ /* m_state for the manager TBD*/
+ int32_t m_state;
+
+/* others: */
+   /* transfer list*/
+ dlist m_transfer_list;
+
+ /* workq used by this manager*/
+ workq_t m_wq;
+
+/* methods */
+
+ /* constructor */
+ transfer_manager(uint32_t n);
+
+ /* destructor */
+ ~transfer_manager();
+
+/* transfer functions */
+
+ /* create a new or inc-reference a similar transfer. (factory)
+ * ret: transfer* is ref_counted and must be kept, used
+ * and eventually released by caller with release() */
+ transfer *get_xfer(uint64_t size,
+ transfer_engine *funct,
+ POOLMEM *cache_fname,
+ const char *volume_name,
+ uint32_t part,
+ cloud_driver *driver,
+ DCR *dcr,
+ cloud_proxy *proxy);
+
+ /* does the xfer belong to this manager? */
+ bool owns(transfer *xfer);
+
+ /* un-ref transfer and delete if ref count falls to zero
+ * caller must NOT use xfer anymore after calling release() */
+ void release(transfer *xfer);
+
+ /* accessors to xfer->queue */
+ bool queue(transfer *xfer);
+
+ /* accessors to xfer->wait */
+ int wait(transfer *xfer);
+
+ /* accessors to xfer->timedwait */
+ int timedwait(transfer *xfer, const timeval& tv);
+
+ /* accessors to xfer->cancel */
+ bool cancel(transfer *xfer);
+
+ /* search the transfer list for similar transfer */
+ bool find(const char *VolName, uint32_t index);
+
+ /* call to update manager statistics, before displaying it b.e.*/
+ void update_statistics();
+
+ /* append a status message into msg*/
+ uint32_t append_status(POOL_MEM& msg, bool verbose);
+
+protected:
+friend class transfer;
+
+ /* append a transfer object to this manager */
+ int add_work(transfer* t);
+ /* remove associated workq_ele_t from this manager workq*/
+ int remove_work(workq_ele_t *elem);
+};
+
+#endif /* BCLOUD_TRANSFER_MANAGER_H */
/* List volumes in the cloud */
static bool cloud_list_cmd(JCR *jcr)
{
-   jcr->dir_bsock->fsend(_("3900 Not yet implemented\n"));
+   /* Implements the Director "cloudlist" command: either list all cloud
+    * Volumes (empty Volume argument) or list the parts of one Volume.
+    * Always ends the conversation with BNET_EOD, even on error. */
+   BSOCK *dir = jcr->dir_bsock;
+   POOL_MEM dev_name;
+   POOLMEM *errmsg = get_pool_memory(PM_FNAME);
+   errmsg[0] = 0;
+   char volname[MAX_NAME_LENGTH];
+   char mtype[MAX_NAME_LENGTH];
+   int slot, drive;
+   DCR *dcr = NULL;
+
+   /* %127s matches MAX_NAME_LENGTH-1; dev_name.c_str() is writable
+    * pool memory (Bacula idiom), not a read-only string */
+   if (sscanf(dir->msg, "cloudlist Storage=%127s Volume=%127s MediaType=%127s Slot=%d drive=%d",
+              dev_name.c_str(), volname, mtype, &slot, &drive) != 5) {
+      dir->fsend(_("3912 Error scanning the command\n"));
+      goto bail_out;
+   }
+
+   /* In fact, we do not need to find and reserve a device for this operation,
+    * we just need to find one, idle or not
+    */
+   dcr = find_device(jcr, dev_name, mtype, drive);
+   if (!dcr) {
+      dir->fsend(_("3900 Error reserving device %s %s\n"), dev_name.c_str(), mtype);
+      goto bail_out;
+   }
+
+   if (volname[0] == 0) {    /* List all volumes, TODO: Switch to a callback mode */
+      char *vol;
+      alist volumes(100, not_owned_by_alist);
+      if (!dcr->dev->get_cloud_volumes_list(dcr, &volumes, errmsg)) {
+         /* best effort: report the error, then still send whatever we got */
+         dir->fsend(_("3900 Error cannot get cloud Volume list. ERR=%s\n"), errmsg);
+      }
+      free_dcr(dcr);
+
+      foreach_alist(vol, &volumes) {
+         bash_spaces(vol);
+         dir->fsend("volume=%s\n", vol);
+         free(vol);            /* Walk through the list only one time */
+      }
+
+   } else {
+      ilist parts(100, not_owned_by_alist);
+      if (!dcr->dev->get_cloud_volume_parts_list(dcr, volname, &parts, errmsg)) {
+         dir->fsend(_("3900 Error cannot get cloud Volume list. ERR=%s\n"), errmsg);
+         free_dcr(dcr);
+         goto bail_out;
+      }
+      free_dcr(dcr);
+
+      /* part indexes are 1-based; NULL slots (missing parts) are skipped */
+      for (int i=1; i <= parts.last_index() ; i++) {
+         cloud_part *p = (cloud_part *)parts.get(i);
+         if (p) {
+            dir->fsend("part=%d size=%lld mtime=%lld\n", i, p->size, p->mtime);
+            free(p);
+         }
+      }
+   }
+
+bail_out:
+   free_pool_memory(errmsg);
+   dir->signal(BNET_EOD);
   return true;
}
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
*
*/
+#include "file_driver.h"
+
+static const int dbglvl = 100;
+
+#include <fcntl.h>
+
+/* Imported functions */
+const char *mode_to_str(int mode);
+int breaddir(DIR *dirp, POOLMEM *&dname);
+
+/* Forward referenced functions */
+
+/* Const and Static definitions */
+
+/*
+ * Put a cache object into the cloud (i.e. local disk)
+ * or vice-versa.
+ */
+bool file_driver::put_object(transfer *xfer, const char *in_fname, const char *out_fname, bwlimit *limit)
+{
+ struct stat statbuf;
+ char *p, *f;
+ char save_separator;
+ ssize_t rbytes, wbytes;
+ uint32_t read_len;
+ int64_t obj_len;
+ FILE *infile=NULL, *outfile=NULL;
+ POOLMEM *buf = get_memory(buf_len);
+
+ Enter(dbglvl);
+ Dmsg2(dbglvl, "Put from: %s to %s\n", in_fname, out_fname);
+
+ /*
+ * First work on output file
+ */
+ /* Split out_fname into path + file */
+ for (p=f=const_cast<char*>(out_fname); *p; p++) {
+ if (IsPathSeparator(*p)) {
+ f = p; /* set pos of last slash */
+ }
+ }
+ if (!IsPathSeparator(*f)) { /* did we find a slash? */
+ Mmsg1(xfer->m_message, "Could not find path name for output file: %s\n", out_fname);
+ goto get_out;
+ }
+ save_separator = *f;
+ *f = 0; /* terminate path */
+
+ /* const_cast should not be necessary here but is due the makedir interface */
+ if (!makedir(NULL, const_cast<char*>(out_fname), 0740)) {
+ Mmsg1(xfer->m_message, "Could not makedir output directory: %s\n", out_fname);
+ *f = save_separator;
+ goto get_out;
+ }
+ *f = save_separator;
+
+ if (lstat(out_fname, &statbuf) == -1) {
+ outfile = bfopen(out_fname, "w");
+ } else {
+ /* append to existing file */
+ outfile = bfopen(out_fname, "r+");
+ }
+
+ if (!outfile) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n",
+ out_fname, be.bstrerror());
+ goto get_out;
+ }
+
+ /*
+ * Now work on input file
+ */
+ if (lstat(in_fname, &statbuf) == -1) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Failed to stat input file %s. ERR=%s\n",
+ in_fname, be.bstrerror());
+ goto get_out;
+ }
+
+ obj_len = statbuf.st_size;
+ Dmsg1(dbglvl, "Object length to copy is: %lld bytes.\n", obj_len);
+ if (obj_len == 0) { /* Not yet created nothing to do */
+ goto get_out;
+ }
+
+ infile = bfopen(in_fname, "r");
+
+ if (!infile) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Failed to open input file %s. ERR=%s\n",
+ in_fname, be.bstrerror());
+ goto get_out;
+ }
+
+ while (obj_len > 0) {
+ if (xfer->is_cancelled()) {
+ Mmsg(xfer->m_message, "Job is canceled.\n");
+ goto get_out;
+ }
+ read_len = (obj_len > buf_len) ? buf_len : obj_len;
+ Dmsg3(dbglvl, "obj_len=%d buf_len=%d read_len=%d\n", obj_len, buf_len, read_len);
+ rbytes = fread(buf, 1, read_len, infile);
+ Dmsg1(dbglvl, "Read %d bytes.\n", rbytes);
+ if (rbytes <= 0) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Error reading input file %s. ERR=%s\n",
+ in_fname, be.bstrerror());
+ goto get_out;
+ }
+ wbytes = fwrite(buf, 1, rbytes, outfile);
+ Dmsg2(dbglvl, "Wrote: %d bytes wanted %d bytes.\n", wbytes, rbytes);
+ if (wbytes < 0) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Error writing output file %s. ERR=%s\n",
+ out_fname, be.bstrerror());
+ }
+ obj_len -= rbytes;
+ if (limit->use_bwlimit()) {
+ limit->control_bwlimit(rbytes);
+ }
+ }
+
+get_out:
+ free_memory(buf);
+ if (infile) {
+ fclose(infile);
+ }
+ if (outfile) {
+ fclose(outfile);
+ /* Get stats on the result part and fill the xfer res */
+ if (lstat(out_fname, &statbuf) == -1) {
+ berrno be;
+ Mmsg2(xfer->m_message, "Failed to stat file %s: %s\n", out_fname, be.bstrerror());
+ } else {
+ xfer->m_res_size = statbuf.st_size;
+ xfer->m_res_mtime = statbuf.st_mtime;
+ }
+ }
+ Leave(dbglvl);
+ return (xfer->m_message[0] == 0);
+}
+
/*
 * Download a part from the "cloud" (local disk) into the cache:
 * simply a put_object() copy in the cloud->cache direction, throttled
 * by the download bandwidth limit.
 */
bool file_driver::get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname)
{
   return put_object(xfer, cloud_fname, cache_fname, &download_limit);
}
+
+bool file_driver::truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err)
+{
+ bool rtn = true;
+ int i;
+ POOLMEM *filename = get_pool_memory(PM_FNAME);
+ for (i=1; (i <= (int)trunc_parts->last_index()); i++) {
+ if (!trunc_parts->get(i)) {
+ continue;
+ }
+ make_cloud_filename(filename, VolumeName, i);
+ if (unlink(filename) != 0 && errno != ENOENT) {
+ berrno be;
+ Mmsg2(err, "Unable to delete %s. ERR=%s\n", filename, be.bstrerror());
+ Dmsg1(dbglvl, "%s", err);
+ Qmsg(dcr->jcr, M_INFO, 0, "%s", err);
+ rtn = false;
+ } else {
+ Dmsg1(dbglvl, "Unlink file %s\n", filename);
+ }
+ }
+
+ free_pool_memory(filename);
+ return rtn;
+}
+
/*
 * Build the local-disk cloud path for one Volume part into filename:
 * hostName acts as the cloud "root" directory and add_vol_and_part()
 * appends the Volume directory and the "part.<n>" file name.
 */
void file_driver::make_cloud_filename(POOLMEM *&filename,
        const char *VolumeName, uint32_t part)
{
   Enter(dbglvl);

   pm_strcpy(filename, hostName);
   dev->add_vol_and_part(filename, VolumeName, "part", part);
   Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename);
}
+
+/*
+ * Copy a single cache part to the cloud (local disk)
+ */
+bool file_driver::copy_cache_part_to_cloud(transfer *xfer)
+{
+ Enter(dbglvl);
+ POOLMEM *cloud_fname = get_pool_memory(PM_FNAME);
+ make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part);
+ Dmsg2(dbglvl, "Call put_object: %s, %s\n", xfer->m_cache_fname, cloud_fname);
+ bool rtn = put_object(xfer, xfer->m_cache_fname, cloud_fname, &upload_limit);
+ free_pool_memory(cloud_fname);
+ return rtn;
+}
+
+/*
+ * Copy a single object (part) from the cloud to the cache
+ */
+bool file_driver::copy_cloud_part_to_cache(transfer *xfer)
+{
+ Enter(dbglvl);
+ POOLMEM *cloud_fname = get_pool_memory(PM_FNAME);
+ make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part);
+ bool rtn = put_object(xfer, cloud_fname, xfer->m_cache_fname, &download_limit);
+ free_pool_memory(cloud_fname);
+ return rtn;
+}
+
+/*
+ * NOTE: The SD Cloud resource has the following items
+
+ RES hdr;
+ char *host_name;
+ char *bucket_name;
+ char *access_key;
+ char *secret_key;
+ int32_t protocol;
+ int32_t uri_style;
+ uint32_t driver_type;
+ uint32_t trunc_opt;
+ uint32_t upload_opt;
+*/
+
+bool file_driver::init(JCR *jcr, cloud_dev *adev, DEVRES *adevice)
+{
+ dev = adev; /* copy cloud device pointer */
+ device = adevice; /* copy device resource pointer */
+ cloud = device->cloud; /* local pointer to cloud definition */
+
+ /* File I/O buffer */
+ buf_len = dev->max_block_size;
+ if (buf_len == 0) {
+ buf_len = DEFAULT_BLOCK_SIZE;
+ }
+
+ hostName = cloud->host_name;
+ bucketName = cloud->bucket_name;
+ protocol = cloud->protocol;
+ uriStyle = cloud->uri_style;
+ accessKeyId = cloud->access_key;
+ secretAccessKey = cloud->secret_key;
+
+ return true;
+}
+
/*
 * Called at Job start: announce the file cloud driver parameters
 * in the Job report.  Always succeeds.
 */
bool file_driver::start_of_job(DCR *dcr)
{
   Jmsg(dcr->jcr, M_INFO, 0, _("Using File cloud driver Host=%s Bucket=%s\n"),
      hostName, bucketName);
   return true;
}
+
/* Called at Job end: nothing to clean up for the file driver */
bool file_driver::end_of_job(DCR *dcr)
{
   return true;
}
+
/*
 * Terminate the driver instance; nothing to release for the file driver.
 * Note, dcr may be NULL
 */
bool file_driver::term(DCR *dcr)
{
   return true;
}
+
+bool file_driver::get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err)
+{
+ Enter(dbglvl);
+
+ if (parts == NULL || strlen(VolumeName) == 0) {
+ pm_strcpy(err, "Invalid argument");
+ return false;
+ }
+
+ POOLMEM *vol_dir = get_pool_memory(PM_NAME);
+
+ pm_strcpy(vol_dir, hostName);
+
+ if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) {
+ pm_strcat(vol_dir, "/");
+ }
+ pm_strcat(vol_dir, VolumeName);
+
+ DIR* dp = NULL;
+ struct dirent *entry = NULL;
+ struct stat statbuf;
+ int name_max;
+ bool ok = false;
+ POOL_MEM dname(PM_FNAME);
+ int status = 0;
+
+ Dmsg1(dbglvl, "Searching for parts in: %s\n", vol_dir);
+
+ if (!(dp = opendir(vol_dir))) {
+ berrno be;
+ Mmsg2(err, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s",
+ VolumeName, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", err);
+ if (errno == ENOENT) {
+ ok=true; /* No volume, so no part */
+ }
+ goto get_out;
+ }
+
+ name_max = pathconf(".", _PC_NAME_MAX);
+ if (name_max < 1024) {
+ name_max = 1024;
+ }
+
+ entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000);
+
+ for ( ;; ) {
+ if (dcr->jcr->is_canceled()) {
+ pm_strcpy(err, "Job canceled");
+ goto get_out;
+ }
+ errno = 0;
+ status = breaddir(dp, dname.addr());
+ if (status != 0) {
+ if (status > 0) {
+ Mmsg1(err, "breaddir failed: status=%d", status);
+ Dmsg1(dbglvl, "%s\n", err);
+ }
+ break;
+ }
+ /* Always ignore . and .. */
+ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) {
+ continue;
+ }
+
+ /* Look only for part files */
+ if (strncmp("part.", dname.c_str(), 5) != 0) {
+ continue;
+ }
+
+ char *ext = strrchr (dname.c_str(), '.');
+ if (!ext || strlen(ext) < 2) {
+ continue;
+ }
+
+ cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part));
+
+ /* save extension (part number) to cloud_part struct index*/
+ part->index = atoi(&ext[1]);
+
+ POOLMEM *part_path = get_pool_memory(PM_NAME);
+ pm_strcpy(part_path,vol_dir);
+ if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) {
+ pm_strcat(part_path, "/");
+ }
+ pm_strcat(part_path, dname.c_str());
+
+ /* Get size of part */
+ if (lstat(part_path, &statbuf) == -1) {
+ berrno be;
+ Mmsg(err, "Failed to stat file %s: %s", part_path, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", err);
+ free_pool_memory(part_path);
+ free(part);
+ goto get_out;
+ }
+ free_pool_memory(part_path);
+
+ part->size = statbuf.st_size;
+ part->mtime = statbuf.st_mtime;
+ parts->put(part->index, part);
+ }
+ ok = true;
+
+get_out:
+ if (dp) {
+ closedir(dp);
+ }
+ if (entry) {
+ free(entry);
+ }
+
+ free_pool_memory(vol_dir);
+
+ return ok;
+}
+
+bool file_driver::get_cloud_volumes_list(DCR *dcr, alist *volumes, POOLMEM *&err)
+{
+ if (!volumes) {
+ pm_strcpy(err, "Invalid argument");
+ return false;
+ }
+
+ Enter(dbglvl);
+
+ DIR* dp = NULL;
+ struct dirent *entry = NULL;
+ struct stat statbuf;
+ int name_max;
+ bool ok = false;
+ POOLMEM *fullpath = get_pool_memory(PM_NAME);
+ POOL_MEM dname(PM_FNAME);
+ int status = 0;
+
+ if (!(dp = opendir(hostName))) {
+ berrno be;
+ Mmsg2(err, "Cannot opendir to get volumes list. host_name %s does not exist. ERR=%s",
+ hostName, be.bstrerror());
+ Dmsg1(dbglvl, "%s\n", err);
+ if (errno == ENOENT) {
+ ok=true; /* No volume, so no part */
+ }
+ goto get_out;
+ }
+
+ name_max = pathconf(".", _PC_NAME_MAX);
+ if (name_max < 1024) {
+ name_max = 1024;
+ }
+
+ entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000);
+
+ for ( ;; ) {
+ if (dcr->jcr->is_canceled()) {
+ goto get_out;
+ }
+ errno = 0;
+ status = breaddir(dp, dname.addr());
+ if (status != 0) {
+ if (status > 0) {
+ Mmsg1(err, "breaddir failed: status=%d", status);
+ Dmsg1(dbglvl, "%s\n", err);
+ }
+ break;
+ }
+ /* Always ignore . and .. */
+ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) {
+ continue;
+ }
+
+
+ pm_strcpy(fullpath, hostName);
+ if (!IsPathSeparator(fullpath[strlen(fullpath)-1])) {
+ pm_strcat(fullpath, "/");
+ }
+ pm_strcat(fullpath, dname.c_str());
+
+ if (lstat(fullpath, &statbuf) != 0) {
+ berrno be;
+ Dmsg2(dbglvl, "Failed to stat file %s: %s\n",
+ fullpath, be.bstrerror());
+ continue;
+ }
+
+ if (S_ISDIR(statbuf.st_mode)) {
+ volumes->append(bstrdup(dname.c_str()));
+ }
+ }
+ ok = true;
+
+get_out:
+ if (dp) {
+ closedir(dp);
+ }
+ if (entry) {
+ free(entry);
+ }
+
+ free_pool_memory(fullpath);
+
+ return ok;
+}
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
#include "cloud_driver.h" /* get base class definitions */
class file_driver: public cloud_driver {
+public:
+ cloud_dev *dev; /* device that is calling us */
+ DEVRES *device;
+ CLOUD *cloud; /* Pointer to CLOUD resource */
+ alist *objects;
+ uint32_t buf_len;
+
+
+ /* Stuff directly from Cloud resource */
+ char *hostName;
+ char *bucketName;
+ char *accessKeyId;
+ char *secretAccessKey;
+ int32_t protocol;
+ int32_t uriStyle;
+
+
+private:
+ void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part);
+ bool init(JCR *jcr, cloud_dev *dev, DEVRES *device);
+ bool start_of_job(DCR *dcr);
+ bool end_of_job(DCR *dcr);
+ bool term(DCR *dcr);
+ bool truncate_cloud_volume(DCR *dcr, const char *VolumeName, ilist *trunc_parts, POOLMEM *&err);
+ bool copy_cache_part_to_cloud(transfer *xfer);
+ bool copy_cloud_part_to_cache(transfer *xfer);
+ bool get_cloud_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts, POOLMEM *&err);
+ bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err);
+
+ bool put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname, bwlimit *limit);
+ bool get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname);
+
public:
file_driver() {
};
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
bwlimit *limit; /* Used to control the bandwidth */
bacula_ctx(POOLMEM *&err) : jcr(NULL), xfer(NULL), errMsg(err), parts(NULL),
isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL),
- infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL)
+ infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL)
{}
bacula_ctx(transfer *t) : jcr(NULL), xfer(t), errMsg(t->m_message), parts(NULL),
isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL),
- infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL)
- {}
+ infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL)
+ {}
};
Enter(dbglvl);
bacula_ctx ctx(xfer);
ctx.limit = upload_limit.use_bwlimit() ? &upload_limit : NULL;
-
+
struct stat statbuf;
if (lstat(cache_fname, &statbuf) == -1) {
berrno be;
{
Enter(dbglvl);
POOLMEM *cloud_fname = get_pool_memory(PM_FNAME);
- make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part);
+ make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part);
uint32_t retry = max_upload_retries;
S3Status status = S3StatusOK;
do {
/*
Bacula(R) - The Network Backup Solution
- Copyright (C) 2000-2017 Kern Sibbald
+ Copyright (C) 2000-2018 Kern Sibbald
The original author of Bacula is Kern Sibbald, with contributions
from many others, a complete list can be found in the file AUTHORS.
enum {
TRUNC_NO = 0, /* default value */
TRUNC_AFTER_UPLOAD = 1,
- TRUNC_AT_ENDOFJOB = 2
+ TRUNC_AT_ENDOFJOB = 2,
+ TRUNC_CONF_DEFAULT = 3 /* only use as a parameter, not in the conf */
};
/*
char *bucket_name;
char *access_key;
char *secret_key;
+ char *blob_endpoint;
+ char *file_endpoint;
+ char *queue_endpoint;
+ char *table_endpoint;
+ char *endpoint_suffix;
char *region;
int32_t protocol;
int32_t uri_style;