From: Francisco Manuel Garcia Botella
Date: Mon, 23 Sep 2024 22:10:20 +0000 (+0200)
Subject: k8s: Add new level (PVC annotations) to backup mode selection
X-Git-Tag: Release-15.0.3~14
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3709fce4efc668aa3a53c1cb79735d8b1662abc7;p=thirdparty%2Fbacula.git

k8s: Add new level (PVC annotations) to backup mode selection
---

diff --git a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py
index 96bb6ad47..f69b132ed 100644
--- a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py
+++ b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py
@@ -24,7 +24,7 @@ from baculak8s.io.packet_definitions import FILE_DATA_START
 from baculak8s.jobs.estimation_job import PVCDATA_GET_ERROR, EstimationJob
 from baculak8s.jobs.job_pod_bacula import DEFAULTRECVBUFFERSIZE
 from baculak8s.plugins.k8sbackend.baculaannotations import (
-    BaculaAnnotationsClass, BaculaBackupMode)
+    BaculaAnnotationsClass, BaculaBackupMode, annotated_pvc_backup_mode)
 from baculak8s.plugins.k8sbackend.baculabackup import BACULABACKUPPODNAME
 from baculak8s.plugins.k8sbackend.podexec import ExecStatus, exec_commands
 from baculak8s.util.respbody import parse_json_descr
@@ -39,7 +39,7 @@ BA_MODE_ERROR = "Invalid annotations for Pod: {namespace}/{podname}. Backup Mode
 BA_EXEC_STDOUT = "{}:{}"
 BA_EXEC_STDERR = "{} Error:{}"
 BA_EXEC_ERROR = "Pod Container execution: {}"
-POD_BACKUP_SELECTED = "The selected backup mode in pod to the pvc `{}` is `{}`"
+POD_BACKUP_SELECTED = "The selected backup mode to do pvc backup of the pvc `{}` is `{}`"
 CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC = "The pvc `{}` is not compatible with snapshot backup, changing mode to clone. Only pvc with storage that they use CSI driver are compatible."
 PVC_BACKUP_MODE_APPLIED_INFO = "The pvc `{}` will be backup with {} mode."
 RETRY_BACKUP_WITH_STANDARD_MODE = "If the clone backup is empty. It will try again to do a backup using standard mode."
@@ -58,7 +58,7 @@ class BackupJob(EstimationJob):
         self.fs_backup_mode = BaculaBackupMode.process_param(params.get("backup_mode", BaculaBackupMode.Snapshot))  # Fileset backup mode defined.
         if _label is not None:
             self._io.send_info(BACKUP_PARAM_LABELS.format(_label))
-        self._io.send_info("The selected default backup mode to do pvc backup is `{}`.".format(self.fs_backup_mode))
+        self._io.send_info("The selected default backup mode to do pvc backup in all job is `{}`.".format(self.fs_backup_mode))
 
     def execution_loop(self):
         super().processing_loop(estimate=False)
@@ -229,13 +229,15 @@ class BackupJob(EstimationJob):
         logging.debug("process_pod_pvcdata:{}/{} {}".format(namespace, pod, pvcnames))
         status = None
         corev1api = self._plugin.corev1api
-        backupmode = BaculaBackupMode.process_param(pod.get(BaculaAnnotationsClass.BackupMode, BaculaBackupMode.Snapshot))
-        if backupmode is None:
+        pod_backup_mode = BaculaBackupMode.process_param(pod.get(BaculaAnnotationsClass.BackupMode, BaculaBackupMode.Snapshot))
+        if pod_backup_mode is None:
             self._handle_error(BA_MODE_ERROR.format(namespace=namespace, podname=pod.get('name'),
                                                     mode=pod.get(BaculaAnnotationsClass.BackupMode)))
             return False
 
+        self._io.send_info("The selected default backup mode to do pvc backup of the pod `{}` is `{}`".format(pod.get('name'), pod_backup_mode))
+
         failonerror = BoolParam.handleParam(pod.get(BaculaAnnotationsClass.RunBeforeJobonError), True)  # the default is to fail job on error
         # here we execute remote command before Pod backup
         if not self.handle_pod_container_exec_command(corev1api, namespace, pod, BaculaAnnotationsClass.RunBeforeJob, failonerror):
@@ -252,7 +254,9 @@ class BackupJob(EstimationJob):
                 original_pvc = self._plugin.get_pvcdata_namespaced(namespace, pvcname)
                 vsnapshot = None
                 logging.debug("handling vol before backup: {}".format(pvcname))
-                self._io.send_info(POD_BACKUP_SELECTED.format(pvcname, backupmode))
+                pvc_raw = self._plugin.get_persistentvolumeclaim_read_namespaced(namespace, pvcname)
+                pvc_backup_mode = annotated_pvc_backup_mode(pvc_raw, pod_backup_mode)
+                self._io.send_info(POD_BACKUP_SELECTED.format(pvcname, pvc_backup_mode))
                 # Check if pvc has status: 'Terminating'. Because in this state, the backup raise error.
                 if self._plugin.pvc_is_terminating(namespace, original_pvc):
@@ -264,7 +268,7 @@ class BackupJob(EstimationJob):
                     self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvcname))
                     continue
 
-                if backupmode == BaculaBackupMode.Snapshot:
+                if pvc_backup_mode == BaculaBackupMode.Snapshot:
                     logging.debug('Snapshot mode chosen')
                     vsnapshot, pvc_from_vsnap = self.handle_create_vsnapshot_backup(namespace, pvcname)
                     logging.debug("The vsnapshot created from pvc {} is: {}".format(pvcname, vsnapshot))
@@ -273,12 +277,12 @@ class BackupJob(EstimationJob):
                         logging.debug(CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC.format(pvcname))
                         # backupmode = BaculaBackupMode.Clone
                         self._io.send_info(CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC.format(pvcname))
-                        backupmode = BaculaBackupMode.Clone
+                        pvc_backup_mode = BaculaBackupMode.Clone
                     else:
                         pvc = pvc_from_vsnap
                         pvcname = pvc_from_vsnap.get("name")
 
-                if backupmode == BaculaBackupMode.Clone:
+                if pvc_backup_mode == BaculaBackupMode.Clone:
                     pvcname = self.create_pvcclone(namespace, pvcname)
                     cloned_pvc = self._plugin.get_pvcdata_namespaced(namespace, pvcname)
                     if pvcname is None:
@@ -296,7 +300,7 @@ class BackupJob(EstimationJob):
                     'pvcname': pvcname,
                     'pvc': pvc,
                     'vsnapshot': vsnapshot,
-                    'backupmode': backupmode,
+                    'backupmode': pvc_backup_mode,
                     'original_pvc': original_pvc
                 })
diff --git a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/estimation_job.py b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/estimation_job.py
index 5586ecf98..d03af364d 100644
--- a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/estimation_job.py
+++ b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/estimation_job.py
@@ -22,7 +22,7 @@ from baculak8s.entities.file_info import FileInfo
 from baculak8s.io.default_io import DefaultIO
 from baculak8s.io.packet_definitions import ESTIMATION_START_PACKET
 from baculak8s.jobs.job_pod_bacula import (PVCDATA_GET_ERROR, JobPodBacula)
-from baculak8s.plugins.k8sbackend.baculaannotations import BaculaAnnotationsClass
+from baculak8s.plugins.k8sbackend.baculaannotations import BaculaAnnotationsClass, BaculaBackupMode
 from baculak8s.util.respbody import parse_json_descr
 
 PATTERN_NOT_FOUND = "No matches found for pattern {}"
@@ -155,24 +155,25 @@ class EstimationJob(JobPodBacula):
                                                      bavol=BaculaAnnotationsClass.BaculaPrefix + BaculaAnnotationsClass.BackupVolume))
                     continue
-                else:
-                    podname = pod.get('name')
-                    if not estimate:
-                        self._io.send_info(PROCESSING_PODBACKUP_START_INFO.format(namespace=nsname,
-                                                                                   podname=podname))
-                    status = self.process_pod_pvcdata(nsname, pod, pvcnames)
-                    logging.debug("Status in processing_loop: {}".format(status))
-                    if status is None:
-                        for pvc_name in pvcnames.split(','):
-                            pvc_backed[pvc_name] = 'error'
-                        logging.error("Some unknown error at {namespace}/{podname}!".format(namespace=nsname, podname=podname))
-                        self._handle_error(PROCESS_POD_PVCDATA_ERROR.format(namespace=nsname, podname=podname))
-                        break
-                    if not estimate:
-                        for pvc_name in pvcnames.split(','):
-                            pvc_backed[pvc_name] = 'ok'
-                        self._io.send_info(PROCESSING_PODBACKUP_FINISH_INFO.format(namespace=nsname,
-                                                                                    podname=podname))
+
+                podname = pod.get('name')
+                if not estimate:
+                    self._io.send_info(PROCESSING_PODBACKUP_START_INFO.format(namespace=nsname,
+                                                                              podname=podname))
+
+                status = self.process_pod_pvcdata(nsname, pod, pvcnames)
+                logging.debug("Status in processing_loop: {}".format(status))
+                if status is None:
+                    for pvc_name in pvcnames.split(','):
+                        pvc_backed[pvc_name] = 'error'
+                    logging.error("Some unknown error at {namespace}/{podname}!".format(namespace=nsname, podname=podname))
+                    self._handle_error(PROCESS_POD_PVCDATA_ERROR.format(namespace=nsname, podname=podname))
+                    break
+                if not estimate:
+                    for pvc_name in pvcnames.split(','):
+                        pvc_backed[pvc_name] = 'ok'
+                    self._io.send_info(PROCESSING_PODBACKUP_FINISH_INFO.format(namespace=nsname,
+                                                                               podname=podname))
 
             if len(podsannotated) > 0 and not estimate:
                 self._io.send_info(PROCESSING_PODBACKUP_PHASE_FINISH_INFO)
             logging.debug("PVCs backed through pod annotations: {}".format(pvc_backed))
diff --git a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculaannotations.py b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculaannotations.py
index c33bd9456..afdc3f17a 100644
--- a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculaannotations.py
+++ b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculaannotations.py
@@ -147,3 +147,17 @@ def annotated_namespaced_pods_data(corev1api, namespace, estimate=False, labels=
             podsdata.append(podobj)
     return podsdata
+
+def annotated_pvc_backup_mode(pvc, pod_backup_mode):
+    """Reads the PVC annotations looking for a backup mode annotation
+    Args:
+        pvc: kubernetes PVC object
+        pod_backup_mode: backup mode selected by the pod annotation
+
+    Returns:
+        The PVC's BaculaBackupMode if it is annotated, otherwise pod_backup_mode
+    """
+    metadata = pvc.metadata
+    if metadata.annotations is None or 'bacula/backup.mode' not in metadata.annotations:
+        return pod_backup_mode
+    return BaculaBackupMode.process_param(metadata.annotations.get('bacula/backup.mode'))
\ No newline at end of file
diff --git a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py
index e1eff5c65..c390c44fe 100644
--- a/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py
+++ b/bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py
@@ -322,6 +322,9 @@ class KubernetesPlugin(Plugin):
                                                estimate=estimate))
         return self.k8s[K8SObjType.K8SOBJ_STORAGECLASS]
 
+    def get_persistentvolumeclaim_read_namespaced(self, namespace, name):
+        return self.__execute(lambda: persistentvolumeclaims_read_namespaced(self.corev1api, namespace, name))
+
     def get_pvcdata_namespaced(self, namespace, pvcname, pvcalias=None, estimate=False):
         logging.debug("pvcdata namespaced: {}/{} pvcalias={}".format(namespace, pvcname, pvcalias))
         return self.__execute(lambda: pvcdata_get_namespaced(self.corev1api, namespace, pvcname, pvcalias))
diff --git a/regress/scripts/kubernetes/kubernetes-plugin-test-0005-bacula-dir.conf.in b/regress/scripts/kubernetes/kubernetes-plugin-test-0005-bacula-dir.conf.in
new file mode 100644
index 000000000..c4d599350
--- /dev/null
+++ b/regress/scripts/kubernetes/kubernetes-plugin-test-0005-bacula-dir.conf.in
@@ -0,0 +1,148 @@
+#
+# Kubernetes Plugin Bacula Director Configuration file
+# Target: Test backup mode selection through pod and pvc annotations.
+# + +Director { # define myself + Name = @hostname@-dir + DIRPort = @dirport@ # where we listen for UA connections + QueryFile = "@scriptdir@/query.sql" + WorkingDirectory = "@working_dir@" + PidDirectory = "@piddir@" + SubSysDirectory = "@subsysdir@" + Maximum Concurrent Jobs = 1 + Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password + Messages = Standard +} + +JobDefs { + Name = "BackupJob" + Type = Backup + Pool = Default + Storage = File + Messages = Standard + Priority = 10 + Client=@hostname@-fd + Write Bootstrap = "@working_dir@/%n-%f.bsr" +} + +JobDefs { + Name = "Default" + Type = Backup + Client=@hostname@-fd + Level = Full + Storage = File1 + Messages = Standard + Write Bootstrap = "@working_dir@/%c.bsr" + Pool = Default + SpoolData = yes + Max Run Time = 30min +} + +# List of files to be backed up +FileSet { + Name = "Full Set" + Include { Options { signature=SHA1 } + File =<@tmpdir@/file-list + } +} + +# Client (File Services) to backup +Client { + Name = @hostname@-fd + Address = @hostname@ + FDPort = @fdport@ + Catalog = MyCatalog + Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon + File Retention = 30d # 30 days + Job Retention = 180d # six months + AutoPrune = yes # Prune expired Jobs/Files +} + +# Definiton of file storage device +Storage { + Name = File + Address = @hostname@ # N.B. Use a fully qualified name here + SDPort = @sdport@ + Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" + Device = FileStorage + Media Type = File +} + +# Definiton of file storage device +Storage { + Name = File1 + Address = @hostname@ # N.B. Use a fully qualified name here + SDPort = @sdport@ + Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" + Device = FileStorage1 + Media Type = File1 +} + +# Standard Restore template, to be changed by Console program +Job { + Name = "RestoreFiles" + Type = Restore + Client=@hostname@-fd + FileSet="Full Set" + Storage = File1 + Messages = Standard + Pool = Default + Where = @tmpdir@/bacula-restores + Max Run Time = 30min +} + +# Generic catalog service +Catalog { + Name = MyCatalog + @libdbi@ + dbname = @db_name@; user = @db_user@; password = "@db_password@" +} + +# Reasonable message delivery -- send most everything to email address +# and to the console +Messages { + Name = Standard + mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r" + operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: Intervention needed for %j\" %r" +# MailOnError = @job_email@ = all, !terminate +# operator = @job_email@ = mount + console = all + + append = "@working_dir@/log" = all, !skipped + catalog = all, !skipped +} + +Messages { + Name = NoEmail + mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r" + console = all, !skipped, !terminate, !restored + append = "@working_dir@/log" = all, !skipped + catalog = all, !skipped +} + + +# Default pool definition +Pool { + Name = Default + Pool Type = Backup + Recycle = yes # Bacula can automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 365d # one year +} + + +### Specific configuration to kubernetes tests + +#### 01 Test standard backup with pod annotation with clone and other annoration in pvc +FileSet { + Name = "Test-K8S-Set-0005-1" + Include { Options { signature=SHA1 } + Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0005_1@ 
namespace=@K8S_NAMESPACE_1@" + } +} +Job { + Name = "Test-K8S-0005-1" + JobDefs = Default + FileSet = Test-K8S-Set-0005-1 +} \ No newline at end of file diff --git a/regress/scripts/kubernetes/kubernetes-plugin-test-0005.yaml b/regress/scripts/kubernetes/kubernetes-plugin-test-0005.yaml new file mode 100644 index 000000000..6588f9927 --- /dev/null +++ b/regress/scripts/kubernetes/kubernetes-plugin-test-0005.yaml @@ -0,0 +1,61 @@ +# testing-ns-0005: Config file to test the backup with annotations + +apiVersion: v1 +kind: Namespace +metadata: + name: testing-ns-0005-1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-pvc-0005-1 + namespace: testing-ns-0005-1 + annotations: + bacula/backup.mode: standard +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-pvc-0005-2 + namespace: testing-ns-0005-1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: test-pod-0005-1 + namespace: testing-ns-0005-1 + annotations: + bacula/backup.mode: clone + bacula/backup.volumes: test-pvc-0005-1, test-pvc-0005-2 +spec: + volumes: + - name: pvc-0005-1 + persistentVolumeClaim: + claimName: test-pvc-0005-1 + - name: pvc-0005-2 + persistentVolumeClaim: + claimName: test-pvc-0005-2 + containers: + - name: test-nginx-container + image: nginx + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/pvc-1" + name: pvc-0005-1 + - mountPath: "/pvc-2" + name: pvc-0005-2 diff --git a/regress/tests/kubernetes/kubernetes-plugin-tests-0005 b/regress/tests/kubernetes/kubernetes-plugin-tests-0005 new file mode 100755 index 000000000..542684e67 --- /dev/null +++ b/regress/tests/kubernetes/kubernetes-plugin-tests-0005 @@ -0,0 +1,378 @@ +#!/bin/bash +# +# Copyright (C) 2000-2015 Kern Sibbald +# License: BSD 2-Clause; see file LICENSE-FOSS +# + +# +# Attempt to backup and restore kubernetes pvcs avoiding the pvc +# with Terminating status +# +# Assumes: +# - You have a working K8S cluster avaialable +# - You can create storage class with any local-storage provider + +# +# The k8s cluster status: + +# $ kubectl apply -f scripts/kubernetes/kubernetes-plugin-test-0001.yaml +# namespace/testing-ns-0001-1 created +# storageclass.storage.k8s.io/local-storage unchanged +# persistentvolumeclaim/test-persistent-volume-claim-0001 created +# pod/test-pod-0001 created + + +# $ kubectl -n testing-ns-0001-1 get pods -o wide +# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +# test-pod-0001 1/1 Running 0 4m59s 10.85.0.124 am-u20-k8s-worker02-bck + +# $ kubectl -n testing-ns-0001-1 get pvc -o wide +# NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE +# test-persistent-volume-claim-0001 Bound pvc-e4b2c7b7-2679-494c-af61-8e1cac026c4d 1Gi RWO local-path 5m29s Filesystem + +# $ kubectl -n testing-ns-0001-1 get svc -o wide +# No resources found in testing-ns-0001-1 namespace. + +# $ kubectl -n testing-ns-0001-1 get rs -o wide +# No resources found in testing-ns-0001-1 namespace. + +# $ kubectl -n testing-ns-0001-1 get sts -o wide +# No resources found in testing-ns-0001-1 namespace. 
+ +# $ kubectl -n testing-ns-0001-1 get storageclass -o wide +# NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +# local-path rancher.io/local-path Delete WaitForFirstConsumer false 16h +# local-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 148m +# nfs-client k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 250d +# rook-ceph-block rook-ceph.rbd.csi.ceph.com Delete Immediate true 236d + + +# $ kubectl -n testing-ns-0001-1 get volumesnapshotclasses -o wide +# NAME DRIVER DELETIONPOLICY AGE +# csi-rbdplugin-snapclass rook-ceph.rbd.csi.ceph.com Delete 235d + +TEST_ID=0005 +TestName="kubernetes-plugin-test-${TEST_ID}" +JobNameBase="Test-K8S-${TEST_ID}" +FileSetName="Test-K8S-Set-${TEST_ID}-" + +# Variables in tests +K8S_SCRIPT_YAML_FILE="scripts/kubernetes/kubernetes-plugin-test-${TEST_ID}.yaml" +K8S_NAMESPACE_1="testing-ns-${TEST_ID}-1" +K8S_NAMESPACE_2="testing-ns-${TEST_ID}-2" +PVC_N1_0005_1="test-pvc-${TEST_ID}-1" +PVC_N1_0005_2="test-pvc-${TEST_ID}-2" +POD_N1_0005_1="test-pod-${TEST_ID}-1" +PVC_N1_0005_1_PATH_IN_POD="/pvc-1" +PVC_N1_0005_2_PATH_IN_POD="/pvc-2" + +. scripts/functions +. scripts/regress-utils.sh + +. tests/kubernetes/k8s-utils.sh + +printf "\nInit test: ${TestName}\n" + +CONNECTION_ARGS="" +if [ ! -z $KUBE_FD_CERT_FILE ] +then + setup_self_signed_cert $KUBE_FD_CERT_DIR $KUBE_FD_CERT_NAME + CONNECTION_ARGS=" fdkeyfile=$KUBE_FD_KEY_FILE fdcertfile=$KUBE_FD_CERT_FILE " +fi + +if [ ! -z "$KUBE_PROXY_POD_PLUGIN_HOST" ] +then + CONNECTION_ARGS="${CONNECTION_ARGS} pluginhost=${KUBE_PROXY_POD_PLUGIN_HOST} " +fi + +if [ ! -z "$KUBE_BACULA_IMAGE" ] +then + CONNECTION_ARGS="${CONNECTION_ARGS} baculaimage=${KUBE_BACULA_IMAGE} imagepullpolicy=ifNotPresent " +fi + +export debug=1 +scripts/cleanup +scripts/copy-kubernetes-plugin-confs ${TEST_ID} + +printf "\n ... Preparing ...\n" + +# export requires variables +setup_plugin_param "kubernetes:" +if [ "x$KUBECONFIG" != "x" ] +then + export KUBECONFIG + LPLUG="${LPLUG} config='$KUBECONFIG' ${CONNECTION_ARGS}" +fi + +KSTORAGECLASS=`${KUBECTL} get storageclass | grep local | wc -l` +if [ $KSTORAGECLASS -eq 0 ] +then + echo "Do you need a local storage class. It is to simplify the errors!" + exit 1 +fi + +tmp="${tmp}/test-${TEST_ID}" + +mkdir -p ${tmp} + +# check the requirements +KNODES=`${KUBECTL} get nodes | grep Ready | wc -l` +if [ $KNODES -eq 0 ] +then + echo "A working Kubernetes cluster required!" + exit 1 +fi + +# check if K8S_NAMESPACE_1 exists +KPLUGTEST_1=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_1} " | wc -l` +if [ $KPLUGTEST_1 -ne 0 ] && [ "x$1" != "xforce" ]; +then + echo "Namespace \"${K8S_NAMESPACE_1}\" exist on cluster and no force option specified!" + exit 1 +fi + + +# prepare data +printf "\n ... Apply data ... \n" +reset_k8s_env() { + if [ $KPLUGTEST_1 -ne 0 ] + then + printf "Removing namespaces: ${K8S_NAMESPACE_1}\n" + ${KUBECTL} delete ns ${K8S_NAMESPACE_1} 2>&1 > ${tmp}/kube.log + printf "Removed namespaces: ${K8S_NAMESPACE_1}\n" + fi + ${KUBECTL} apply -f ${K8S_SCRIPT_YAML_FILE} 2>&1 >> ${tmp}/kube.log + + i=0 + SPIN=('-' '\\' '|' '/') + printf "\n ... Waiting to ready ... \n" + while true + do + kstat_n1=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l` + + if [ $kstat_n1 -eq 0 ]; then + break + fi; + w=1 + printf "\b${SPIN[(($i % 4))]}" + if [ $i -eq 600 ] + then + echo "Timeout waiting for test data to populate. Cannot continue!" 
+ exit 1 + fi + ((i++)) + sleep 1 + done + # Command to create a file inside pvc + printf "\n ... Refill data in pvcs ...\n" + SIZE_MB=10 + DD_CMD="dd if=/dev/urandom of=${PVC_N1_0005_1_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}" + # Exec command inside pod. + ${KUBECTL} exec -it $POD_N1_0005_1 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD" + SIZE_MB=20 + DD_CMD="dd if=/dev/urandom of=${PVC_N1_0005_2_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}" + # Exec command inside pod. + ${KUBECTL} exec -it $POD_N1_0005_1 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD" +} + +reset_k8s_env + + +# wait a bit to objects to populate. +sleep 10 + + +# get variables +printf "\n ... Get Environment Variables ...\n" +${KUBECTL} get ns -o name > ${tmp}/allns.log +${KUBECTL} get pv -o name > ${tmp}/allpv.log +${KUBECTL} get storageclass -o name > ${tmp}/allsc.log + + +# Prepare bacula dir configuration +printf "\n ... Preparing Bacula-dir configuration ...\n" +export PLUGIN_WORKING=${cwd}/working + +out_sed="${tmp}/sed_tmp" +echo "s%@LPLUG@%${LPLUG}%" > ${out_sed} +echo "s%@K8S_NAMESPACE_1@%${K8S_NAMESPACE_1}%" >> ${out_sed} +echo "s%@K8S_NAMESPACE_2@%${K8S_NAMESPACE_2}%" >> ${out_sed} +echo "s%@PVC_N1_0005_1@%${PVC_N1_0005_1}%" >> ${out_sed} + +echo "s%@CONNECTION_ARGS@%${CONNECTION_ARGS}%" >> ${out_sed} +echo "s%@BACKUP_PROXY_WITHOUT_PVC@%${BACKUP_PROXY_WITHOUT_PVC}%" >> ${out_sed} +echo "s%@BACKUP_ONLY_PVC@%${BACKUP_ONLY_PVC}%" >> ${out_sed} +printf "\nCommand launched:\n" +echo "sed -i -f ${out_sed} ${conf}/bacula-dir.conf" + +sed -i -f ${out_sed} ${conf}/bacula-dir.conf + +printf "\n ... Done ...\n" + +## Variables to restore from other jobs +JOB_ID_TO_RESTORE_1=0 +JOB_ID_TO_RESTORE_2=0 + + +start_test + +# We must put the bconsole command in ${cwd}/tmp/bconcmds +cat <${tmp}/bconcmds +@output /dev/null +messages +@$out ${tmp}/log.out +label storage=File1 pool=Default volume=TestVolume001 +@setdebug dir level=500 trace=1 +quit +END_OF_DATA + +run_bacula + +############# +## BTEST 1 ## +############# +btest1 () { + # Test 1 + TEST=1 + OUTPUT_FILE=${tmp}/blog${TEST}.out + JOB_ID_TO_RESTORE_1=${JOBID} + do_regress_backup_test ${TEST} + check_regress_backup_statusT ${TEST} + F=$? 
+ # Check pvc1 is backup once + F_1=0 + RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0005_1}.tar" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_1=1 + ((bstat++)) + fi + # Check pvc2 data is backup + F_2=0 + RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0005_2}.tar" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_2=1 + ((bstat++)) + fi + + F_3=0 + RET=`grep "kubernetes:" ${OUTPUT_FILE} | grep "selected default backup mode to do pvc backup in all job" | grep "standard" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_3=1 + ((bstat++)) + fi + F_4=0 + RET=`grep "kubernetes:" ${OUTPUT_FILE} | grep "default backup mode to do pvc backup of the pod" | grep "${POD_N1_0005_1}" | grep "clone" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_4=1 + ((bstat++)) + fi + F_5=0 + RET=`grep "kubernetes:" ${OUTPUT_FILE} | grep "selected backup mode to do pvc backup of the pvc" | grep "${PVC_N1_0005_1}" | grep "standard" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_5=1 + ((bstat++)) + fi + F_6=0 + RET=`grep "kubernetes:" ${OUTPUT_FILE} | grep "selected backup mode to do pvc backup of the pvc" | grep "${PVC_N1_0005_2}" | grep "clone" | wc -l` + RES=1 + if [ $RET -ne $RES ] + then + F_6=1 + ((bstat++)) + fi + + printf "%s\n" "--------" + printf "Results backup test ${TEST}:\n" + printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F}) + printf "%s%s\n" " -> The pvc data of '${PVC_N1_0005_1}' is backup: " $(regress_test_result ${F_1}) + printf "%s%s\n" " -> The pvc data of '${PVC_N1_0005_2}' is backup: " $(regress_test_result ${F_2}) + printf "%s%s\n" " -> The backup mode of pvc for all job is standard: " $(regress_test_result ${F_3}) + printf "%s%s\n" " -> The backup mode of pvc for all pod is clone: " $(regress_test_result ${F_4}) + printf "%s%s\n" " -> The backup mode of pvc '${PVC_N1_0005_1}' is standard: " $(regress_test_result ${F_5}) + printf "%s%s\n" " -> The backup mode of pvc '${PVC_N1_0005_2}' is clone: " $(regress_test_result ${F_6}) + printf "%s\n" "--------" +} + + +############# +## RTEST 1 ## +############# +rtest1 () { + TEST=1 + if [ "${JOB_ID_TO_RESTORE_1}" -eq 0 ]; then + printf "%s\n" "--------------" + printf "%s\n" "The job id to restore ${TEST} was not assigned." + printf "%s\n" "--------------" + exit 1 + fi + # Before delete + echo "---> Before delete the pvc:" 2>&1 > ${tmp}/rlog${TEST}.out + ${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + ${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + echo "---> Deleting the pvc and pod:" 2>&1 >> ${tmp}/rlog${TEST}.out + ${KUBECTL} -n ${K8S_NAMESPACE_1} delete pod/${POD_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + ${KUBECTL} -n ${K8S_NAMESPACE_1} delete pvc/${PVC_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + echo "---> Deleted the pvc(${PVC_N1_0005_1}) and pod (${POD_N1_0005_1})" 2>&1 >> ${tmp}/rlog${TEST}.out + actions=( + "" # Always starts with empty line. I don't know why is neccesary. + "cd @kubernetes/namespaces/${K8S_NAMESPACE_1}/pods/" + "mark ${POD_N1_0005_1}.yaml" + "cd ../persistentvolumeclaims/" + "mark ${PVC_N1_0005_1}.yaml" + "mark ${PVC_N1_0005_1}.tar" + ) + do_regress_restore_test_jobid ${TEST} ${JOB_ID_TO_RESTORE_1} "/" $actions + check_regress_restore_statusT ${TEST} + F=$? 
+ # check if object restored on kubernetes + + echo "---> After restore the pod and pvc:" 2>&1 >> ${tmp}/rlog${TEST}.out + ${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + RET=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0005_1} -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l` + ${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0005_1} 2>&1 >> ${tmp}/rlog${TEST}.out + RET_1=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0005_1} -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l` + + F_1=0 F_2=0 + rets=($RET $RET_1) + fs=("F_1" "F_2") + + for i in ${!rets[@]}; do + echo "RET: ${rets[i]}" >> ${tmp}/rlog${TEST}.out + if [ ${rets[i]} -ne 1 ]; then + eval ${fs[i]}=1 + rstat=$((rstat+1)) + fi + done + + printf "%s\n" "--------" + printf "Result restore test ${TEST}:" + printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F}) + printf "%s%s\n" " -> The pod ${POD_N1_0005_1} was restored: " $(regress_test_result ${F_1}) + printf "%s%s\n" " -> The pvc ${PVC_N1_0005_1} was restored: " $(regress_test_result ${F_2}) + printf "%s\n" "--------" +} + +estat=0 + +bstat=0 +JOBID=1 +# This job is the base of all backup jobs names +JobName=${JobNameBase}- + +btest1 + +rstat=0 +rtest1 + +# stop_bacula +end_test \ No newline at end of file
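Usage sketch (illustrative; the namespace and PVC names below come from the test fixture above, they are not mandated by the plugin): with this change the backup mode is resolved per PVC. A `bacula/backup.mode` annotation on the PVC itself takes precedence over the pod-level annotation, and a PVC without the annotation keeps the pod-level mode (see annotated_pvc_backup_mode). Besides declaring it in a manifest as in kubernetes-plugin-test-0005.yaml, the annotation can be set on a live PVC with kubectl:

    kubectl -n testing-ns-0005-1 annotate pvc test-pvc-0005-1 bacula/backup.mode=standard --overwrite

The regression test exercises the values standard and clone; when neither the PVC nor the pod carries the annotation, the plugin falls back to snapshot mode.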