git.ipfire.org Git - thirdparty/bacula.git/commitdiff
k8s: Add parallel job in same namespace
author Francisco Manuel Garcia Botella <francisco.garcia@baculasystems.com>
Thu, 26 Sep 2024 07:29:10 +0000 (09:29 +0200)
committer Eric Bollengier <eric@baculasystems.com>
Mon, 24 Mar 2025 06:58:38 +0000 (07:58 +0100)
bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py
bacula/src/plugins/fd/kubernetes-backend/baculak8s/jobs/job_pod_bacula.py
bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackup.py
bacula/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py
regress/scripts/kubernetes/kubernetes-plugin-test-0006-bacula-dir.conf.in [new file with mode: 0644]
regress/scripts/kubernetes/kubernetes-plugin-test-0006.yaml [new file with mode: 0644]
regress/scripts/regress-utils.sh
regress/tests/kubernetes/kubernetes-plugin-tests-0006 [new file with mode: 0755]

index 587cace171e269e5ab4badd0c376288388833b55..2a88482a42b12c2f8e064c70b3e380292f6edde0 100644 (file)
@@ -25,7 +25,6 @@ from baculak8s.jobs.estimation_job import PVCDATA_GET_ERROR, EstimationJob
 from baculak8s.jobs.job_pod_bacula import DEFAULTRECVBUFFERSIZE
 from baculak8s.plugins.k8sbackend.baculaannotations import (
     BaculaAnnotationsClass, BaculaBackupMode, annotated_pvc_backup_mode)
-from baculak8s.plugins.k8sbackend.baculabackup import BACULABACKUPPODNAME
 from baculak8s.plugins.k8sbackend.podexec import ExecStatus, exec_commands
 from baculak8s.util.respbody import parse_json_descr
 from baculak8s.util.boolparam import BoolParam
index 39b51ea6f552ca34a0d853b1f82623984217f50d..db2455e1960f7d60a9f13e10d203920ac118757d 100644 (file)
@@ -28,9 +28,9 @@ from abc import ABCMeta
 import yaml
 from baculak8s.jobs.job import Job
 from baculak8s.plugins.k8sbackend.baculabackup import (BACULABACKUPIMAGE,
-                                                       BACULABACKUPPODNAME,
                                                        ImagePullPolicy,
-                                                       prepare_backup_pod_yaml)
+                                                       prepare_backup_pod_yaml,
+                                                       get_backup_pod_name)
 from baculak8s.plugins.k8sbackend.pvcclone import prepare_backup_clone_yaml
 from baculak8s.plugins.k8sbackend.baculaannotations import BaculaBackupMode
 from baculak8s.util.respbody import parse_json_descr
@@ -241,13 +241,15 @@ class JobPodBacula(Job, metaclass=ABCMeta):
         return True
 
     def execute_pod(self, namespace, podyaml):
-        exist = self._plugin.check_pod(namespace=namespace, name=BACULABACKUPPODNAME)
-        if exist is not None:
-            logging.debug('execute_pod:exist!')
+        prev_bacula_pod_name = self._plugin.check_bacula_pod(namespace, self.jobname)
+        if prev_bacula_pod_name:
+            logging.debug('A previous bacula-backup pod exists! Name: {}'.format(prev_bacula_pod_name))
+            # TODO: Decide whether or not to remove the bacula-backup pod
+            self._io.send_info('A previous bacula-backup pod exists. Name: {}'.format(prev_bacula_pod_name))
             response = False
             for a in range(self.timeout):
                 time.sleep(1)
-                response = self._plugin.check_gone_backup_pod(namespace)
+                response = self._plugin.check_gone_backup_pod(namespace, prev_bacula_pod_name)
                 if isinstance(response, dict) and 'error' in response:
                     self._handle_error(CANNOT_REMOVE_BACKUP_POD_ERR.format(parse_json_descr(response)))
                     return False
@@ -255,7 +257,7 @@ class JobPodBacula(Job, metaclass=ABCMeta):
                     if response:
                         break
             if not response:
-                self._handle_error(POD_EXIST_ERR.format(namespace=namespace, podname=BACULABACKUPPODNAME))
+                self._handle_error(POD_EXIST_ERR.format(namespace=namespace, podname=prev_bacula_pod_name))
                 return False
 
         poddata = yaml.safe_load(podyaml)
@@ -265,7 +267,7 @@ class JobPodBacula(Job, metaclass=ABCMeta):
         else:
             for seq in range(self.timeout):
                 time.sleep(1)
-                isready = self._plugin.backup_pod_isready(namespace, seq)
+                isready = self._plugin.backup_pod_isready(namespace, podname=get_backup_pod_name(self.jobname), seq=seq)
                 if isinstance(isready, dict) and 'error' in isready:
                     self._handle_error(CANNOT_CREATE_BACKUP_POD_ERR.format(parse_json_descr(isready)))
                     break
@@ -279,7 +281,7 @@ class JobPodBacula(Job, metaclass=ABCMeta):
     def delete_pod(self, namespace, force=False):
         for a in range(self.timeout):
             time.sleep(1)
-            response = self._plugin.check_gone_backup_pod(namespace, force=force)
+            response = self._plugin.check_gone_backup_pod(namespace, get_backup_pod_name(self.jobname), force=force)
             if isinstance(response, dict) and 'error' in response:
                 self._handle_error(CANNOT_REMOVE_BACKUP_POD_ERR.format(parse_json_descr(response)))
             else:
@@ -302,7 +304,7 @@ class JobPodBacula(Job, metaclass=ABCMeta):
 
     def handle_delete_pod(self, namespace):
         if not self.delete_pod(namespace=namespace):
-            self._handle_error(POD_REMOVE_ERR.format(podname=BACULABACKUPPODNAME))
+            self._handle_error(POD_REMOVE_ERR.format(podname=get_backup_pod_name(self.jobname)))
 
     def handle_tarstderr(self):
         if self.tarexitcode != '0' or len(self.tarstderr) > 0:
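Taken together, these hunks change execute_pod from guarding a single fixed 'bacula-backup' pod to a per-job flow: look up any leftover pod for the same job, wait up to self.timeout seconds for it to disappear, then create the new pod and poll backup_pod_isready under the per-job name. A condensed sketch of the wait step (a simplified pseudostructure, not the plugin's actual code; the method names are the ones touched in this diff):

import time

def wait_for_previous_pod(plugin, namespace, jobname, timeout):
    # check_bacula_pod() returns '' when no pod from an earlier run exists.
    prev = plugin.check_bacula_pod(namespace, jobname)
    if not prev:
        return True
    for _ in range(timeout):
        time.sleep(1)
        # check_gone_backup_pod() also removes the pod once it has terminated.
        if plugin.check_gone_backup_pod(namespace, prev):
            return True
    return False  # a pod from an earlier run of this job is still alive

Because the pod name now embeds the job name and id, jobs running in parallel in the same namespace no longer contend for one shared 'bacula-backup' pod; each job waits on, creates, and deletes only its own pod.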
index 1bcfc4abd5d9808c53bb718fc046f88fb9a07198..97b9d9d17eb2b2c9b7a75f0f5aa1f499e54a55a5 100644 (file)
@@ -25,8 +25,9 @@ import os
 import logging
 from baculak8s.plugins.k8sbackend.baculabackupimage import KUBERNETES_TAR_IMAGE
 
-
-BACULABACKUPPODNAME = 'bacula-backup'
+JOB_NAME_MAX_CHARS = 23
+JOB_ID_MAX_DIGITS = 12
+BACULABACKUPPODNAME = 'bacula-backup-{job_name}-{job_id}'
 # BACULABACKUPIMAGE = "hub.baculasystems.com/bacula-backup:" + KUBERNETES_TAR_IMAGE
 BACULABACKUPIMAGE = "bacula-backup:" + KUBERNETES_TAR_IMAGE
 DEFAULTPODYAML = os.getenv('DEFAULTPODYAML', "/opt/bacula/scripts/bacula-backup.yaml")
@@ -89,6 +90,26 @@ class ImagePullPolicy(object):
                     return p
         return ImagePullPolicy.IfNotPresent
 
+def exists_bacula_pod(pod_list, job):
+    """Get name of first backup pod belong to previous job.
+    
+    :param pod_list: list of pods in namespace
+    :param job: Name of job, without id
+
+    :return: Name of pod of previous job
+    """
+    name_for_search = 'bacula-backup-' + job.split('.')[0][:JOB_NAME_MAX_CHARS].lower() + '-'
+    num_hyphen = name_for_search.count('-')
+    for pod_name in pod_list:
+        if name_for_search in pod_name and num_hyphen == pod_name.count('-'):
+            return pod_name
+    return ''
+
+def get_backup_pod_name(job):
+    # Get the job name and id, truncated so the pod name cannot exceed 63 characters
+    job_name = job.split('.')[0][:JOB_NAME_MAX_CHARS].lower()
+    job_id = job.split(':')[1][:JOB_ID_MAX_DIGITS]
+    return BACULABACKUPPODNAME.format(job_name=job_name, job_id=job_id)
 
 def prepare_backup_pod_yaml(mode='backup', nodename=None, host='localhost', port=9104, token='', namespace='default',
                             pvcname='', image=BACULABACKUPIMAGE, imagepullpolicy=ImagePullPolicy.IfNotPresent, job=''):
@@ -101,4 +122,4 @@ def prepare_backup_pod_yaml(mode='backup', nodename=None, host='localhost', port
       nodenameparam = "nodeName: {nodename}".format(nodename=nodename)
     logging.debug('host:{} port:{} namespace:{} image:{} job:{}'.format(host, port, namespace, image, job))
     return podyaml.format(mode=mode, nodenameparam=nodenameparam, host=host, port=port, token=token, namespace=namespace,
-                          image=image, pvcname=pvcname, podname=BACULABACKUPPODNAME, imagepullpolicy=imagepullpolicy, job=job)
+                          image=image, pvcname=pvcname, podname=get_backup_pod_name(job), imagepullpolicy=imagepullpolicy, job=job)
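For reference, a minimal standalone sketch of the new naming scheme. The constants and function are copied from the hunk above; the shape of the incoming job string ('<JobName>.<timestamp>:<JobId>') is an assumption implied by the split('.') and split(':') calls:

# Copied from the patch above so the sketch runs standalone; the example
# job string is hypothetical.
JOB_NAME_MAX_CHARS = 23
JOB_ID_MAX_DIGITS = 12
BACULABACKUPPODNAME = 'bacula-backup-{job_name}-{job_id}'

def get_backup_pod_name(job):
    job_name = job.split('.')[0][:JOB_NAME_MAX_CHARS].lower()
    job_id = job.split(':')[1][:JOB_ID_MAX_DIGITS]
    return BACULABACKUPPODNAME.format(job_name=job_name, job_id=job_id)

print(get_backup_pod_name('NightlyK8S.2024-09-26_02.05.01:1234'))
# -> bacula-backup-nightlyk8s-1234

Worst case the name is 'bacula-backup-' (14 characters) plus a 23-character job name, a hyphen, and a 12-digit job id: 50 characters, under the 63-character limit the comment in the function refers to.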
index c390c44fefc5d3d9d33debeca4ccc7682825594f..a42e4884eaabb242c8117c83134255f60f472b6f 100644 (file)
@@ -29,7 +29,7 @@ from baculak8s.entities.file_info import (DEFAULT_DIR_MODE, DIRECTORY,
 from baculak8s.entities.k8sobjtype import K8SObjType
 from baculak8s.io.log import Log
 from baculak8s.plugins import k8sbackend
-from baculak8s.plugins.k8sbackend.baculabackup import BACULABACKUPPODNAME
+from baculak8s.plugins.k8sbackend.baculabackup import exists_bacula_pod
 from baculak8s.plugins.k8sbackend.baculaannotations import annotated_namespaced_pods_data
 from baculak8s.plugins.k8sbackend.configmaps import *
 from baculak8s.plugins.k8sbackend.csi_snapshot import *
@@ -641,6 +641,10 @@ class KubernetesPlugin(Plugin):
     def check_pod(self, namespace, name):
         return self.__exec_check_object(lambda: self.corev1api.read_namespaced_pod(name, namespace))
 
+    def check_bacula_pod(self, namespace, job):
+        pod_list = self.get_pods(namespace)
+        return exists_bacula_pod(pod_list, job)
+
     def _check_persistentvolume_claim(self, file_info):
         return self.__exec_check_object(
             lambda: self.corev1api.read_namespaced_persistent_volume_claim(k8sfile2objname(file_info.name),
@@ -919,8 +923,8 @@ class KubernetesPlugin(Plugin):
             'fi': pvcdata.get('fi')
         }
 
-    def backup_pod_status(self, namespace):
-        return self.corev1api.read_namespaced_pod_status(name=BACULABACKUPPODNAME, namespace=namespace)
+    def backup_pod_status(self, namespace, bacula_pod_name):
+        return self.corev1api.read_namespaced_pod_status(name=bacula_pod_name, namespace=namespace)
 
     def pvc_status(self, namespace, pvcname):
         return self.__execute(lambda: self.corev1api.read_namespaced_persistent_volume_claim_status(name=pvcname, namespace=namespace))
@@ -940,8 +944,8 @@ class KubernetesPlugin(Plugin):
     def _vsnapshot_status(self, namespace, snapshot_name):
         return self.__execute(lambda: self.crd_api.get_namespaced_custom_object_status(**prepare_snapshot_action(namespace, snapshot_name)))
 
-    def backup_pod_isready(self, namespace, seq=None, podname=BACULABACKUPPODNAME):
-        pod = self.backup_pod_status(namespace)
+    def backup_pod_isready(self, namespace, podname, seq=None):
+        pod = self.backup_pod_status(namespace, podname)
         status = pod.status
         # logging.debug("backup_pod_isready:status:{} {}".format(type(status), status))
         if status.container_statuses is None:
@@ -1004,7 +1008,7 @@ class KubernetesPlugin(Plugin):
         logging.error('Had an error when trying to get pvc status')
         return True
 
-    def remove_backup_pod(self, namespace, podname=BACULABACKUPPODNAME):
+    def remove_backup_pod(self, namespace, podname):
         logging.debug('remove_backup_pod')
         response = self.__execute(lambda: self.corev1api.delete_namespaced_pod(
             podname, namespace, grace_period_seconds=0,
@@ -1036,8 +1040,8 @@ class KubernetesPlugin(Plugin):
         logging.debug('Volume Snapshot removed `{}`'.format(vsnapshot_name))
         return {}
 
-    def check_gone_backup_pod(self, namespace, force=False):
-        """ Checks if $BACULABACKUPPODNAME at selected namespace is already running.
+    def check_gone_backup_pod(self, namespace, backup_pod_name, force=False):
+        """ Checks if `pod_name` at selected namespace is already running.
             If not then we can proceed with Job. If it terminated but not removed then we will safely remove it.
         Args:
             namespace (str): namespace for Pod
@@ -1050,7 +1054,7 @@ class KubernetesPlugin(Plugin):
         status = None
         gone = False
         try:
-            status = self.backup_pod_status(namespace)
+            status = self.backup_pod_status(namespace, backup_pod_name)
         except ApiException as e:
             if e.status == HTTP_NOT_FOUND:
                 gone = True
@@ -1058,7 +1062,7 @@ class KubernetesPlugin(Plugin):
         finally:
             logging.info("check_gone_backup_pod:gone:" + str(gone))
         if status is not None and (force or status.status.phase not in ['Pending', 'Running']):
-            response = self.remove_backup_pod(namespace)
+            response = self.remove_backup_pod(namespace, backup_pod_name)
             if isinstance(response, dict) and 'error' in response:
                 # propagate error up
                 return response
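The hyphen-count test in exists_bacula_pod (which check_bacula_pod above delegates to) is what keeps apart jobs whose lowercased, truncated names are prefixes of one another. A small standalone illustration; the function is copied from the baculabackup.py hunk and the pod names are made up:

JOB_NAME_MAX_CHARS = 23

def exists_bacula_pod(pod_list, job):
    name_for_search = 'bacula-backup-' + job.split('.')[0][:JOB_NAME_MAX_CHARS].lower() + '-'
    num_hyphen = name_for_search.count('-')
    for pod_name in pod_list:
        if name_for_search in pod_name and num_hyphen == pod_name.count('-'):
            return pod_name
    return ''

pods = ['bacula-backup-myjob-extra-7', 'bacula-backup-myjob-12']
print(exists_bacula_pod(pods, 'MyJob.2024-09-26_02.05.01:12'))
# -> bacula-backup-myjob-12
# 'bacula-backup-myjob-extra-7' also contains the prefix 'bacula-backup-myjob-',
# but its extra hyphen marks it as belonging to a different job ('MyJob-extra'),
# so the equal-hyphen-count check rejects it.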
diff --git a/regress/scripts/kubernetes/kubernetes-plugin-test-0006-bacula-dir.conf.in b/regress/scripts/kubernetes/kubernetes-plugin-test-0006-bacula-dir.conf.in
new file mode 100644 (file)
index 0000000..83bdd7d
--- /dev/null
@@ -0,0 +1,246 @@
+#
+# Kubernetes Plugin   Bacula Director Configuration file
+# Target: Test running backup jobs in parallel, in the same and in different namespaces.
+#
+
+Director {                              # define myself
+  Name = @hostname@-dir
+  DIRPort = @dirport@                   # where we listen for UA connections
+  QueryFile = "@scriptdir@/query.sql"
+  WorkingDirectory = "@working_dir@"
+  PidDirectory = "@piddir@"
+  SubSysDirectory = "@subsysdir@"
+  Maximum Concurrent Jobs = 10
+  Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"         # Console password
+  Messages = Standard
+}
+
+JobDefs {
+  Name = "BackupJob"
+  Type = Backup
+  Pool = Default
+  Storage = File
+  Messages = Standard
+  Priority = 10
+  Client=@hostname@-fd
+  Write Bootstrap = "@working_dir@/%n-%f.bsr"
+}
+
+JobDefs {
+  Name = "Default"
+  Type = Backup
+  Client=@hostname@-fd
+  Level = Full
+  Storage = File1
+  Messages = Standard
+  Write Bootstrap = "@working_dir@/%c.bsr"
+  Pool = Default
+  SpoolData = yes
+  Max Run Time = 30min
+}
+
+# List of files to be backed up
+FileSet {
+  Name = "Full Set"
+  Include { Options { signature=SHA1 }
+    File =<@tmpdir@/file-list
+  }
+}
+
+# Client (File Services) to backup
+Client {
+  Name = @hostname@-fd
+  Address = @hostname@
+  FDPort = @fdport@
+  Catalog = MyCatalog
+  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc"          # password for FileDaemon
+  File Retention = 30d                # 30 days
+  Job Retention = 180d                # six months
+  AutoPrune = yes                     # Prune expired Jobs/Files
+  Maximum Concurrent Jobs = 10
+}
+
+# Definition of file storage device
+Storage {
+  Name = File
+  Address = @hostname@                # N.B. Use a fully qualified name here
+  SDPort = @sdport@
+  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+  Device = FileStorage
+  Media Type = File
+  Maximum Concurrent Jobs = 10
+}
+
+# Definition of file storage device
+Storage {
+  Name = File1
+  Address = @hostname@                # N.B. Use a fully qualified name here
+  SDPort = @sdport@
+  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+  Device = FileStorage1
+  Media Type = File1
+  Maximum Concurrent Jobs = 10
+}
+
+# Standard Restore template, to be changed by Console program
+Job {
+  Name = "RestoreFiles"
+  Type = Restore
+  Client=@hostname@-fd
+  FileSet="Full Set"
+  Storage = File1
+  Messages = Standard
+  Pool = Default
+  Where = @tmpdir@/bacula-restores
+  Max Run Time = 30min
+}
+
+# Generic catalog service
+Catalog {
+  Name = MyCatalog
+  @libdbi@
+  dbname = @db_name@; user = @db_user@; password = "@db_password@"
+}
+
+# Reasonable message delivery -- send most everything to email address
+#  and to the console
+Messages {
+  Name = Standard
+  mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+  operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: Intervention needed for %j\" %r"
+# MailOnError = @job_email@ = all, !terminate
+# operator = @job_email@ = mount
+  console = all
+
+  append = "@working_dir@/log" = all, !skipped
+  catalog = all, !skipped
+}
+
+Messages {
+  Name = NoEmail
+  mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+  console = all, !skipped, !terminate, !restored
+  append = "@working_dir@/log" = all, !skipped
+  catalog = all, !skipped
+}
+
+
+# Default pool definition
+Pool {
+  Name = Default
+  Pool Type = Backup
+  Recycle = yes                       # Bacula can automatically recycle Volumes
+  AutoPrune = yes                     # Prune expired volumes
+  Volume Retention = 365d             # one year
+}
+
+
+### Specific configuration for kubernetes tests
+
+#### 01 Test parallel jobs without pvcdata
+FileSet {
+  Name = "Test-K8S-Set-0006-1"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ namespace=@K8S_NAMESPACE_1@ pluginport=9104  fdport=9104"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-1"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-1
+  Maximum Concurrent Jobs = 10
+}
+
+FileSet {
+  Name = "Test-K8S-Set-0006-2"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ namespace=@K8S_NAMESPACE_2@ pluginport=9105 fdport=9105"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-2"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-2
+  Maximum Concurrent Jobs = 10
+}
+
+#### 02 Test parallel jobs with pvcdata in different namespaces
+FileSet {
+  Name = "Test-K8S-Set-0006-3"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0006_1@ namespace=@K8S_NAMESPACE_1@ pluginport=9104  fdport=9104"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-3"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-3
+  Maximum Concurrent Jobs = 10
+}
+
+FileSet {
+  Name = "Test-K8S-Set-0006-4"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N2_0006_1@ namespace=@K8S_NAMESPACE_2@ pluginport=9105 fdport=9105"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-4"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-4
+  Maximum Concurrent Jobs = 10
+}
+
+#### 03 Test parallel jobs with pvcdata in the same namespace
+FileSet {
+  Name = "Test-K8S-Set-0006-5"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0006_1@ namespace=@K8S_NAMESPACE_1@ pluginport=9104  fdport=9104"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-5"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-5
+  Maximum Concurrent Jobs = 10
+}
+
+FileSet {
+  Name = "Test-K8S-Set-0006-6"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0006_3@ namespace=@K8S_NAMESPACE_1@ pluginport=9105 fdport=9105"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-6"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-6
+  Maximum Concurrent Jobs = 10
+}
+
+#### 04 Test parallel jobs in the same pod
+FileSet {
+  Name = "Test-K8S-Set-0006-7"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0006_1@ namespace=@K8S_NAMESPACE_1@ pluginport=9104  fdport=9104"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-7"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-7
+  Maximum Concurrent Jobs = 10
+}
+
+FileSet {
+  Name = "Test-K8S-Set-0006-8"
+  Include { Options { signature=SHA1 }
+    Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0006_2@ namespace=@K8S_NAMESPACE_1@ pluginport=9105 fdport=9105"
+  }
+}
+Job {
+  Name = "Test-K8S-0006-8"
+  JobDefs = Default
+  FileSet = Test-K8S-Set-0006-8
+  Maximum Concurrent Jobs = 10
+}
\ No newline at end of file
diff --git a/regress/scripts/kubernetes/kubernetes-plugin-test-0006.yaml b/regress/scripts/kubernetes/kubernetes-plugin-test-0006.yaml
new file mode 100644 (file)
index 0000000..e499b16
--- /dev/null
@@ -0,0 +1,131 @@
+# testing-ns-0006: Check parallel jobs
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: testing-ns-0006-1
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: testing-ns-0006-2
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-n1-pvc-0006-1
+  namespace: testing-ns-0006-1
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: local-path
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-n1-pvc-0006-2
+  namespace: testing-ns-0006-1
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: local-path
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-n1-pvc-0006-3
+  namespace: testing-ns-0006-1
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: local-path
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-n2-pvc-0006-1
+  namespace: testing-ns-0006-2
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-n1-pod-0006-1
+  namespace: testing-ns-0006-1
+spec:
+  volumes:
+    - name: pvc-0006-1-aasdsadsf
+      persistentVolumeClaim:
+        claimName: test-n1-pvc-0006-1
+    - name: pvc-0006-2-dgdhghrhrhr
+      persistentVolumeClaim:
+        claimName: test-n1-pvc-0006-2
+  containers:
+    - name: test-nginx-container
+      image: nginx
+      imagePullPolicy: IfNotPresent
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/pvc-1"
+          name: pvc-0006-1-aasdsadsf
+        - mountPath: "/pvc-2"
+          name: pvc-0006-2-dgdhghrhrhr
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-n1-pod-0006-2
+  namespace: testing-ns-0006-1
+spec:
+  volumes:
+    - name: pvc-0006-3-asdgregrhrhffq
+      persistentVolumeClaim:
+        claimName: test-n1-pvc-0006-3
+  containers:
+    - name: test-nginx-container
+      image: nginx
+      imagePullPolicy: IfNotPresent
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/pvc-3"
+          name: pvc-0006-3-asdgregrhrhffq
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-n2-pod-0006-1
+  namespace: testing-ns-0006-2
+spec:
+  volumes:
+    - name: pvc-0006-1-gkjguybhejw
+      persistentVolumeClaim:
+        claimName: test-n2-pvc-0006-1
+  containers:
+    - name: test-nginx-container
+      image: nginx
+      imagePullPolicy: IfNotPresent
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/pvc-1"
+          name: pvc-0006-1-gkjguybhejw
index f3d08ff932a7a4de662703c3f0f4157e0c84468b..8f9b5d7d74dd1ac0c43f995b3a93a1b415068c26 100755 (executable)
@@ -134,6 +134,57 @@ run_bconsole
 ((JOBID++))
 }
 
+#
+# do parallel backup job test
+#   generate ${tmp}/blog${ltest}.out job output messages logfile
+#
+# in:
+#       $1 - a test number to perform
+#       $@ - a sequence of job name suffixes
+#
+# for example:
+#   do_regress_parallel_backup_test 10 23 43
+#
+# Explanation:
+#   Run test number 10.
+#   Run the jobs ${JobName}23 and ${JobName}43
+#
+do_regress_parallel_backup_test()
+{
+test_id=$1
+shift
+job_ids=""
+blevel="full"
+
+printf "     backup test${test_id} ... "
+cat << END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/blog${test_id}.out
+status client=${CLIENT}
+setdebug level=500 client=${CLIENT} trace=1
+END_OF_DATA
+counter=1
+for job_id in "$@"; do
+   
+   job_ids="${job_ids} jobid=${JOBID}"
+   echo "run job=${JobName}${job_id} level=${blevel} storage=File1 yes" >>${tmp}/bconcmds
+   ((JOBID++))
+
+done
+
+cat << END_OF_DATA >>${tmp}/bconcmds
+wait
+status client=${CLIENT}
+messages
+setdebug level=0 trace=0 client=${CLIENT}
+llist ${job_ids}
+list files ${job_ids}
+quit
+END_OF_DATA
+run_bconsole
+}
+
 #
 # do simple estimation listing test
 #   generate ${tmp}/elog${ltest}.out job output messages logfile
@@ -250,6 +301,29 @@ else
 fi
 }
 
+#
+# check the expected backup job execution status based on logfile
+#   check for status Successful
+#
+# in:
+# $1 - a test number to examine; we will check the blog${ltest}.out logfile
+# $2 - number of jobs to examine
+#
+check_regress_parallel_backup_statusT()
+{
+   ltest=$1
+   n_jobs=$2
+   RET=`grep "jobstatus: " ${tmp}/blog${ltest}.out | sed 's/^[[:space:]]*//g' | awk '{print $2}' | grep "T" | wc -l`
+   ERRS=$((`grep "joberrors: " ${tmp}/blog${ltest}.out | awk '{print $2}' | awk '{sum+=$1} END {print sum}'`+0))
+   if [ "$RET" != "$n_jobs" -o $ERRS -ne 0 ]
+   then
+      ((bstat++))
+      return 1
+   else
+      return 0
+   fi
+}
+
 #
 # check the expected backup job execution status based on logfile
 #   check for status Warning
diff --git a/regress/tests/kubernetes/kubernetes-plugin-tests-0006 b/regress/tests/kubernetes/kubernetes-plugin-tests-0006
new file mode 100755 (executable)
index 0000000..82cb3c5
--- /dev/null
@@ -0,0 +1,377 @@
+#!/bin/bash
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+
+#
+# Attempt to back up and restore kubernetes pvcs with several jobs
+#   running in parallel, in the same and in different namespaces
+#
+# Assumes:
+#   - You have a working K8S cluster available
+#   - You can create a storage class with any local-storage provider
+
+#
+# The k8s cluster status (sample output retained from an earlier test, for reference):
+
+# $ kubectl apply -f scripts/kubernetes/kubernetes-plugin-test-0001.yaml
+# namespace/testing-ns-0001-1 created
+# storageclass.storage.k8s.io/local-storage unchanged
+# persistentvolumeclaim/test-persistent-volume-claim-0001 created
+# pod/test-pod-0001 created
+
+
+# $ kubectl -n testing-ns-0001-1 get pods -o wide
+# NAME            READY   STATUS    RESTARTS   AGE     IP            NODE                      NOMINATED NODE   READINESS GATES
+# test-pod-0001   1/1     Running   0          4m59s   10.85.0.124   am-u20-k8s-worker02-bck   <none>           <none>
+
+# $ kubectl -n testing-ns-0001-1 get pvc -o wide 
+# NAME                                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE     VOLUMEMODE
+# test-persistent-volume-claim-0001   Bound    pvc-e4b2c7b7-2679-494c-af61-8e1cac026c4d   1Gi        RWO            local-path     5m29s   Filesystem
+
+# $ kubectl -n testing-ns-0001-1 get svc -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get rs -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get sts -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get storageclass -o wide
+# NAME              PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+# local-path        rancher.io/local-path                         Delete          WaitForFirstConsumer   false                  16h
+# local-storage     kubernetes.io/no-provisioner                  Delete          WaitForFirstConsumer   false                  148m
+# nfs-client        k8s-sigs.io/nfs-subdir-external-provisioner   Delete          Immediate              false                  250d
+# rook-ceph-block   rook-ceph.rbd.csi.ceph.com                    Delete          Immediate              true                   236d
+
+
+# $ kubectl -n testing-ns-0001-1 get volumesnapshotclasses -o wide
+# NAME                      DRIVER                       DELETIONPOLICY   AGE
+# csi-rbdplugin-snapclass   rook-ceph.rbd.csi.ceph.com   Delete           235d
+
+TEST_ID=0006
+TestName="kubernetes-plugin-test-${TEST_ID}"
+JobNameBase="Test-K8S-${TEST_ID}"
+FileSetName="Test-K8S-Set-${TEST_ID}-"
+
+# Variables in tests
+K8S_SCRIPT_YAML_FILE="scripts/kubernetes/kubernetes-plugin-test-${TEST_ID}.yaml"
+K8S_NAMESPACE_1="testing-ns-${TEST_ID}-1"
+K8S_NAMESPACE_2="testing-ns-${TEST_ID}-2"
+
+PVC_N1_0006_1="test-n1-pvc-${TEST_ID}-1"
+PVC_N1_0006_2="test-n1-pvc-${TEST_ID}-2"
+PVC_N1_0006_3="test-n1-pvc-${TEST_ID}-3"
+PVC_N2_0006_1="test-n2-pvc-${TEST_ID}-1"
+
+POD_N1_0006_1="test-n1-pod-${TEST_ID}-1"
+POD_N1_0006_2="test-n1-pod-${TEST_ID}-2"
+POD_N2_0006_1="test-n2-pod-${TEST_ID}-1"
+
+PVC_N1_0006_1_PATH_IN_POD="/pvc-1"
+PVC_N1_0006_2_PATH_IN_POD="/pvc-2"
+PVC_N1_0006_3_PATH_IN_POD="/pvc-3"
+PVC_N2_0006_1_PATH_IN_POD="/pvc-1"
+
+. scripts/functions
+. scripts/regress-utils.sh
+
+. tests/kubernetes/k8s-utils.sh
+
+estat=0
+
+bstat=0
+JOBID=1
+# This is the base of all backup job names
+JobName=${JobNameBase}-
+
+tmp="${tmp}/test-${TEST_ID}"
+mkdir -p ${tmp}
+
+printf "\nInit test: ${TestName}\n"
+
+CONNECTION_ARGS=""
+if [ ! -z $KUBE_FD_CERT_FILE ]
+then
+   setup_self_signed_cert $KUBE_FD_CERT_DIR $KUBE_FD_CERT_NAME
+   CONNECTION_ARGS=" fdkeyfile=$KUBE_FD_KEY_FILE fdcertfile=$KUBE_FD_CERT_FILE "
+fi
+
+if [ ! -z "$KUBE_PROXY_POD_PLUGIN_HOST" ]
+then
+   CONNECTION_ARGS="${CONNECTION_ARGS} pluginhost=${KUBE_PROXY_POD_PLUGIN_HOST} "
+fi
+
+if [ ! -z "$KUBE_BACULA_IMAGE" ]
+then
+   CONNECTION_ARGS="${CONNECTION_ARGS} baculaimage=${KUBE_BACULA_IMAGE} imagepullpolicy=ifNotPresent "
+fi
+
+export debug=1
+scripts/cleanup
+scripts/copy-kubernetes-plugin-confs ${TEST_ID}
+
+printf "\n ... Preparing ...\n"
+
+# export requires variables
+setup_plugin_param "kubernetes:"
+if [ "x$KUBECONFIG" != "x" ]
+then
+   export KUBECONFIG
+   LPLUG="${LPLUG} config='$KUBECONFIG' ${CONNECTION_ARGS}"
+fi
+
+KSTORAGECLASS=`${KUBECTL} get storageclass | grep local | wc -l`
+if [ $KSTORAGECLASS -eq 0 ]
+then
+   echo "Do you need a local storage class. It is to simplify the errors!"
+   exit 1
+fi
+
+
+# check the requirements
+KNODES=`${KUBECTL} get nodes | grep Ready | wc -l`
+if [ $KNODES -eq 0 ]
+then
+   echo "A working Kubernetes cluster required!"
+   exit 1
+fi
+
+# check if K8S_NAMESPACE_1 exists
+KPLUGTEST_1=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_1} " | wc -l`
+KPLUGTEST_2=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_2} " | wc -l`
+if [ $KPLUGTEST_1 -ne 0 ] && [ "x$1" != "xforce" ];
+then
+   echo "Namespace \"${K8S_NAMESPACE_1}\" exist on cluster and no force option specified!"
+   exit 1
+fi
+if [ $KPLUGTEST_2 -ne 0 ] && [ "x$1" != "xforce" ];
+then
+   echo "Namespace \"${K8S_NAMESPACE_2}\" exist on cluster and no force option specified!"
+   exit 1
+fi
+
+# prepare data
+printf "\n ... Apply data ... \n"
+reset_k8s_env() {
+   if [ $KPLUGTEST_1 -ne 0 ]
+   then
+      printf "Removing namespaces: ${K8S_NAMESPACE_1} and ${K8S_NAMESPACE_2}\n"
+      ${KUBECTL} delete ns ${K8S_NAMESPACE_1} > ${tmp}/kube.log 2>&1
+      ${KUBECTL} delete ns ${K8S_NAMESPACE_2} >> ${tmp}/kube.log 2>&1
+      printf "Removed namespaces: ${K8S_NAMESPACE_1} and ${K8S_NAMESPACE_2}\n"
+   fi
+   ${KUBECTL} apply -f ${K8S_SCRIPT_YAML_FILE} >> ${tmp}/kube.log 2>&1
+
+   i=0
+   SPIN=('-' '\\' '|' '/')
+   printf "\n ... Waiting to ready ... \n"
+   while true
+   do
+      kstat_n1=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+      kstat_n2=`${KUBECTL} -n ${K8S_NAMESPACE_2} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+
+      if [ $kstat_n1 -eq 0 ] && [ $kstat_n2 -eq 0 ]; then
+         break
+      fi;
+      w=1
+      printf "\b${SPIN[(($i % 4))]}"
+      if [ $i -eq 600 ]
+      then
+         echo "Timeout waiting for test data to populate. Cannot continue!"
+         exit 1
+      fi
+      ((i++))
+      sleep 1
+   done
+   # Command to create a file inside pvc
+   printf "\n ... Refill data in pvcs ...\n"
+   SIZE_MB=1000
+   DD_CMD="dd if=/dev/urandom of=${PVC_N1_0006_1_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+   # Exec command inside pod.
+   ${KUBECTL} exec -it $POD_N1_0006_1 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD"
+   SIZE_MB=1500
+   DD_CMD="dd if=/dev/urandom of=${PVC_N1_0006_2_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+   # Exec command inside pod.
+   ${KUBECTL} exec -it $POD_N1_0006_1 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD"
+   SIZE_MB=1300
+   DD_CMD="dd if=/dev/urandom of=${PVC_N1_0006_3_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+   # Exec command inside pod.
+   ${KUBECTL} exec -it $POD_N1_0006_2 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD"
+
+   SIZE_MB=800
+   DD_CMD="dd if=/dev/urandom of=${PVC_N2_0006_1_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+   # Exec command inside pod.
+   ${KUBECTL} exec -it $POD_N2_0006_1 -n ${K8S_NAMESPACE_2} -- /bin/bash -c "$DD_CMD"
+}
+
+reset_k8s_env
+
+
+# wait a bit for objects to populate.
+sleep 10
+
+
+# get variables
+printf "\n ... Get Environment Variables ...\n"
+${KUBECTL} get ns -o name > ${tmp}/allns.log
+${KUBECTL} get pv -o name > ${tmp}/allpv.log
+${KUBECTL} get storageclass -o name > ${tmp}/allsc.log
+
+
+# Prepare bacula dir configuration
+printf "\n ... Preparing Bacula-dir configuration ...\n"
+export PLUGIN_WORKING=${cwd}/working
+
+out_sed="${tmp}/sed_tmp"
+echo "s%@LPLUG@%${LPLUG}%" > ${out_sed}
+echo "s%@K8S_NAMESPACE_1@%${K8S_NAMESPACE_1}%" >> ${out_sed}
+echo "s%@K8S_NAMESPACE_2@%${K8S_NAMESPACE_2}%" >> ${out_sed}
+echo "s%@PVC_N1_0006_1@%${PVC_N1_0006_1}%" >> ${out_sed}
+echo "s%@PVC_N1_0006_2@%${PVC_N1_0006_2}%" >> ${out_sed}
+echo "s%@PVC_N1_0006_3@%${PVC_N1_0006_3}%" >> ${out_sed}
+echo "s%@PVC_N2_0006_1@%${PVC_N2_0006_1}%" >> ${out_sed}
+
+echo "s%@CONNECTION_ARGS@%${CONNECTION_ARGS}%" >> ${out_sed}
+echo "s%@BACKUP_PROXY_WITHOUT_PVC@%${BACKUP_PROXY_WITHOUT_PVC}%" >> ${out_sed}
+echo "s%@BACKUP_ONLY_PVC@%${BACKUP_ONLY_PVC}%" >> ${out_sed}
+printf "\nCommand launched:\n"
+echo "sed -i -f ${out_sed} ${conf}/bacula-dir.conf"
+
+sed -i -f ${out_sed} ${conf}/bacula-dir.conf
+
+printf "\n ... Done ...\n"
+
+## Variables to restore from other jobs
+JOB_ID_TO_RESTORE_1=0
+JOB_ID_TO_RESTORE_2=0
+
+
+start_test
+
+# We must put the bconsole command in ${cwd}/tmp/bconcmds
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log.out
+label storage=File1 pool=Default volume=TestVolume001
+@setdebug dir level=500 trace=1
+quit
+END_OF_DATA
+
+run_bacula
+
+#############
+## BTEST 1 ##
+#############
+
+btest1 () {
+   # Test 1
+   TEST=1
+   OUTPUT_FILE=${tmp}/blog${TEST}.out
+   JOB_ID_TO_RESTORE_1=${JOBID}
+   do_regress_parallel_backup_test ${TEST} 5 6
+   check_regress_parallel_backup_statusT ${TEST} 2
+   F=$?
+   # Check pvc1 is backed up exactly once
+   F_1=0
+   RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0006_1}.tar" | wc -l`
+   RES=1
+   if [ $RET -ne $RES ]
+   then
+      F_1=1
+      ((bstat++))
+   fi
+   # Check pvc3 data is backed up
+   F_2=0
+   RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0006_3}.tar" | wc -l`
+   RES=1
+   if [ $RET -ne $RES ]
+   then
+      F_2=1
+      ((bstat++))
+   fi
+
+   printf "%s\n" "--------"
+   printf "Results backup test ${TEST}:\n"
+   printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+   printf "%s%s\n" " -> The pvc data of '${PVC_N1_0006_1}' is backup: " $(regress_test_result ${F_1}) 
+   printf "%s%s\n" " -> The pvc data of '${PVC_N1_0006_3}' is backup: " $(regress_test_result ${F_2})
+   printf "%s\n" "--------"
+}
+
+
+#############
+## RTEST 1 ##
+#############
+rtest1 () {
+   TEST=1
+   if [ "${JOB_ID_TO_RESTORE_1}" -eq 0 ]; then
+      printf "%s\n" "--------------"
+      printf "%s\n" "The job id to restore ${TEST} was not assigned."
+      printf "%s\n" "--------------"
+      exit 1
+   fi
+   # Before delete
+   echo "---> Before delete the pvc:" 2>&1 > ${tmp}/rlog${TEST}.out
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0006_1} 2>&1 >> ${tmp}/rlog${TEST}.out
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0006_1} 2>&1 >> ${tmp}/rlog${TEST}.out
+   echo "---> Deleting the pvc and pod:" 2>&1 >> ${tmp}/rlog${TEST}.out
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} delete pod/${POD_N1_0006_1} 2>&1 >> ${tmp}/rlog${TEST}.out
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} delete pvc/${PVC_N1_0006_1} 2>&1 >> ${tmp}/rlog${TEST}.out
+   echo "---> Deleted the pvc(${PVC_N1_0006_1}) and pod (${POD_N1_0006_1})" 2>&1 >> ${tmp}/rlog${TEST}.out
+   actions=(
+      "" # Always starts with empty line. I don't know why is neccesary.
+      "cd @kubernetes/namespaces/${K8S_NAMESPACE_1}/pods/"
+      "mark ${POD_N1_0006_1}.yaml"
+      "cd ../persistentvolumeclaims/"
+      "mark ${PVC_N1_0006_1}.yaml"
+      "mark ${PVC_N1_0006_1}.tar"
+   )
+   do_regress_restore_test_jobid ${TEST} ${JOB_ID_TO_RESTORE_1} "/" "${actions[@]}"
+   check_regress_restore_statusT ${TEST}
+   F=$?
+   # check if the objects were restored on kubernetes
+
+   echo "---> After restoring the pod and pvc:" >> ${tmp}/rlog${TEST}.out 2>&1
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0006_1} >> ${tmp}/rlog${TEST}.out 2>&1
+   RET=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pod/${POD_N1_0006_1} -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+   ${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0006_1} >> ${tmp}/rlog${TEST}.out 2>&1
+   RET_1=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc/${PVC_N1_0006_1} -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+
+   F_1=0 F_2=0
+   rets=($RET $RET_1)
+   fs=("F_1" "F_2")
+
+   for i in ${!rets[@]}; do
+      echo "RET: ${rets[i]}" >> ${tmp}/rlog${TEST}.out
+      if [ ${rets[i]} -ne 1 ]; then
+         eval ${fs[i]}=1
+         rstat=$((rstat+1))
+      fi
+   done
+
+   printf "%s\n" "--------"
+   printf "Result restore test ${TEST}:"
+   printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+   printf "%s%s\n" " -> The pod ${POD_N1_0006_1} was restored: " $(regress_test_result ${F_1})
+   printf "%s%s\n" " -> The pvc ${PVC_N1_0006_1} was restored: " $(regress_test_result ${F_2})
+   printf "%s\n" "--------"
+}
+
+estat=0
+
+bstat=0
+JOBID=1
+# This is the base of all backup job names
+JobName=${JobNameBase}-
+
+btest1
+
+rstat=0
+#rtest1
+
+# stop_bacula
+end_test
\ No newline at end of file