bin
build
scripts/*.conf
+scripts/*/*.conf
+scripts/*.so
test.out
tmp
weird-files
# The next parameters are used to test proxy backups.
#KUBE_PROXY_POD_PLUGIN_HOST="<IP>" # Example FD ip: 192.168.0.99
+# If you connect to the Lab VPN, you can use the IP assigned to you on that VPN.
+
#KUBE_BACULA_IMAGE="bacula-backup:latest"
# If you don't have a certificate, you can generate one automatically by enabling the KUBE_GEN_CERT variable, or generate it manually with these commands:
# openssl req -new -newkey rsa:4096 -nodes -keyout /tmp/snakeoil.key -out /tmp/snakeoil.csr
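# A CSR alone is not enough; it still has to be signed. For a self-signed test
# certificate you can, for example, sign the CSR with its own key (the output path
# below is only an illustration; point it at your KUBE_FD_CERT_FILE setting):
# openssl x509 -req -days 365 -in /tmp/snakeoil.csr -signkey /tmp/snakeoil.key -out /tmp/snakeoil.pem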
#!/bin/sh
-cp -f ${rscripts}/kubernetes-plugin-test-bacula-dir.conf ${conf}/bacula-dir.conf
+
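+# The optional first argument selects a test-specific Director configuration from
+# scripts/kubernetes/ (kubernetes-plugin-test-<name>-bacula-dir.conf); without an
+# argument the default kubernetes-plugin-test-bacula-dir.conf is used.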
+if [ -z "${1}" ]; then
+ cp -f ${rscripts}/kubernetes-plugin-test-bacula-dir.conf ${conf}/bacula-dir.conf
+else
+ cp -f ${rscripts}/kubernetes/kubernetes-plugin-test-${1}-bacula-dir.conf ${conf}/bacula-dir.conf
+fi
cp -f ${rscripts}/test-bacula-sd.conf ${conf}/bacula-sd.conf
cp -f ${rscripts}/test-bacula-fd.conf ${conf}/bacula-fd.conf
cp -f ${rscripts}/test-console.conf ${conf}/bconsole.conf
sed -f ${out} ${rscripts}/plugin-handleXACL-test-bacula-dir.conf.in >${rscripts}/plugin-handleXACL-test-bacula-dir.conf
sed -f ${out} ${rscripts}/swift-plugin-test-bacula-dir.conf.in >${rscripts}/swift-plugin-test-bacula-dir.conf
sed -f ${out} ${rscripts}/rhv-plugin-test-bacula-dir.conf.in >${rscripts}/rhv-plugin-test-bacula-dir.conf
+
+# Kubernetes plugin
sed -f ${out} ${rscripts}/kubernetes-plugin-test-bacula-dir.conf.in >${rscripts}/kubernetes-plugin-test-bacula-dir.conf
+sed -f ${out} ${rscripts}/k8s_backend.in >${rscripts}/k8s_backend
+
+find ${rscripts}/kubernetes/ -type f -name "*.conf.in" | while read -r file; do
+ compiled_conf="${file%.in}"
+  sed -f ${out} "${file}" > "${compiled_conf}"
+done
+
+
+sed -f ${out} ${rscripts}/openshift-plugin-test-bacula-dir.conf.in >${rscripts}/openshift-plugin-test-bacula-dir.conf
sed -f ${out} ${rscripts}/docker-plugin-test-bacula-dir.conf.in >${rscripts}/docker-plugin-test-bacula-dir.conf
sed -f ${out} ${rscripts}/bacula-sd-2disk-virtual.conf.in >${rscripts}/bacula-sd-2disk-virtual.conf
sed -f ${out} ${rscripts}/bacula-sd-virtual-tape.conf.in >${rscripts}/bacula-sd-virtual-tape.conf
--- /dev/null
+# Tests for the Kubernetes plugin
+
+## Test-0001 PVC Data tests
+
+Variables to replace:
+```
+@K8S_NAMESPACE_2@ = The second namespace to back up
+```
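+
+For reference, after substitution these values end up in the plugin line of the generated
+Director FileSets; the result looks roughly like this (the namespace and PVC names below
+are only illustrative):
+
+```
+Plugin = "kubernetes: backup_mode=standard pvcdata=<pvc-name> namespace=<namespace>"
+```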
+
+### 01 Test standard backup with only one PVC
+
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The namespace to back up
+@PVC_0001_1@ = The PVC to back up
+```
+
+### 02 Test standard backup with two PVCs
+
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The namespace to back up
+@PVC_0001_1@ = The first PVC to back up
+@PVC_0001_2@ = The second PVC to back up
+```
+
+### 03 Test standard backup with two PVCs in different namespaces
+
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The first namespace to back up
+@K8S_NAMESPACE_2@ = The second namespace to back up
+@PVC_0001_1@ = The PVC in namespace 1 to back up
+@PVC_0001_3@ = The PVC in namespace 2 to back up
+```
+
+### 04 Test standard backup with two PVCs where one of them is in another namespace that is not specified in the fileset
+ TODO
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The first namespace to back up
+@K8S_NAMESPACE_2@ = The second namespace to back up
+@PVC_0001_1@ = The PVC in namespace 1 to back up
+@PVC_0001_3@ = The PVC in namespace 2 to back up
+```
+
+### 05 Test the feature that skips PVCs in `Terminating` status
+
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The namespace to back up
+@PVC_0001_1@ = The PVC to back up, which will be in `Terminating` status
+@PVC_N1_0001_2@ = The second PVC to back up
+```
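+
+One way to leave a PVC in `Terminating` status for this test (the harness may do it
+differently) is to delete a PVC that is still mounted by a running pod; the
+`kubernetes.io/pvc-protection` finalizer then keeps it in `Terminating`:
+
+```
+kubectl -n <namespace> delete pvc <pvc-name> --wait=false
+kubectl -n <namespace> get pvc <pvc-name>    # STATUS stays Terminating while a pod uses it
+```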
+
+
+### 06 Test the mode change from clone to standard
+
+Note: When the plugin changes the PVC backup mode:
+
+ - The plugin creates two `.tar` files for the same PVC: the clone attempt comes back empty, so the plugin retries the backup (in standard mode) and writes the data again.
+
+Specific variables to replace:
+```
+@K8S_NAMESPACE_1@ = The namespace to back up
+@PVC_0001_1@ = The PVC to back up
+```
+
+## Test-0002
+
+This test is based on ticket: https://bugs.baculasystems.com/view.php?id=10901
+
+### 01 Cluster IPs
--- /dev/null
+#
+# Kubernetes Plugin Bacula Director Configuration file
+# Target: Test the feature that skips PVCs in `Terminating` status.
+#
+
+Director { # define myself
+ Name = @hostname@-dir
+ DIRPort = @dirport@ # where we listen for UA connections
+ QueryFile = "@scriptdir@/query.sql"
+ WorkingDirectory = "@working_dir@"
+ PidDirectory = "@piddir@"
+ SubSysDirectory = "@subsysdir@"
+ Maximum Concurrent Jobs = 1
+ Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
+ Messages = Standard
+}
+
+JobDefs {
+ Name = "BackupJob"
+ Type = Backup
+ Pool = Default
+ Storage = File
+ Messages = Standard
+ Priority = 10
+ Client=@hostname@-fd
+ Write Bootstrap = "@working_dir@/%n-%f.bsr"
+}
+
+JobDefs {
+ Name = "Default"
+ Type = Backup
+ Client=@hostname@-fd
+ Level = Full
+ Storage = File1
+ Messages = Standard
+ Write Bootstrap = "@working_dir@/%c.bsr"
+ Pool = Default
+ SpoolData = yes
+ Max Run Time = 30min
+}
+
+# List of files to be backed up
+FileSet {
+ Name = "Full Set"
+ Include { Options { signature=SHA1 }
+ File =<@tmpdir@/file-list
+ }
+}
+
+# Client (File Services) to backup
+Client {
+ Name = @hostname@-fd
+ Address = @hostname@
+ FDPort = @fdport@
+ Catalog = MyCatalog
+ Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
+ File Retention = 30d # 30 days
+ Job Retention = 180d # six months
+ AutoPrune = yes # Prune expired Jobs/Files
+}
+
+# Definition of file storage device
+Storage {
+ Name = File
+ Address = @hostname@ # N.B. Use a fully qualified name here
+ SDPort = @sdport@
+ Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+ Device = FileStorage
+ Media Type = File
+}
+
+# Definition of file storage device
+Storage {
+ Name = File1
+ Address = @hostname@ # N.B. Use a fully qualified name here
+ SDPort = @sdport@
+ Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+ Device = FileStorage1
+ Media Type = File1
+}
+
+# Standard Restore template, to be changed by Console program
+Job {
+ Name = "RestoreFiles"
+ Type = Restore
+ Client=@hostname@-fd
+ FileSet="Full Set"
+ Storage = File1
+ Messages = Standard
+ Pool = Default
+ Where = @tmpdir@/bacula-restores
+ Max Run Time = 30min
+}
+
+# Generic catalog service
+Catalog {
+ Name = MyCatalog
+ @libdbi@
+ dbname = @db_name@; user = @db_user@; password = "@db_password@"
+}
+
+# Reasonable message delivery -- send most everything to email address
+# and to the console
+Messages {
+ Name = Standard
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+ operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: Intervention needed for %j\" %r"
+# MailOnError = @job_email@ = all, !terminate
+# operator = @job_email@ = mount
+ console = all
+
+ append = "@working_dir@/log" = all, !skipped
+ catalog = all, !skipped
+}
+
+Messages {
+ Name = NoEmail
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+ console = all, !skipped, !terminate, !restored
+ append = "@working_dir@/log" = all, !skipped
+ catalog = all, !skipped
+}
+
+
+# Default pool definition
+Pool {
+ Name = Default
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365d # one year
+}
+
+
+### Specific configuration for Kubernetes tests
+
+#### 01 Test standard backup with only one PVC
+FileSet {
+ Name = "Test-K8S-Set-0001-1"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_1@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-1"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-1
+}
+
+### 02 Test standard backup with two PVCs
+FileSet {
+ Name = "Test-K8S-Set-0001-2"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_1@,@PVC_N1_0001_2@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-2"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-2
+}
+
+
+### 03 Test standard backup with two PVCs in different namespaces
+FileSet {
+ Name = "Test-K8S-Set-0001-3"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_1@,@PVC_N2_0001_3@ namespace=@K8S_NAMESPACE_1@ namespace=@K8S_NAMESPACE_2@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-3"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-3
+}
+
+
+#####
+## Waiting for answer: https://gitlab.baculasystems.com/qa/qa-kubernetes-plugin/-/issues/18
+#####
+### 04 Test standard backup with two PVCs where one of them is in another namespace that is not specified in the fileset
+FileSet {
+ Name = "Test-K8S-Set-0001-4"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_1@,@PVC_N2_0001_3@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-4"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-4
+}
+
+
+### 05 Test the feature that skips PVCs in `Terminating` status
+FileSet {
+ Name = "Test-K8S-Set-0001-5"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_1@,@PVC_N1_0001_2@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-5"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-5
+}
+
+
+### 06 Test the mode change from clone to standard
+FileSet {
+ Name = "Test-K8S-Set-0001-6"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=clone pvcdata=@PVC_N1_0001_1@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-6"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-6
+}
\ No newline at end of file
--- /dev/null
+# testing-ns-0001: Config file to test the pvcdata features
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: testing-ns-0001-1
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: test-pvc-0001-1
+ namespace: testing-ns-0001-1
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod-0001-1
+ namespace: testing-ns-0001-1
+spec:
+ volumes:
+ - name: pvc-dc7cdd05-a92d-416a-b584-fd7043b5bf48
+ persistentVolumeClaim:
+ claimName: test-pvc-0001-1
+ containers:
+ - name: test-nginx-container
+ image: nginx
+ ports:
+ - containerPort: 80
+ name: "http-server"
+ volumeMounts:
+ - mountPath: "/pvc"
+ name: pvc-dc7cdd05-a92d-416a-b584-fd7043b5bf48
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: test-pvc-0001-2
+ namespace: testing-ns-0001-1
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod-0001-2
+ namespace: testing-ns-0001-1
+spec:
+ volumes:
+ - name: pvc-dc7cdd05-a92d-416a-b584-fd7083b5bf48
+ persistentVolumeClaim:
+ claimName: test-pvc-0001-2
+ containers:
+ - name: test-nginx-container
+ image: nginx
+ ports:
+ - containerPort: 80
+ name: "http-server"
+ volumeMounts:
+ - mountPath: "/pvc"
+ name: pvc-dc7cdd05-a92d-416a-b584-fd7083b5bf48
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: testing-ns-0001-2
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: test-pvc-0001-3
+ namespace: testing-ns-0001-2
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod-0001-3
+ namespace: testing-ns-0001-2
+spec:
+ volumes:
+ - name: pvc-0001-3
+ persistentVolumeClaim:
+ claimName: test-pvc-0001-3
+ containers:
+ - name: test-nginx-container
+ image: nginx
+ ports:
+ - containerPort: 80
+ name: "http-server"
+ volumeMounts:
+ - mountPath: "/pvc"
+ name: pvc-0001-3
\ No newline at end of file
--- /dev/null
+#
+# Kubernetes Plugin Bacula Director Configuration file
+# Target: Test-0002 cluster IP tests.
+#
+
+Director { # define myself
+ Name = @hostname@-dir
+ DIRPort = @dirport@ # where we listen for UA connections
+ QueryFile = "@scriptdir@/query.sql"
+ WorkingDirectory = "@working_dir@"
+ PidDirectory = "@piddir@"
+ SubSysDirectory = "@subsysdir@"
+ Maximum Concurrent Jobs = 1
+ Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
+ Messages = Standard
+}
+
+JobDefs {
+ Name = "BackupJob"
+ Type = Backup
+ Pool = Default
+ Storage = File
+ Messages = Standard
+ Priority = 10
+ Client=@hostname@-fd
+ Write Bootstrap = "@working_dir@/%n-%f.bsr"
+}
+
+JobDefs {
+ Name = "Default"
+ Type = Backup
+ Client=@hostname@-fd
+ Level = Full
+ Storage = File1
+ Messages = Standard
+ Write Bootstrap = "@working_dir@/%c.bsr"
+ Pool = Default
+ SpoolData = yes
+ Max Run Time = 30min
+}
+
+# List of files to be backed up
+FileSet {
+ Name = "Full Set"
+ Include { Options { signature=SHA1 }
+ File =<@tmpdir@/file-list
+ }
+}
+
+# Client (File Services) to backup
+Client {
+ Name = @hostname@-fd
+ Address = @hostname@
+ FDPort = @fdport@
+ Catalog = MyCatalog
+ Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
+ File Retention = 30d # 30 days
+ Job Retention = 180d # six months
+ AutoPrune = yes # Prune expired Jobs/Files
+}
+
+# Definition of file storage device
+Storage {
+ Name = File
+ Address = @hostname@ # N.B. Use a fully qualified name here
+ SDPort = @sdport@
+ Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+ Device = FileStorage
+ Media Type = File
+}
+
+# Definition of file storage device
+Storage {
+ Name = File1
+ Address = @hostname@ # N.B. Use a fully qualified name here
+ SDPort = @sdport@
+ Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+ Device = FileStorage1
+ Media Type = File1
+}
+
+# Standard Restore template, to be changed by Console program
+Job {
+ Name = "RestoreFiles"
+ Type = Restore
+ Client=@hostname@-fd
+ FileSet="Full Set"
+ Storage = File1
+ Messages = Standard
+ Pool = Default
+ Where = @tmpdir@/bacula-restores
+ Max Run Time = 30min
+}
+
+# Generic catalog service
+Catalog {
+ Name = MyCatalog
+ @libdbi@
+ dbname = @db_name@; user = @db_user@; password = "@db_password@"
+}
+
+# Reasonable message delivery -- send most everything to email address
+# and to the console
+Messages {
+ Name = Standard
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+ operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: Intervention needed for %j\" %r"
+# MailOnError = @job_email@ = all, !terminate
+# operator = @job_email@ = mount
+ console = all
+
+ append = "@working_dir@/log" = all, !skipped
+ catalog = all, !skipped
+}
+
+Messages {
+ Name = NoEmail
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula regression\) %r\" -s \"Regression: %t %e of %c %l\" %r"
+ console = all, !skipped, !terminate, !restored
+ append = "@working_dir@/log" = all, !skipped
+ catalog = all, !skipped
+}
+
+
+# Default pool definition
+Pool {
+ Name = Default
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365d # one year
+}
+
+
+### Specific configuration for Kubernetes tests
+
+#### 01 Cluster IPs
+FileSet {
+ Name = "Test-K8S-Set-0002-1"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0002-1"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0002-1
+}
+
+
+
+
+
+
+
+
+
+
--- /dev/null
+# testing-ns-0002: Config file for the cluster IP tests (Test-0002)
+# Source:
+# https://rook.io/docs/rook/latest-release/Getting-Started/quickstart/#tldr
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: testing-ns-0002-1
+---
+# Source: rook/deploy/examples/dashboard-ingress-https.yaml
+#
+# This example is for Kubernetes running an nginx-ingress
+# and an ACME (e.g. Let's Encrypt) certificate service
+#
+# The nginx-ingress annotations support the dashboard
+# running using HTTPS with a self-signed certificate
+#
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: rook-ceph-mgr-dashboard
+ namespace: testing-ns-0002-1 # namespace:cluster
+ annotations:
+ kubernetes.io/tls-acme: "true"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ nginx.ingress.kubernetes.io/server-snippet: |
+ proxy_ssl_verify off;
+spec:
+ ingressClassName: "nginx"
+ tls:
+ - hosts:
+ - rook-ceph.example.com
+ secretName: rook-ceph.example.com
+ rules:
+ - host: rook-ceph.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: rook-ceph-mgr-dashboard
+ port:
+ name: https-dashboard
+---
+# Source: rook/deploy/examples/dashboard-loadbalancer.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: rook-ceph-mgr-dashboard-loadbalancer
+ namespace: testing-ns-0002-1 # namespace:cluster
+ labels:
+ app: rook-ceph-mgr
+ rook_cluster: testing-ns-0002-1 # namespace:cluster
+spec:
+ ports:
+ - name: dashboard
+ port: 8443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: rook-ceph-mgr
+ mgr_role: active
+ rook_cluster: testing-ns-0002-1
+ sessionAffinity: None
+ type: LoadBalancer
+ externalIPs:
+ - 10.0.100.35
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: rook-ceph-mgr-dashboard-external-http
+ namespace: testing-ns-0002-1 # namespace:cluster
+ labels:
+ app: rook-ceph-mgr
+ rook_cluster: testing-ns-0002-1 # namespace:cluster
+spec:
+ ports:
+ - name: dashboard
+ port: 7000
+ protocol: TCP
+ targetPort: 7000
+ selector:
+ app: rook-ceph-mgr
+ mgr_role: active
+ rook_cluster: testing-ns-0002-1
+ sessionAffinity: None
+ type: NodePort
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod-0002-1
+ namespace: testing-ns-0002-1
+spec:
+ containers:
+ - name: test-nginx-container
+ image: nginx
+ ports:
+ - containerPort: 8080
+ name: "http-server"
\ No newline at end of file
--- /dev/null
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: plugintest
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: plugintest-secrets
+ namespace: plugintest
+ labels:
+ app: plugintest
+data:
+ # username: bacula
+ # password: plugintest
+ # secretkey: 5bAoV2CpzBvhBQZaYUX1qYawC00qhrx8cEW0fK1zYkTxVdbxfvWMyi0h5QbweJkq
+ username: YmFjdWxhCg==
+ password: cGx1Z2ludGVzdAo=
+ secretkey: NWJBb1YyQ3B6QnZoQlFaYVlVWDFxWWF3QzAwcWhyeDhjRVcwZksxellrVHhWZGJ4ZnZXTXlpMGg1UWJ3ZUprcQo=
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: plugintest-configmap
+ namespace: plugintest
+ labels:
+ app: plugintest
+data:
+ database: bacula
+ database_host: 127.0.0.1
+ database_port: '5432'
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: plugintest-subdomain
+ namespace: plugintest
+ labels:
+ app: plugintest
+spec:
+ selector:
+ name: plugintest
+ # clusterIP: None
+ ports:
+ - name: foo # Actually, no port is needed.
+ port: 1234
+ targetPort: 1234
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: plugintest-nginx-service
+ namespace: plugintest
+ labels:
+ app: plugintest-nginx-service
+ tier: backend
+spec:
+ ports:
+ - port: 80
+ name: web
+ clusterIP: None
+ selector:
+ app: plugintest-nginx-web
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: plugintest-persistent-volume-claim
+ namespace: plugintest
+ labels:
+ app: plugintest
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: plugintest3-persistent-volume-claim
+ namespace: plugintest
+ labels:
+ app: plugintest
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: plugintest1
+ namespace: plugintest
+ labels:
+ app: plugintest
+ environment: production
+ # tier: frontend
+spec:
+ hostname: plugintest-1
+ subdomain: plugintest-subdomain
+ containers:
+ - image: busybox:1.28
+ command:
+ - sleep
+ - "3600"
+ name: plugintest
+ volumeMounts:
+ - name: plugintest-persistent-storage
+ mountPath: /data
+ volumes:
+ - name: plugintest-persistent-storage
+ persistentVolumeClaim:
+ claimName: plugintest-persistent-volume-claim
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: plugintest2
+ namespace: plugintest
+ labels:
+ app: plugintest
+spec:
+ hostname: plugintest-2
+ subdomain: plugintest-subdomain
+ containers:
+ - image: busybox:1.28
+ command:
+ - sleep
+ - "3600"
+ name: plugintest
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: plugintest3
+ namespace: plugintest
+ labels:
+ app: plugintest
+ environment: production
+ # tier: frontend
+ annotations:
+ bacula/backup.mode: standard
+ bacula/backup.volumes: plugintest3-persistent-volume-claim
+spec:
+ hostname: plugintest-3
+ subdomain: plugintest-subdomain
+ containers:
+ - image: busybox:1.28
+ command:
+ - sleep
+ - "3600"
+ name: plugintest
+ volumeMounts:
+ - name: plugintest3-persistent-storage
+ mountPath: /data
+ volumes:
+ - name: plugintest3-persistent-storage
+ persistentVolumeClaim:
+ claimName: plugintest3-persistent-volume-claim
+---
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+ name: plugintest-frontend
+ namespace: plugintest
+ labels:
+ app: plugintest-replica
+ tier: frontend
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ tier: frontend
+ template:
+ metadata:
+ labels:
+ tier: frontend
+ spec:
+ containers:
+ - name: plugintest-frontend-test
+ image: dependencytrack/frontend
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: plugintest-nginx-deployment
+ namespace: plugintest
+ labels:
+ app: plugintest-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: plugintest-deployment
+ template:
+ metadata:
+ labels:
+ app: plugintest-deployment
+ spec:
+ containers:
+ - name: plugintest-nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 8080
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: plugintest-nginx-web
+ namespace: plugintest
+spec:
+ selector:
+ matchLabels:
+ app: plugintest-nginx-web
+ serviceName: "nginx"
+ replicas: 3
+ template:
+ metadata:
+ namespace: plugintest
+ labels:
+ app: plugintest-nginx-web
+ tier: backend
+ spec:
+ terminationGracePeriodSeconds: 10
+ containers:
+ - name: plugintest-nginx-web
+ image: k8s.gcr.io/nginx-slim:0.8
+ ports:
+ - containerPort: 80
+ name: web
+ volumeMounts:
+ - name: plugintest-www-data
+ mountPath: /usr/share/nginx/html
+ volumeClaimTemplates:
+ - metadata:
+ name: plugintest-www-data
+ namespace: plugintest
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
--- /dev/null
+#!/bin/bash
+
+#
+# Check that a parameter value in a log is greater than and less than the given bounds.
+#
+# in:
+# $1 - type of log. Allowed values: b,r,e,l
+# $2 - the parameter to check. For example: 'FD Bytes Written', 'SD Bytes Written', 'jobbytes'
+# $3 - lower bound for the greater-than check. Accepts *KB, *MB format. Example: 5, 50KB, 100MB
+# $4 - upper bound for the less-than check. Accepts *KB, *MB format. Example: 5, 50KB, 100MB
+# $5 - the test number; the check reads the log${ltest}.out logfile
+check_regress_size_backup() {
+ type_log=$1
+ param=$2
+ gt_value=$3
+ lt_value=$4
+ n_test=$5
+
+ bytes_written_value=$(get_value_of_parameter_in_log $type_log "${param}" "${n_test}")
+
+ check_regress_number_in_log $type_log "gt" "${param}" "${gt_value}" "${n_test}"
+ F=$?
+ printf "Result expected bytes backed up ;%s; \n" "${F}"
+ printf "%s%s\n" " -> Nº bytes of this backup (${gt_value} < ${bytes_written_value}): " $(regress_test_result ${F})
+
+ check_regress_number_in_log $type_log "lt" "${param}" "${lt_value}" "${n_test}"
+ F=$?
+ printf "Result expected bytes backed up ;%s; \n" "${F}"
+ printf "%s%s\n" " -> Nº bytes of this backup (${bytes_written_value} < ${lt_value}): " $(regress_test_result ${F})
+}
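+# Example call, mirroring how the kubernetes plugin test uses it (log type "b",
+# checking that the backup wrote between 20MB and 30MB in test 40):
+#   check_regress_size_backup "b" "SD Bytes Written" 20MB 30MB 40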
+
+#
+# Set up pod annotations in Kubernetes
+#
+# in:
+# $1: pod_name
+# $2: mode to annotate
+# $3: volumes to annotate
+set_up_k8s_annotations() {
+ pod=$1
+ mode_value=$2
+ vols_value=$3
+ BACKUP_MODE_ANN=bacula/backup.mode
+ BACKUP_VOL_ANN=bacula/backup.volumes
+
+ # --- SetUp
+ ${KUBECTL} annotate pod $pod ${BACKUP_MODE_ANN}=${mode_value} --overwrite > /dev/null
+ ${KUBECTL} annotate pod $pod ${BACKUP_VOL_ANN}=${vols_value} --overwrite > /dev/null
+}
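+# Example call, as used by the kubernetes plugin test before an annotation-driven backup:
+#   set_up_k8s_annotations "plugintest-annotations-test" "standard" "plugintest-persistent-volume-claim-csi"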
+
+end_set_up_k8s_annotations() {
+ pod=$1
+ BACKUP_MODE_ANN=bacula/backup.mode
+ BACKUP_VOL_ANN=bacula/backup.volumes
+
+    # Remove the backup annotations from the pod passed as $1.
+    ${KUBECTL} annotate pod ${pod} ${BACKUP_MODE_ANN}- > /dev/null
+    ${KUBECTL} annotate pod ${pod} ${BACKUP_VOL_ANN}- > /dev/null
+}
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+
+#
+# Attempt to back up and restore Kubernetes objects, including namespaces and
+# persistent volume configuration, using the Kubernetes plugin.
+#
+# The test assumes you have a working K8S cluster available with enough
+# resources to handle the test and an available "plugintest" namespace.
+#
+
+#
+# I used a minikube cluster with the volumesnapshots and csi-hostpath-driver addons:
+# minikube -p test-local stop && minikube -p test-local start && minikube -p test-local addons enable volumesnapshots && minikube -p test-local addons enable csi-hostpath-driver
+#
+
+# $ kubectl apply -f kubernetes-plugintest.yaml
+# namespace/plugintest created
+# secret/plugintest-secrets created
+# configmap/plugintest-configmap created
+# service/plugintest-subdomain created
+# service/plugintest-nginx-service created
+# persistentvolumeclaim/plugintest-persistent-volume-claim created
+# persistentvolumeclaim/plugintest3-persistent-volume-claim created
+# pod/plugintest1 created
+# pod/plugintest2 created
+# pod/plugintest3 created
+# replicaset.apps/plugintest-frontend created
+# deployment.apps/plugintest-nginx-deployment created
+# statefulset.apps/plugintest-nginx-web created
+
+# $ kubectl apply -f kubernetes-plugintest-csi-driver.yaml
+# storageclass.storage.k8s.io/csi-hostpath-sc configured
+# persistentvolumeclaim/plugintest-persistent-volume-claim-csi created
+# persistentvolumeclaim/plugintest-persistent-volume-claim-csi-2 created
+# pod/plugintest-annotations-test created
+
+# $ kubectl -n plugintest get pods -o wide
+# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+# plugintest-annotations-test 0/1 Pending 0 27s <none> <none> <none> <none>
+# plugintest-frontend-2qrh6 1/1 Running 0 40s 10.244.18.4 test-local <none> <none>
+# plugintest-frontend-9jpqf 1/1 Running 0 40s 10.244.18.2 test-local <none> <none>
+# plugintest-frontend-bwdz4 1/1 Running 0 40s 10.244.18.1 test-local <none> <none>
+# plugintest-nginx-deployment-568dfdd98-2h68n 1/1 Running 0 40s 10.244.18.7 test-local <none> <none>
+# plugintest-nginx-deployment-568dfdd98-ltb8v 1/1 Running 0 40s 10.244.18.6 test-local <none> <none>
+# plugintest-nginx-deployment-568dfdd98-lxqt9 1/1 Running 0 40s 10.244.18.5 test-local <none> <none>
+# plugintest-nginx-web-0 0/1 Pending 0 40s <none> <none> <none> <none>
+# plugintest1 0/1 Pending 0 40s <none> <none> <none> <none>
+# plugintest2 1/1 Running 0 40s 10.244.18.3 test-local <none> <none>
+# plugintest3 0/1 Pending 0 40s <none> <none> <none> <none>
+
+
+# $ kubectl -n plugintest get svc -o wide
+# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+# plugintest-nginx-service ClusterIP None <none> 80/TCP 63s app=plugintest-nginx-web
+# plugintest-subdomain ClusterIP 10.97.43.130 <none> 1234/TCP 63s name=plugintest
+
+# $ kubectl -n plugintest get pvc -o wide
+# NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+# plugintest-persistent-volume-claim Bound pvc-3381c472-5b3e-4b74-a999-24f3f7031a61 1Gi RWO standard 4m20s Filesystem
+# plugintest-persistent-volume-claim-csi Bound pvc-9aecf542-5e7a-47c7-b9db-57a3b092bcf1 1Gi RWO csi-hostpath-sc 4m7s Filesystem
+# plugintest-persistent-volume-claim-csi-2 Bound pvc-9c38ea52-e9fd-48c2-a45b-2cba5908ae8c 1Gi RWO csi-hostpath-sc 4m7s Filesystem
+# plugintest-www-data-plugintest-nginx-web-0 Bound pvc-5c6ac739-fe67-476b-9711-282d8a376aa6 1Gi RWO standard 4m20s Filesystem
+# plugintest-www-data-plugintest-nginx-web-1 Bound pvc-0cd78b05-b0c8-433d-8533-b270c8cc685b 1Gi RWO standard 101s Filesystem
+# plugintest-www-data-plugintest-nginx-web-2 Bound pvc-ebdeac73-ff33-498c-8cd6-1d6c96b2c346 1Gi RWO standard 93s Filesystem
+# plugintest3-persistent-volume-claim Bound pvc-7b906501-5bc6-469c-9e9f-87ee5db4fc07 1Gi RWO standard 4m20s Filesystem
+
+# $ kubectl -n plugintest get rs -o wide
+# NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
+# plugintest-frontend 3 3 3 4m41s plugintest-frontend-test gcr.io/google_samples/gb-frontend:v3 tier=frontend
+# plugintest-nginx-deployment-568dfdd98 3 3 3 4m41s plugintest-nginx nginx:latest app=plugintest-deployment,pod-template-hash=568dfdd98
+
+
+# $ kubectl -n plugintest get sts -o wide
+# NAME READY AGE CONTAINERS IMAGES
+# plugintest-nginx-web 3/3 4m55s plugintest-nginx-web k8s.gcr.io/nginx-slim:0.8
+
+# $ kubectl -n plugintest get storageclass -o wide
+# NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+# csi-hostpath-sc hostpath.csi.k8s.io Delete Immediate false 94d
+# standard (default) k8s.io/minikube-hostpath Delete Immediate false 94d
+
+# $ kubectl -n plugintest get volumesnapshotclasses -o wide
+# NAME DRIVER DELETIONPOLICY AGE
+# csi-hostpath-snapclass hostpath.csi.k8s.io Delete 94d
+
+
+TestName="kubernetes-plugin-test"
+JobName="PluginKubernetesTest"
+FileSetName="TestPluginKubernetesSet"
+TESTING_NAMESPACE="plugintest"
+
+. scripts/functions
+. scripts/regress-utils.sh
+
+echo "NEW KUBERNETES"
+
+#
+# Check that a parameter value in a log is greater than and less than the given bounds.
+#
+# in:
+# $1 - type of log. Allowed values: b,r,e,l
+# $2 - the parameter to check. For example: 'FD Bytes Written', 'SD Bytes Written', 'jobbytes'
+# $3 - lower bound for the greater-than check. Accepts *KB, *MB format. Example: 5, 50KB, 100MB
+# $4 - upper bound for the less-than check. Accepts *KB, *MB format. Example: 5, 50KB, 100MB
+# $5 - the test number; the check reads the log${ltest}.out logfile
+check_regress_size_backup() {
+ type_log=$1
+ param=$2
+ gt_value=$3
+ lt_value=$4
+ n_test=$5
+
+ bytes_written_value=$(get_value_of_parameter_in_log $type_log "${param}" "${n_test}")
+
+ check_regress_number_in_log $type_log "gt" "${param}" "${gt_value}" "${n_test}"
+ F=$?
+ printf "Result expected bytes backed up ;%s; \n" "${F}"
+ printf "%s%s\n" " -> Nº bytes of this backup (${gt_value} < ${bytes_written_value}): " $(regress_test_result ${F})
+
+ check_regress_number_in_log $type_log "lt" "${param}" "${lt_value}" "${n_test}"
+ F=$?
+ printf "Result expected bytes backed up ;%s; \n" "${F}"
+ printf "%s%s\n" " -> Nº bytes of this backup (${bytes_written_value} < ${lt_value}): " $(regress_test_result ${F})
+}
+
+#
+# Set up pod annotations in Kubernetes
+#
+# in:
+# $1: pod_name
+# $2: mode to annotate
+# $3: volumes to annotate
+set_up_k8s_annotations() {
+ pod=$1
+ mode_value=$2
+ vols_value=$3
+ BACKUP_MODE_ANN=bacula/backup.mode
+ BACKUP_VOL_ANN=bacula/backup.volumes
+
+ # --- SetUp
+ ${KUBECTL} annotate pod $pod ${BACKUP_MODE_ANN}=${mode_value} --overwrite > /dev/null
+ ${KUBECTL} annotate pod $pod ${BACKUP_VOL_ANN}=${vols_value} --overwrite > /dev/null
+}
+
+end_set_up_k8s_annotations() {
+ pod=$1
+ BACKUP_MODE_ANN=bacula/backup.mode
+ BACKUP_VOL_ANN=bacula/backup.volumes
+
+    # Remove the backup annotations from the pod passed as $1.
+    ${KUBECTL} annotate pod ${pod} ${BACKUP_MODE_ANN}- > /dev/null
+    ${KUBECTL} annotate pod ${pod} ${BACKUP_VOL_ANN}- > /dev/null
+}
+
+export debug=1
+scripts/cleanup
+scripts/copy-kubernetes-plugin-confs
+printf "Preparing ... "
+
+# export required variables
+setup_plugin_param "kubernetes:"
+if [ "x$KUBECONFIG" != "x" ]
+then
+ export KUBECONFIG
+ LPLUG="${LPLUG} config='$KUBECONFIG'"
+fi
+
+mkdir -p ${tmp}
+
+# check the requirements
+KNODES=`${KUBECTL} get nodes | grep Ready | wc -l`
+if [ $KNODES -eq 0 ]
+then
+ echo "A working Kubernetes cluster required!"
+ exit 1
+fi
+
+# check if the plugintest namespace exists
+KPLUGTEST=`${KUBECTL} get ns | grep "^${TESTING_NAMESPACE} " | wc -l`
+if [ $KPLUGTEST -ne 0 -a "x$1" != "xforce" ]
+then
+ echo "Namespace \"${TESTING_NAMESPACE}\" exist on cluster and no force option specified!"
+ exit 1
+fi
+
+# prepare data
+printf "apply data ... "
+if [ $KPLUGTEST -ne 0 ]
+then
+ ${KUBECTL} delete ns ${TESTING_NAMESPACE} 2>&1 > ${tmp}/kube.log
+fi
+${KUBECTL} apply -f scripts/kubernetes-plugintest.yaml 2>&1 >> ${tmp}/kube.log
+
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ ${KUBECTL} apply -f scripts/kubernetes-plugintest-csi-driver.yaml 2>&1 >> ${tmp}/kube.log
+fi
+
+i=0
+SPIN=('-' '\\' '|' '/')
+printf "waiting to ready ... "
+while true
+do
+ kstat=`${KUBECTL} -n ${TESTING_NAMESPACE} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+ if [ $kstat -eq 0 ]
+ then
+ break
+ fi;
+ w=1
+ printf "\b${SPIN[(($i % 4))]}"
+ if [ $i -eq 600 ]
+ then
+ echo "Timeout waiting for test data to populate. Cannot continue!"
+ exit 1
+ fi
+ ((i++))
+ sleep 1
+done
+printf "\b"
+
+printf "Refill data in pvcs...\n"
+POD_NAME="plugintest1"
+POD_PATH="/data"
+# Command to create a 15MB file inside the pvc
+DD_CMD="dd if=/dev/urandom of=${POD_PATH}/file15MB bs=1M count=15"
+# Exec command inside pod.
+${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/ash -c "$DD_CMD"
+
+sleep 3
+
+printf "File of 15MB created in pod ${POD_NAME}\n"
+
+for N in `seq 0 2`
+do
+ POD_NAME="plugintest-nginx-web-${N}"
+ POD_PATH="/usr/share/nginx/html"
+ FILE_SIZE=$((10 * ${N}))
+    # Command to create a ${FILE_SIZE}MB file inside the pvc
+ DD_CMD="dd if=/dev/urandom of=${POD_PATH}/file${FILE_SIZE}MB bs=1M count=${FILE_SIZE}"
+ ${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/bash -c "$DD_CMD"
+ sleep 3
+ printf "File of ${FILE_SIZE}MB created in pod ${POD_NAME}\n"
+done
+
+POD_NAME="plugintest3"
+DD_CMD="dd if=/dev/urandom of=/data/file30MB bs=1M count=30"
+
+# Exec command inside pod.
+${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/ash -c "$DD_CMD"
+
+sleep 3
+
+printf "File of 30MB created in pod ${POD_NAME}\n"
+
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ POD_NAME="plugintest-annotations-test"
+ POD_PATH="/data"
+ # Command to create a file of 10M inside pvc
+ DD_CMD="dd if=/dev/urandom of=${POD_PATH}/file10MB bs=1M count=10"
+ # Exec command inside pod.
+ ${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/ash -c "$DD_CMD"
+
+ sleep 3
+ POD_PATH="/data-csi"
+ # Command to create a file of 20M inside pvc
+ DD_CMD="dd if=/dev/urandom of=${POD_PATH}/file20MB bs=1M count=20"
+ # Exec command inside pod.
+ ${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/ash -c "$DD_CMD"
+
+ sleep 3
+ POD_PATH="/data-csi-2"
+ # Command to create a file of 30M inside pvc
+ DD_CMD="dd if=/dev/urandom of=${POD_PATH}/file30MB bs=1M count=30"
+ # Exec command inside pod.
+ ${KUBECTL} exec -it $POD_NAME -n ${TESTING_NAMESPACE} -- /bin/ash -c "$DD_CMD"
+
+ sleep 3
+fi
+
+
+printf "End refill data in pvcs...\n"
+
+# wait a bit for the objects to populate.
+sleep 30
+
+# get variables
+printf "variables ... "
+${KUBECTL} get ns -o name > ${tmp}/allns.log
+${KUBECTL} get pv -o name > ${tmp}/allpv.log
+PV1=`${KUBECTL} -n ${TESTING_NAMESPACE} get pvc/plugintest-persistent-volume-claim -o go-template='{{.spec.volumeName}}'`
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ PV_CSI1=`${KUBECTL} -n ${TESTING_NAMESPACE} get pvc/plugintest-persistent-volume-claim-csi -o go-template='{{.spec.volumeName}}'`
+fi
+BACKUP_ONLY_PVC="plugintest-persistent-volume-claim"
+BACKUP_PROXY_ARGS=""
+
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ if [ $KUBE_GEN_CERT -ne 0 ]
+ then
+ setup_self_signed_cert $KUBE_FD_CERT_DIR $KUBE_FD_CERT_NAME
+ if [ "x$KUBE_FD_CERT_FILE" = "x" ]
+ then
+ KUBE_FD_CERT_FILE=${KUBE_FD_CERT_DIR}${KUBE_FD_CERT_NAME}.pem
+ KUBE_FD_KEY_FILE=${KUBE_FD_CERT_DIR}${KUBE_FD_CERT_NAME}.key
+ fi
+ fi
+ BACKUP_PROXY_CERTS="fdkeyfile=$KUBE_FD_KEY_FILE fdcertfile=$KUBE_FD_CERT_FILE"
+ BACKUP_PROXY_WITHOUT_PVC="baculaimage=${KUBE_BACULA_IMAGE} pluginhost=${KUBE_PROXY_POD_PLUGIN_HOST} $BACKUP_PROXY_CERTS"
+ BACKUP_PROXY_ARGS="$BACKUP_PROXY_WITHOUT_PVC pvcdata"
+fi
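+# For illustration, with the example values from the plugin configuration file this
+# expands to something like:
+#   baculaimage=bacula-backup:latest pluginhost=192.168.0.99 fdkeyfile=<key> fdcertfile=<cert> pvcdata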
+
+
+# prepare kubernetes backend
+export PLUGIN_WORKING=${cwd}/working
+
+echo "PV1: $PV1" > ${tmp}/objinfo.log
+
+out_sed="${tmp}/sed_tmp"
+echo "s%@LPLUG@%${LPLUG}%" > ${out_sed}
+echo "s%@PV1@%${PV1}%" >> ${out_sed}
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ echo "s%@PV_CSI1@%${PV_CSI1}%" >> ${out_sed}
+fi
+echo "s%@BACKUP_PROXY_ARGS@%${BACKUP_PROXY_ARGS}%" >> ${out_sed}
+echo "s%@BACKUP_PROXY_WITHOUT_PVC@%${BACKUP_PROXY_WITHOUT_PVC}%" >> ${out_sed}
+echo "s%@BACKUP_ONLY_PVC@%${BACKUP_ONLY_PVC}%" >> ${out_sed}
+
+sed -i -f ${out_sed} ${conf}/bacula-dir.conf
+
+echo "done"
+
+start_test
+
+JOBID=1
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output /dev/null
+messages
+@$out ${cwd}/tmp/log.out
+label storage=File1 pool=Default volume=TestVolume001
+@#setdebug dir level=500 trace=1
+quit
+END_OF_DATA
+
+run_bacula
+
+# special case for all objects
+do_regress_estimate_test
+F=0
+RET=`grep "/@kubernetes/" ${cwd}/tmp/elog.out | grep "yaml" | wc -l`
+# 11+4+4+2+3
+RES=24
+echo "RET: $RET RES: $RES" >> ${tmp}/elog.out
+if [ $RET -le $RES ]
+then
+ F=1
+ ((estat++))
+fi
+printf "%s\n" "--------"
+printf "Results estimate test ${TEST}:\n"
+printf "%s%s\n" " -> Estimated all objects: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+# then estimate with data
+TEST=1
+do_regress_estimate_test ${TEST}
+F=0
+RET=`grep "/@kubernetes/namespaces/" ${tmp}/elog${TEST}.out | grep "yaml" | wc -l`
+RES=25
+PVRET=`grep "/@kubernetes/persistentvolumes/" ${tmp}/elog${TEST}.out | grep "yaml" | wc -l`
+echo "RET: $RET RES: $RES" PVRET: $PVRET >> ${cwd}/tmp/elog${TEST}.out
+if [ $RET -lt $RES -o $PVRET -eq 0 ]
+then
+ F=1
+ ((estat++))
+fi
+printf "%s\n" "--------"
+printf "Results estimate test ${TEST}:\n"
+printf "%s%s\n" " -> Estimated with data: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+((TEST++))
+do_regress_estimate_test ${TEST}
+F=0
+RET=`grep "/@kubernetes/" ${tmp}/elog${TEST}.out | grep "yaml" | wc -l`
+RES=2
+echo "RET: $RET RES: $RES" >> ${cwd}/tmp/elog${TEST}.out
+if [ $RET -lt $RES ]
+then
+ F=1
+ ((estat++))
+fi
+printf "%s\n" "--------"
+printf "Results estimate test ${TEST}:\n"
+printf "%s%s\n" " -> Estimated objects: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+((TEST++))
+do_regress_estimate_test ${TEST}
+F=0
+RET=`grep "/@kubernetes/" ${tmp}/elog${TEST}.out | grep "yaml" | wc -l`
+RES=26
+echo "RET: $RET RES: $RES" >> ${cwd}/tmp/elog${TEST}.out
+if [ $RET -lt $RES ]
+then
+ F=1
+ ((estat++))
+fi
+printf "%s\n" "--------"
+printf "Results estimate test ${TEST}:\n"
+printf "%s%s\n" " -> Estimated objects: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+
+# listing tests goes to estimate tests
+TEST=1
+do_regress_listing_test ${TEST} "/"
+F=0
+RET=`grep "^drwxr-xr-x" ${cwd}/tmp/llog${TEST}.out | wc -l`
+echo "RET: $RET" >> ${cwd}/tmp/llog${TEST}.out
+if [ $RET -ne 3 ]
+then
+ F=1
+ estat=$((estat+1))
+fi
+printf "%s\n" "--------"
+printf "Results listing test ${TEST}:\n"
+printf "%s%s\n" " -> All: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+allns=`cat ${tmp}/allns.log | wc -l`
+((TEST++))
+do_regress_listing_test ${TEST} "namespaces"
+F=0
+RET=`grep "^drwxr-xr-x" ${cwd}/tmp/llog${TEST}.out | wc -l`
+echo "RET: $RET ALLNS: ${allns}" >> ${cwd}/tmp/llog${TEST}.out
+if [ $RET -ne ${allns} ]
+then
+ F=1
+ estat=$((estat+1))
+fi
+printf "%s\n" "--------"
+printf "Results listing test ${TEST}:\n"
+printf "%s%s\n" " -> Namespaces: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+allpv=`cat ${tmp}/allpv.log | wc -l`
+((TEST++))
+do_regress_listing_test ${TEST} "persistentvolumes"
+F=0
+RET=`grep "^-rw-r-----" ${cwd}/tmp/llog${TEST}.out | wc -l`
+echo "RET: $RET ALLPV: ${allpv}" >> ${cwd}/tmp/llog${TEST}.out
+if [ $RET -ne ${allpv} ]
+then
+ F=1
+ estat=$((estat+1))
+fi
+printf "%s\n" "--------"
+printf "Results listing test ${TEST}:\n"
+printf "%s%s\n" " -> Persistent volumes: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+#
+# now do backups
+#
+bstat=0
+# first backup with data
+for TEST in `seq 1 3`
+do
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+done
+
+# Check that pvcdata declared via pod annotations is backed up even without a pvcdata parameter in the fileset
+TEST=4
+expected_string_in_log="plugintest3-persistent-volume-claim.tar"
+
+do_regress_backup_test ${TEST}
+check_regress_backup_statusT ${TEST}
+F=$?
+printf "%s\n" "--------"
+printf "Results backup test ${TEST}:\n"
+printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+F=$?
+printf "%s%s\n" " -> Backup pvc not specified in fileset but yes in pod annotations: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+# second, backups using the proxy backup if it is enabled
+if [ $KUBE_PROXY_BACKUP_TEST -ne 0 ]
+then
+ expected_string_in_log="is compatible with volume snapshot backup"
+ TEST=30
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> Use volume snapshot tecnology: " $(regress_test_result ${F})
+ expected_string_in_log="because it did previously with a pod"
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> Skip volume backup when it was done in pod annotations: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+
+    # Test that when a single pvc is specified, only that pvc is backed up.
+ TEST=31
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ expected_string_in_log="Finish backup volume claim: plugintest-persistent-volume-claim"
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "Result expected FinishBackup ;%s; \n" "${F}"
+ printf "%s%s\n" " -> Backup only one defined pvcdata(plugintest-persistent-volume-claim): " $(regress_test_result ${F})
+ not_expected_string_in_log="Start backup volume claim: plugintest-www-data-plugintest-nginx-web-0"
+ check_regress_string_not_in_log "b" "${not_expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> Backup only one defined pvcdata: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+
+ #
+ # Test with csi-driver to check combinations in pod annotations
+ #
+
+ # Remove other pods annotations
+ POD_WITH_ANNOTATIONS=plugintest3
+ BACKUP_MODE_ANN=bacula/backup.mode
+ BACKUP_VOL_ANN=bacula/backup.volumes
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_MODE_ANN}- > /dev/null
+ # ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_VOL_ANN}- > /dev/null
+
+ POD_WITH_ANNOTATIONS=plugintest-annotations-test
+ BYTES_WRITTEN_PARAM="SD Bytes Written"
+ # 40. Check annotation:
+ # mode: standard
+ # one vol: plugintest-persistent-volume-claim-csi
+ TEST=40
+ BACKUP_MODE_ANN_VALUE=standard
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=20MB
+ LT_VALUE=30MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 41. Check annotation:
+ # mode: standard
+ # two vols: plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ TEST=41
+ BACKUP_MODE_ANN_VALUE=standard
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=50MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 50. Check annotation:
+ # mode: snapshot
+    # one vol (not compatible): plugintest-persistent-volume-claim
+ TEST=50
+ BACKUP_MODE_ANN_VALUE=snapshot
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ expected_string_in_log="is not compatible with snapshot"
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> PVC is not compatible with snapshot mode: " $(regress_test_result ${F})
+
+ GT_VALUE=25MB
+ LT_VALUE=30MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 51. Check annotation:
+ # mode: snapshot
+ # one vol(comp): plugintest-persistent-volume-claim-csi
+ TEST=51
+ BACKUP_MODE_ANN_VALUE=standard
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+
+ GT_VALUE=20MB
+ LT_VALUE=30MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 52. Check annotation:
+ # mode: snapshot
+ # two vols(comp): plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ TEST=52
+ BACKUP_MODE_ANN_VALUE=standard
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+
+ GT_VALUE=50MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 53. Check annotation:
+ # mode: snapshot
+ # one vol comp and another not: plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim
+ TEST=53
+ BACKUP_MODE_ANN_VALUE=snapshot
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ expected_string_in_log="is not compatible with snapshot"
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> PVC is not compatible with snapshot mode: " $(regress_test_result ${F})
+
+ GT_VALUE=45MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 54. Check annotation:
+ # mode: snapshot
+ # one vol not comp and another yes: plugintest-persistent-volume-claim,plugintest-persistent-volume-claim-csi
+ TEST=54
+ BACKUP_MODE_ANN_VALUE=snapshot
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim,plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ expected_string_in_log="is not compatible with snapshot"
+ check_regress_string_in_log "b" "${expected_string_in_log}" ${TEST}
+ F=$?
+ printf "%s%s\n" " -> PVC is not compatible with snapshot mode: " $(regress_test_result ${F})
+
+ GT_VALUE=45MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 60. Check annotation:
+ # mode: clone
+ # one vol: plugintest-persistent-volume-claim-csi
+ TEST=60
+ BACKUP_MODE_ANN_VALUE=clone
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=20MB
+ LT_VALUE=30MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" ${GT_VALUE} ${LT_VALUE} ${TEST}
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # # 61. Check annotation:
+ # # mode: clone
+ # # one vol (no comp): plugintest-persistent-volume-claim
+ # TEST=61
+ # BACKUP_MODE_ANN_VALUE=clone
+ # BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim
+ # # --- SetUp
+ # ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_MODE_ANN}=${BACKUP_MODE_ANN_VALUE} --overwrite > /dev/null
+ # ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_VOL_ANN}=${BACKUP_VOLS_ANN_VALUE} --overwrite > /dev/null
+ # # ---
+
+ # do_regress_backup_test ${TEST}
+ # check_regress_backup_statusT ${TEST}
+ # F=$?
+ # printf "%s\n" "--------"
+ # printf "Results backup test ${TEST}:\n"
+ # printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ # BYTES_WRITTEN_VALUE=$(get_value_of_parameter_in_log "b" "${BYTES_WRITTEN_PARAM}" "${TEST}")
+ # GT_VALUE=20MB
+ # check_regress_number_in_log "b" "gt" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${TEST}"
+ # F=$?
+ # printf "Result expected bytes backed up ;%s; \n" "${F}"
+ # printf "%s%s\n" " -> Nº bytes of this backup (${BACKUP_MODE_ANN_VALUE} mode) one vol annotated inside pod (${GT_VALUE} < ${BYTES_WRITTEN_VALUE}): " $(regress_test_result ${F})
+ # LT_VALUE=30MB
+ # check_regress_number_in_log "b" "lt" "${BYTES_WRITTEN_PARAM}" "${LT_VALUE}" "${TEST}"
+ # F=$?
+ # printf "Result expected bytes backed up ;%s; \n" "${F}"
+ # printf "%s%s\n" " -> Nº bytes of this backup (${BACKUP_MODE_ANN_VALUE} mode) one vol annotated inside pod (${BYTES_WRITTEN_VALUE} < ${LT_VALUE}): " $(regress_test_result ${F})
+
+ # printf "%s\n" "--------"
+
+ # 62. Check annotation:
+ # mode: clone
+ # two vols: plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ TEST=62
+ BACKUP_MODE_ANN_VALUE=clone
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi,plugintest-persistent-volume-claim-csi-2
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=50MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # --- After all
+ ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_MODE_ANN}- > /dev/null
+ ${KUBECTL} annotate pod ${POD_WITH_ANNOTATIONS} ${BACKUP_VOL_ANN}- > /dev/null
+ # --- After All annotations
+
+ # 70. Check pvcdata in fileset:
+ # pvcdata
+ # one vol: plugintest-persistent-volume-claim
+ TEST=70
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=25MB
+ LT_VALUE=30MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 71. Check pvcdata in fileset:
+ # pvcdata
+ # two vols: plugintest-persistent-volume-claim,plugintest3-persistent-volume-claim
+ TEST=71
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=55MB
+ LT_VALUE=60MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 72. Check pvcdata in fileset:
+ # pvcdata
+ # all vols
+ TEST=72
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=140MB
+ LT_VALUE=150MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+    # 73. Check pvcdata in fileset with one volume annotated in standard mode:
+ # pvcdata
+ # all vols
+ TEST=73
+ BACKUP_MODE_ANN_VALUE=standard
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=140MB
+ LT_VALUE=150MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+
+ printf "%s\n" "--------"
+
+    # 74. Check pvcdata in fileset with one volume annotated in clone mode:
+ # pvcdata
+ # all vols
+ TEST=74
+ BACKUP_MODE_ANN_VALUE=clone
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=140MB
+ LT_VALUE=150MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+
+ # 75. Check pvcdata in fileset with one annotation, snapshot mode:
+ # pvcdata
+ # all vols
+ TEST=75
+ BACKUP_MODE_ANN_VALUE=snapshot
+ BACKUP_VOLS_ANN_VALUE=plugintest-persistent-volume-claim-csi
+ # --- SetUp
+ set_up_k8s_annotations "${POD_WITH_ANNOTATIONS}" "${BACKUP_MODE_ANN_VALUE}" "${BACKUP_VOLS_ANN_VALUE}"
+ # ---
+
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+
+ GT_VALUE=140MB
+ LT_VALUE=150MB
+ check_regress_size_backup "b" "${BYTES_WRITTEN_PARAM}" "${GT_VALUE}" "${LT_VALUE}" "${TEST}"
+
+ # --- EndSetup
+ end_set_up_k8s_annotations ${POD_WITH_ANNOTATIONS}
+ # ---
+ printf "%s\n" "--------"
+fi
+
+# now, backup with warnings
+for TEST in `seq 11 13`
+do
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusW ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Result backup test ${TEST}:"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+done
+
+# now, backup jobs expected to fail
+for TEST in `seq 21 22`
+do
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusE ${TEST}
+ F=$?
+ printf "%s\n" "--------"
+ printf "Result failed backup test ${TEST}:"
+ printf "%s%s\n" " -> StatusE: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+done
+
+#do_regress_backup_test 1 "incremental"
+#check_regress_backup_statusT 1
+#F=$?
+#regress_test_result ${F}
+
+# now remove some objects
+dstat=0
+
+TEST=1
+${KUBECTL} -n plugintest delete cm/plugintest-configmap > ${tmp}/rlog${TEST}.out 2>&1
+do_regress_restore_test ${TEST} 1 "" "file=/@kubernetes/namespaces/plugintest/configmaps/plugintest-configmap.yaml"
+check_regress_restore_statusT ${TEST}
+F=$?
+# check if object restored on kubernetes
+${KUBECTL} -n plugintest get cm/plugintest-configmap >> ${tmp}/rlog${TEST}.out 2>&1
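+# The go-template prints the object name only when the object exists, so a
+# line count of 1 means the restore recreated it on the cluster.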
+RET=`${KUBECTL} -n plugintest get cm/plugintest-configmap -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+echo "RET: $RET" >> ${tmp}/rlog${TEST}.out
+if [ $RET -ne 1 ]
+then
+ F=1
+ dstat=$((dstat+1))
+fi
+printf "%s\n" "--------"
+printf "Result restore test ${TEST}:"
+printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+((TEST++))
+${KUBECTL} -n plugintest delete secret/plugintest-secrets > ${tmp}/rlog${TEST}.out 2>&1
+do_regress_restore_test ${TEST} 1 "" "file=/@kubernetes/namespaces/plugintest/secrets/plugintest-secrets.yaml"
+check_regress_restore_statusT ${TEST}
+F=$?
+# check if object restored on kubernetes
+${KUBECTL} -n plugintest get secret/plugintest-secrets >> ${tmp}/rlog${TEST}.out 2>&1
+RET=`${KUBECTL} -n plugintest get secret/plugintest-secrets -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+echo "RET: $RET" >> ${tmp}/rlog${TEST}.out
+if [ $RET -ne 1 ]
+then
+ F=1
+ dstat=$((dstat+1))
+fi
+printf "%s\n" "--------"
+printf "Result restore test ${TEST}:"
+printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+printf "%s\n" "--------"
+
+((TEST++))
+${KUBECTL} delete ns/plugintest > ${tmp}/rlog${TEST}.out 2>&1
+do_regress_restore_test ${TEST} 1 "" "select all"
+check_regress_restore_statusT ${TEST}
+F=$?
+# check if the object was restored on kubernetes; we have to wait until the pods are ready
+W=0
+if [ $F -eq 0 ]
+then
+ i=0
+ while true
+ do
+ kstat=`${KUBECTL} -n plugintest get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+ if [ $kstat -eq 0 ]
+ then
+ break
+ fi;
+ if [ $i -eq 600 ]
+ then
+ echo "Timeout waiting for restore data to populate!"
+ W=1
+ break
+ fi
+ ((i++))
+ sleep 1
+ done
+fi
+if [ $W -eq 0 ]
+then
+ ${KUBECTL} -n plugintest get secret/plugintest-secrets >> ${tmp}/rlog${TEST}.out 2>&1
+ RET=`${KUBECTL} -n plugintest get secret/plugintest-secrets -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+ echo "RET: $RET" >> ${tmp}/rlog${TEST}.out
+ if [ $RET -ne 1 ]
+ then
+ F=1
+ dstat=$((dstat+1))
+ fi
+ printf "%s\n" "--------"
+ printf "Result restore test ${TEST}:"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+fi
+
+
+stop_bacula
+end_test
--- /dev/null
+#!/bin/bash
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+
+#
+# Attempt to back up and restore kubernetes pvcs, skipping any pvc
+# in Terminating status
+#
+# Assumes:
+# - You have a working K8S cluster available
+# - You can create a storage class with any local-storage provider
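+#
+# If no suitable class exists yet, a minimal local StorageClass can be
+# created with a manifest like the following (illustrative sketch only;
+# the test itself only checks that a "local" class is already present):
+#
+#   cat <<EOF | kubectl apply -f -
+#   apiVersion: storage.k8s.io/v1
+#   kind: StorageClass
+#   metadata:
+#     name: local-storage
+#   provisioner: kubernetes.io/no-provisioner
+#   volumeBindingMode: WaitForFirstConsumer
+#   EOF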
+
+#
+# The k8s cluster status:
+
+# $ kubectl apply -f scripts/kubernetes/kubernetes-plugin-test-0001.yaml
+# namespace/testing-ns-0001-1 created
+# storageclass.storage.k8s.io/local-storage unchanged
+# persistentvolumeclaim/test-persistent-volume-claim-0001 created
+# pod/test-pod-0001 created
+
+
+# $ kubectl -n testing-ns-0001-1 get pods -o wide
+# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+# test-pod-0001 1/1 Running 0 4m59s 10.85.0.124 am-u20-k8s-worker02-bck <none> <none>
+
+# $ kubectl -n testing-ns-0001-1 get pvc -o wide
+# NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+# test-persistent-volume-claim-0001 Bound pvc-e4b2c7b7-2679-494c-af61-8e1cac026c4d 1Gi RWO local-path 5m29s Filesystem
+
+# $ kubectl -n testing-ns-0001-1 get svc -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get rs -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get sts -o wide
+# No resources found in testing-ns-0001-1 namespace.
+
+# $ kubectl -n testing-ns-0001-1 get storageclass -o wide
+# NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+# local-path rancher.io/local-path Delete WaitForFirstConsumer false 16h
+# local-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 148m
+# nfs-client k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 250d
+# rook-ceph-block rook-ceph.rbd.csi.ceph.com Delete Immediate true 236d
+
+
+# $ kubectl -n testing-ns-0001-1 get volumesnapshotclasses -o wide
+# NAME DRIVER DELETIONPOLICY AGE
+# csi-rbdplugin-snapclass rook-ceph.rbd.csi.ceph.com Delete 235d
+
+TEST_ID=0001
+TestName="kubernetes-plugin-test-${TEST_ID}"
+JobNameBase="Test-K8S-${TEST_ID}"
+FileSetName="Test-K8S-Set-${TEST_ID}-"
+
+# Variables in tests
+K8S_SCRIPT_YAML_FILE="scripts/kubernetes/kubernetes-plugin-test-${TEST_ID}.yaml"
+K8S_NAMESPACE_1="testing-ns-0001-1"
+K8S_NAMESPACE_2="testing-ns-0001-2"
+PVC_N1_0001_1="test-pvc-0001-1"
+PVC_N1_0001_2="test-pvc-0001-2"
+PVC_N2_0001_3="test-pvc-0001-3"
+POD_N1_0001_1="test-pod-0001-1"
+POD_N1_0001_2="test-pod-0001-2"
+POD_N2_0001_3="test-pod-0001-3"
+PVC_PATH_IN_POD="/pvc"
+
+. scripts/functions
+. scripts/regress-utils.sh
+
+. tests/kubernetes/k8s-utils.sh
+
+printf "\nInit test: ${TestName}\n"
+
+CONNECTION_ARGS=""
+if [ ! -z "$KUBE_FD_CERT_FILE" ]
+then
+ setup_self_signed_cert $KUBE_FD_CERT_DIR $KUBE_FD_CERT_NAME
+ CONNECTION_ARGS=" fdkeyfile=$KUBE_FD_KEY_FILE fdcertfile=$KUBE_FD_CERT_FILE "
+fi
+
+if [ ! -z "$KUBE_PROXY_POD_PLUGIN_HOST" ]
+then
+ CONNECTION_ARGS="${CONNECTION_ARGS} pluginhost=${KUBE_PROXY_POD_PLUGIN_HOST} "
+fi
+
+if [ ! -z "$KUBE_BACULA_IMAGE" ]
+then
+ CONNECTION_ARGS="${CONNECTION_ARGS} baculaimage=${KUBE_BACULA_IMAGE} "
+fi
+
+export debug=1
+scripts/cleanup
+scripts/copy-kubernetes-plugin-confs ${TEST_ID}
+
+printf "\n ... Preparing ...\n"
+
+# export required variables
+setup_plugin_param "kubernetes:"
+if [ "x$KUBECONFIG" != "x" ]
+then
+ export KUBECONFIG
+ LPLUG="${LPLUG} config='$KUBECONFIG' ${CONNECTION_ARGS}"
+fi
+
+KSTORAGECLASS=`${KUBECTL} get storageclass | grep local | wc -l`
+if [ $KSTORAGECLASS -eq 0 ]
+then
+ echo "Do you need a local storage class. It is to simplify the errors!"
+ exit 1
+fi
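+# (A minimal no-provisioner StorageClass is sufficient; see the illustrative
+# manifest in the header comments above for one way to create it.)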
+
+tmp="${tmp}/test-${TEST_ID}"
+
+mkdir -p ${tmp}
+
+# check the requirements
+KNODES=`${KUBECTL} get nodes | grep Ready | wc -l`
+if [ $KNODES -eq 0 ]
+then
+ echo "A working Kubernetes cluster required!"
+ exit 1
+fi
+
+# check if K8S_NAMESPACE_1 or K8S_NAMESPACE_2 exist
+KPLUGTEST_1=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_1} " | wc -l`
+KPLUGTEST_2=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_2} " | wc -l`
+if [ $KPLUGTEST_1 -ne 0 ] && [ "x$1" != "xforce" ];
+then
+ echo "Namespace \"${K8S_NAMESPACE_1}\" exist on cluster and no force option specified!"
+ exit 1
+fi
+if [ $KPLUGTEST_2 -ne 0 ] && [ "x$1" != "xforce" ];
+then
+ echo "Namespace \"${K8S_NAMESPACE_2}\" exist on cluster and no force option specified!"
+ exit 1
+fi
+
+
+# prepare data
+printf "\n ... Apply data ... \n"
+reset_k8s_env() {
+ if [ $KPLUGTEST_1 -ne 0 ]
+ then
+ printf "Removing namespaces: ${K8S_NAMESPACE_1} and ${K8S_NAMESPACE_2}\n"
+ ${KUBECTL} delete ns ${K8S_NAMESPACE_1} > ${tmp}/kube.log 2>&1
+ ${KUBECTL} delete ns ${K8S_NAMESPACE_2} >> ${tmp}/kube.log 2>&1
+ printf "Removed namespaces: ${K8S_NAMESPACE_1} and ${K8S_NAMESPACE_2}\n"
+ fi
+ ${KUBECTL} apply -f ${K8S_SCRIPT_YAML_FILE} >> ${tmp}/kube.log 2>&1
+
+ i=0
+ SPIN=('-' '\\' '|' '/')
+ printf "\n ... Waiting to ready ... \n"
+ while true
+ do
+ # TODO: Check also the pods in namespace_2 are running
+ kstat=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+ if [ $kstat -eq 0 ]
+ then
+ break
+ fi;
+ w=1
+ printf "\b${SPIN[(($i % 4))]}"
+ if [ $i -eq 600 ]
+ then
+ echo "Timeout waiting for test data to populate. Cannot continue!"
+ exit 1
+ fi
+ ((i++))
+ sleep 1
+ done
+ # Command to create a file inside pvc
+ printf "\n ... Refill data in pvcs ...\n"
+ SIZE_MB=10
+ DD_CMD="dd if=/dev/urandom of=${PVC_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+ # Exec command inside pod.
+ ${KUBECTL} exec -it $POD_N1_0001_1 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD"
+ SIZE_MB=$(( ${SIZE_MB} + 10 ))
+ DD_CMD="dd if=/dev/urandom of=${PVC_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+ ${KUBECTL} exec -it $POD_N1_0001_2 -n ${K8S_NAMESPACE_1} -- /bin/bash -c "$DD_CMD"
+ SIZE_MB=$(( ${SIZE_MB} + 10 ))
+ DD_CMD="dd if=/dev/urandom of=${PVC_PATH_IN_POD}/file${SIZE_MB}MB bs=1M count=${SIZE_MB}"
+ ${KUBECTL} exec -it $POD_N2_0001_3 -n ${K8S_NAMESPACE_2} -- /bin/bash -c "$DD_CMD"
+}
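+# To inspect the generated test data by hand, something like the following
+# can be used (illustrative only, not part of the test flow):
+#   ${KUBECTL} exec ${POD_N1_0001_1} -n ${K8S_NAMESPACE_1} -- ls -lh ${PVC_PATH_IN_POD}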
+
+reset_k8s_env
+
+
+# wait a bit for the objects to populate.
+sleep 10
+
+
+# get variables
+printf "\n ... Get Environment Variables ...\n"
+${KUBECTL} get ns -o name > ${tmp}/allns.log
+${KUBECTL} get pv -o name > ${tmp}/allpv.log
+
+
+# Prepare bacula dir configuration
+printf "\n ... Preparing Bacula-dir configuration ...\n"
+export PLUGIN_WORKING=${cwd}/working
+
+out_sed="${tmp}/sed_tmp"
+echo "s%@LPLUG@%${LPLUG}%" > ${out_sed}
+echo "s%@K8S_NAMESPACE_1@%${K8S_NAMESPACE_1}%" >> ${out_sed}
+echo "s%@K8S_NAMESPACE_2@%${K8S_NAMESPACE_2}%" >> ${out_sed}
+echo "s%@PVC_N1_0001_1@%${PVC_N1_0001_1}%" >> ${out_sed}
+echo "s%@PVC_N1_0001_2@%${PVC_N1_0001_2}%" >> ${out_sed}
+echo "s%@PVC_N2_0001_3@%${PVC_N2_0001_3}%" >> ${out_sed}
+
+echo "s%@CONNECTION_ARGS@%${CONNECTION_ARGS}%" >> ${out_sed}
+echo "s%@BACKUP_PROXY_WITHOUT_PVC@%${BACKUP_PROXY_WITHOUT_PVC}%" >> ${out_sed}
+echo "s%@BACKUP_ONLY_PVC@%${BACKUP_ONLY_PVC}%" >> ${out_sed}
+printf "\nCommand launched:\n"
+echo "sed -i -f ${out_sed} ${conf}/bacula-dir.conf"
+
+sed -i -f ${out_sed} ${conf}/bacula-dir.conf
+
+printf "\n ... Done ...\n"
+
+start_test
+
+# We must put the bconsole command in ${cwd}/tmp/bconcmds
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log.out
+label storage=File1 pool=Default volume=TestVolume001
+@setdebug dir level=500 trace=1
+quit
+END_OF_DATA
+
+run_bacula
+
+#############
+## ETEST 1 ##
+#############
+etest1 () {
+ TEST=1
+ OUTPUT_FILE=${tmp}/elog${TEST}.out
+ JobName=${JobNameBase}-${TEST}
+ # special case for all objects
+ do_regress_estimate_test ${TEST}
+ F_1=0
+ RET=`grep "${K8S_NAMESPACE_1}" ${OUTPUT_FILE} | grep "yaml" | wc -l`
+ # (ns 1) + (configmap 1) + (serviceaccount 1) + (pvc 2) + (pods 2)
+ RES=5
+ echo "RET: $RET RES: $RES" >> ${OUTPUT_FILE}
+ if [ $RET -le $RES ]
+ then
+ F_1=1
+ ((estat++))
+ fi
+
+ # Check the pod exists in the estimate
+ F_2=0
+ RET=`grep "${POD_N1_0001_1}.yaml" ${OUTPUT_FILE} | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((estat++))
+ fi
+
+ # Check the pvc data exists in the estimate
+ F_3=0
+ RET=`grep "${PVC_N1_0001_1}.tar" ${OUTPUT_FILE} | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_3=1
+ ((estat++))
+ fi
+
+ # Check namespace 2 is not in the estimate
+ F_4=0
+ RET=`grep "${K8S_NAMESPACE_2}" ${OUTPUT_FILE} | wc -l`
+ RES=0
+ if [ $RET -ne $RES ]
+ then
+ F_4=1
+ ((estat++))
+ fi
+
+ # Check pvc2 is not in the estimate because it was not included in the FileSet.
+ F_5=0
+ RET=`grep "${PVC_N1_0001_2}.tar" ${OUTPUT_FILE} | wc -l`
+ RES=0
+ if [ $RET -ne $RES ]
+ then
+ F_5=1
+ ((estat++))
+ fi
+ printf "%s\n" "--------"
+ printf "Results estimate test ${TEST}:\n"
+ printf "%s%s\n" " -> Estimated all objects: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> Estimated included specific pod in ns1: " $(regress_test_result ${F_2})
+ printf "%s%s\n" " -> Estimated included specific pvc in ns1: " $(regress_test_result ${F_3})
+ printf "%s%s\n" " -> Estimated not include namespace 2: " $(regress_test_result ${F_4})
+ printf "%s%s\n" " -> Estimated not include pvc 2: " $(regress_test_result ${F_5})
+ printf "%s\n" "--------"
+}
+
+#############
+## ETEST 2 ##
+#############
+
+# Check that both pvcs are estimated when they are included in the FileSet
+etest2 () {
+ TEST=2
+ OUTPUT_FILE=${tmp}/elog${TEST}.out
+ JobName=${JobNameBase}-${TEST}
+ # special case for all objects
+ do_regress_estimate_test ${TEST}
+ # Check pvc1 data exists in the estimate
+ F_1=0
+ RET=`grep "${PVC_N1_0001_1}.tar" ${OUTPUT_FILE} | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((estat++))
+ fi
+
+ # Check pvc2 data exists in the estimate
+ F_2=0
+ RET=`grep "${PVC_N1_0001_2}.tar" ${OUTPUT_FILE} | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((estat++))
+ fi
+ printf "%s\n" "--------"
+ printf "Results estimate test ${TEST}:\n"
+ printf "%s%s\n" " -> Estimated included specific pvc1 in ns1: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> Estimated included specific pvc2 in ns1: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
+#############
+## BTEST 1 ##
+#############
+btest1 () {
+ # Test 1
+ TEST=1
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check pvc1 data is backed up once
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check pvc2 data is not backed up
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_2}.tar" | wc -l`
+ RES=0
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+
+ # Check pvc2 yaml is backed up
+ F_3=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_2}.yaml" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_3=1
+ ((bstat++))
+ fi
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' is backup once: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_2}' will not backup: " $(regress_test_result ${F_2})
+ printf "%s%s\n" " -> The pvc yaml of '${PVC_N1_0001_2}' is backup: " $(regress_test_result ${F_3})
+ printf "%s\n" "--------"
+}
+
+#############
+## BTEST 2 ##
+#############
+btest2 () {
+ # Test 2
+ TEST=2
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check pvc1 data is backed up once
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check pvc2 data is backed up
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_2}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' is backup once: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_2}' is backup once: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
+#############
+## BTEST 3 ##
+#############
+btest3() {
+ # Test 3
+ TEST=3
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check pvc1 data is backed up once
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check pvc2 data is not backed up
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_2}.tar" | wc -l`
+ RES=0
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+ # Check pvc3 data in the other namespace is backed up
+ F_3=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N2_0001_3}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_3=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' of namespace '${K8S_NAMESPACE_1}' is backup once: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_2}' of namespace '${K8S_NAMESPACE_1}' is not backup: " $(regress_test_result ${F_2})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N2_0001_3}' of namespace '${K8S_NAMESPACE_2}' is backup once: " $(regress_test_result ${F_3})
+ printf "%s\n" "--------"
+}
+
+#############
+## BTEST 4 ##
+#############
+btest4() {
+ ####### Waiting for answer: https://gitlab.baculasystems.com/qa/qa-kubernetes-plugin/-/issues/18
+ # Test 4
+ TEST=4
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check pvc1 data is backed up once
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' of namespace '${K8S_NAMESPACE_1}' is backup once: " $(regress_test_result ${F_1})
+ printf "%s\n" "--------"
+}
+
+## Prepare scenario for Test 5
+btest5-pre () {
+ printf "%s\n" "++++++++"
+ printf "%s\n" "Preparing escenario to Test 5."
+ printf "%s\n" "Put 'Terminating' status in pvc ${PVC_N1_0001_1}"
+ ${KUBECTL} delete pvc ${PVC_N1_0001_1} -n ${K8S_NAMESPACE_1} 2>&1 > ${tmp}/kube.log &
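+ # Note: deleting a PVC that is still mounted by a running pod leaves it in
+ # 'Terminating' status (held back by the kubernetes.io/pvc-protection
+ # finalizer), which is exactly the state this test needs.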
+ i=0
+ while true
+ do
+ kstat=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc ${PVC_N1_0001_1} | grep -v Terminating | wc -l`
+ sleep 5
+ if [ $kstat -eq 1 ]
+ then
+ break
+ fi;
+ w=1
+ printf "\b${SPIN[(($i % 4))]}"
+ if [ $i -eq 600 ]
+ then
+ echo "Timeout waiting for test data to populate. Cannot continue!"
+ exit 1
+ fi
+ ((i++))
+ sleep 1
+ done
+ printf "%s\n" "++++++++"
+}
+
+btest5-post () {
+ reset_k8s_env
+}
+#############
+## BTEST 5 ##
+#############
+btest5 () {
+ # Test 5
+ TEST=5
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ kstat=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pvc ${PVC_N1_0001_1} | grep -v Terminating | wc -l`
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check pvc1 data is not backed up
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=0
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check pvc2 data is backed up once
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_2}.tar" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+ # Check warning in job log
+ F_3=0
+ RET=`grep "Skip pvc" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}" | grep "Terminating status" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_3=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> PVC is in Terminating status:" $(regress_test_result ${kstat})
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' of namespace '${K8S_NAMESPACE_1}' is backup once: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_2}' of namespace '${K8S_NAMESPACE_1}' is not backup: " $(regress_test_result ${F_2})
+ printf "%s%s\n" " -> The warning of skip pvc backup in joblog: " $(regress_test_result ${F_3})
+ printf "%s\n" "--------"
+}
+
+#############
+## BTEST 6 ##
+#############
+btest6 () {
+ # Test 6
+ TEST=6
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusW ${TEST}
+ F=$?
+ # Check pvc1 data is backed up (expected to appear twice in the job log)
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}.tar" | wc -l`
+ RES=2
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check warning in job log
+ F_2=0
+ RET=`grep "As clone backup is empty" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}" | grep "standard mode" | wc -l`
+ RES=1
+ RET2='grep "As clone backup is empty" ${OUTPUT_FILE} | grep "${PVC_N1_0001_1}" | grep "standard mode" | wc -l'
+ printf "%s\n%s\n%s\n" "+++++++++++++++++++" "Results: ${RET}" "Command:${RET2}"
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT - with warnings: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_1}' of namespace '${K8S_NAMESPACE_1}' is backup: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The error message of change backup mode from clone to standard mode in joblog: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
+
+estat=0
+
+etest1
+etest2
+
+bstat=0
+JOBID=1
+# This JobName is the base of all backup job names
+JobName=${JobNameBase}-
+btest1
+btest2
+btest3
+btest4
+btest5-pre
+btest5
+btest5-post
+btest6
+
+
+stop_bacula
+end_test
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# Copyright (C) 2000-2015 Kern Sibbald
+# License: BSD 2-Clause; see file LICENSE-FOSS
+#
+
+#
+# Attempt to back up and restore kubernetes ingress objects and their services
+#
+# Assumes:
+# - You have a working K8S cluster available
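+# Note: an ingress controller is not strictly required for this test; only
+# the ingress and service object definitions are backed up and restored.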
+
+#
+# The k8s cluster status:
+
+# $ kubectl apply -f kubernetes-plugin-test-0002.yaml
+# namespace/testing-ns-0002-1 created
+# ingress.networking.k8s.io/rook-ceph-mgr-dashboard created
+# service/rook-ceph-mgr-dashboard-loadbalancer created
+# service/rook-ceph-mgr-dashboard-external-http created
+
+# $ kubectl -n testing-ns-0002-1 get ingress
+# NAME CLASS HOSTS ADDRESS PORTS AGE
+# rook-ceph-mgr-dashboard nginx rook-ceph.example.com 80, 443 10s
+
+# $ kubectl -n testing-ns-0002-1 get service
+# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+# rook-ceph-mgr-dashboard-external-http NodePort 10.104.56.66 <none> 7000:31835/TCP 20s
+# rook-ceph-mgr-dashboard-loadbalancer LoadBalancer 10.107.163.59 10.0.100.35 8443:31796/TCP 21s
+
+
+TEST_ID=0002
+TestName="kubernetes-plugin-test-${TEST_ID}"
+JobNameBase="Test-K8S-${TEST_ID}"
+FileSetName="Test-K8S-Set-${TEST_ID}-"
+
+# Variables in tests
+K8S_SCRIPT_YAML_FILE="scripts/kubernetes/kubernetes-plugin-test-${TEST_ID}.yaml"
+K8S_NAMESPACE_1="testing-ns-0002-1"
+POD_N1_0002_1="test-pod-0002-1"
+
+. scripts/functions
+. scripts/regress-utils.sh
+
+. tests/kubernetes/k8s-utils.sh
+
+printf "\nInit test: ${TestName}\n"
+
+CONNECTION_ARGS=""
+if [ ! -z "$KUBE_FD_CERT_FILE" ]
+then
+ setup_self_signed_cert $KUBE_FD_CERT_DIR $KUBE_FD_CERT_NAME
+ CONNECTION_ARGS=" fdkeyfile=$KUBE_FD_KEY_FILE fdcertfile=$KUBE_FD_CERT_FILE "
+fi
+
+if [ ! -z "$KUBE_PROXY_POD_PLUGIN_HOST" ]
+then
+ CONNECTION_ARGS="${CONNECTION_ARGS} pluginhost=${KUBE_PROXY_POD_PLUGIN_HOST} "
+fi
+
+if [ ! -z "$KUBE_BACULA_IMAGE" ]
+then
+ CONNECTION_ARGS="${CONNECTION_ARGS} baculaimage=${KUBE_BACULA_IMAGE} "
+fi
+
+export debug=1
+scripts/cleanup
+scripts/copy-kubernetes-plugin-confs ${TEST_ID}
+
+printf "\n ... Preparing ...\n"
+
+# export required variables
+setup_plugin_param "kubernetes:"
+if [ "x$KUBECONFIG" != "x" ]
+then
+ export KUBECONFIG
+ LPLUG="${LPLUG} config='$KUBECONFIG' ${CONNECTION_ARGS}"
+fi
+
+tmp="${tmp}/test-${TEST_ID}"
+
+mkdir -p ${tmp}
+
+# check the requirements
+KNODES=`${KUBECTL} get nodes | grep Ready | wc -l`
+if [ $KNODES -eq 0 ]
+then
+ echo "A working Kubernetes cluster required!"
+ exit 1
+fi
+
+# check if K8S_NAMESPACE_1 exists
+KPLUGTEST_1=`${KUBECTL} get ns | grep "^${K8S_NAMESPACE_1} " | wc -l`
+if [ $KPLUGTEST_1 -ne 0 ] && [ "x$1" != "xforce" ];
+then
+ echo "Namespace \"${K8S_NAMESPACE_1}\" exist on cluster and no force option specified!"
+ exit 1
+fi
+
+
+# prepare data
+printf "\n ... Apply data ... \n"
+
+remove_testing_env() {
+ printf "Removing namespaces: ${K8S_NAMESPACE_1}\n"
+ ${KUBECTL} delete ns ${K8S_NAMESPACE_1} > ${tmp}/kube.log 2>&1
+ printf "Removed namespaces: ${K8S_NAMESPACE_1}\n"
+}
+
+reset_k8s_env() {
+
+ remove_testing_env
+
+ ${KUBECTL} apply -f ${K8S_SCRIPT_YAML_FILE} >> ${tmp}/kube.log 2>&1
+
+ i=0
+ SPIN=('-' '\\' '|' '/')
+ printf "\n ... Waiting to ready ... \n"
+ while true
+ do
+ # TODO: Check also the pods in namespace_2 are running
+ kstat=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+ if [ $kstat -eq 0 ]
+ then
+ break
+ fi;
+ w=1
+ printf "\b${SPIN[(($i % 4))]}"
+ if [ $i -eq 600 ]
+ then
+ echo "Timeout waiting for test data to populate. Cannot continue!"
+ exit 1
+ fi
+ ((i++))
+ sleep 1
+ done
+}
+
+reset_k8s_env
+
+
+# wait a bit for the objects to populate.
+sleep 10
+
+
+# get variables
+printf "\n ... Get Environment Variables ...\n"
+${KUBECTL} get ns -o name > ${tmp}/allns.log
+${KUBECTL} get pv -o name > ${tmp}/allpv.log
+
+
+# Prepare bacula dir configuration
+printf "\n ... Preparing Bacula-dir configuration ...\n"
+export PLUGIN_WORKING=${cwd}/working
+
+out_sed="${tmp}/sed_tmp"
+echo "s%@LPLUG@%${LPLUG}%" > ${out_sed}
+echo "s%@K8S_NAMESPACE_1@%${K8S_NAMESPACE_1}%" >> ${out_sed}
+
+printf "\nCommand launched:\n"
+echo "sed -i -f ${out_sed} ${conf}/bacula-dir.conf"
+
+sed -i -f ${out_sed} ${conf}/bacula-dir.conf
+
+printf "\n ... Done ...\n"
+
+start_test
+
+# We must put the bconsole command in ${cwd}/tmp/bconcmds
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log.out
+label storage=File1 pool=Default volume=TestVolume001
+@setdebug dir level=500 trace=1
+quit
+END_OF_DATA
+
+run_bacula
+
+#############
+## ETEST 1 ##
+#############
+etest1 () {
+ TEST=1
+ OUTPUT_FILE=${tmp}/elog${TEST}.out
+ JobName=${JobNameBase}-${TEST}
+ # special case for all objects
+ do_regress_estimate_test ${TEST}
+ F_1=0
+ RET=`grep "${K8S_NAMESPACE_1}" ${OUTPUT_FILE} | grep "yaml" | wc -l`
+ # (ns 1) + (auto-created configmap 1) + (serviceaccount 1) + (ingress 1) + (services 2)
+ RES=5
+ echo "RET: $RET RES: $RES" >> ${OUTPUT_FILE}
+ if [ $RET -le $RES ]
+ then
+ F_1=1
+ ((estat++))
+ fi
+
+ # Check the ingress folder exists in the estimate
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "/ingress/" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((estat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results estimate test ${TEST}:\n"
+ printf "%s%s\n" " -> Estimated all objects: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> Estimated included ingress objects: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
+
+#############
+## BTEST 1 ##
+#############
+btest1 () {
+ # Test 1
+ TEST=1
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check the namespace yaml is backed up
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${K8S_NAMESPACE_1}.yaml" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check the ingress folder exists in the backup
+ F_2=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "/ingress/" | wc -l`
+ RES=1
+ if [ $RET -gt $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+
+ # Check the loadbalancer yaml is backed up
+ F_3=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "loadbalancer.yaml" | wc -l`
+ RES=1
+ if [ $RET -ne $RES ]
+ then
+ F_3=1
+ ((bstat++))
+ fi
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The namespace '${K8S_NAMESPACE_1}' was backup: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The ingress objects were backup: " $(regress_test_result ${F_2})
+ printf "%s%s\n" " -> The loadbalancer yaml was backup: " $(regress_test_result ${F_3})
+ printf "%s\n" "--------"
+}
+
+
+#############
+## RTEST 1 ##
+#############
+rtest1 () {
+ TEST=1
+ # Before delete
+ echo "---> Before delete the service:\n" 2>&1 > ${tmp}/rlog${TEST}.out
+ ${KUBECTL} -n ${K8S_NAMESPACE_1} get svc/rook-ceph-mgr-dashboard-loadbalancer 2>&1 >> ${tmp}/rlog${TEST}.out
+
+ ${KUBECTL} -n ${K8S_NAMESPACE_1} delete svc/rook-ceph-mgr-dashboard-loadbalancer >> ${tmp}/rlog${TEST}.out 2>&1
+ do_regress_restore_test ${TEST} 1 "" "file=/@kubernetes/namespaces/${K8S_NAMESPACE_1}/services/rook-ceph-mgr-dashboard-loadbalancer.yaml"
+ check_regress_restore_statusT ${TEST}
+ F=$?
+ # check if object restored on kubernetes
+
+ echo "---> After restore the service:\n" 2>&1 >> ${tmp}/rlog${TEST}.out
+ ${KUBECTL} -n ${K8S_NAMESPACE_1} get svc/rook-ceph-mgr-dashboard-loadbalancer 2>&1 >> ${tmp}/rlog${TEST}.out
+ RET=`${KUBECTL} -n ${K8S_NAMESPACE_1} get svc/rook-ceph-mgr-dashboard-loadbalancer -o go-template='{{.metadata.name}}{{"\n"}}' 2>/dev/null | wc -l`
+ echo "RET: $RET" >> ${tmp}/rlog${TEST}.out
+ if [ $RET -ne 1 ]
+ then
+ F=1
+ dstat=$((dstat+1))
+ fi
+ printf "%s\n" "--------"
+ printf "Result restore test ${TEST}:"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s\n" "--------"
+}
+
+estat=0
+
+etest1
+
+bstat=0
+JOBID=1
+# This JobName is the base of all backup job names
+JobName=${JobNameBase}-
+btest1
+
+dstat=0
+rtest1
+
+stop_bacula
+end_test
\ No newline at end of file