logging.debug('Snapshot is activated')
vsnapshot, pvcdata = self.handle_create_vsnapshot_backup(namespace, pvcdata.get('name'))
self._io.send_info(PVC_BACKUP_MODE_APPLIED_INFO.format(pvcdata.get('name'), BaculaBackupMode.Snapshot))
+ self.current_backup_mode = BaculaBackupMode.Snapshot
if (vsnapshot is None and self.fs_backup_mode != BaculaBackupMode.Standard) or self.fs_backup_mode == BaculaBackupMode.Clone:
if self.fs_backup_mode != BaculaBackupMode.Clone:
cloned_pvc.get('fi').set_name(pvcdata.get('fi').name)
pvcdata = cloned_pvc
is_cloned = True
+ self.current_backup_mode = BaculaBackupMode.Clone
if self.fs_backup_mode == BaculaBackupMode.Standard:
self._io.send_info(PVC_BACKUP_MODE_APPLIED_INFO.format(pvcdata.get('name'), BaculaBackupMode.Standard))
+ self.current_backup_mode = BaculaBackupMode.Standard
logging.debug('Process_pvcdata (Backup_job): {} --- {}'.format(vsnapshot, pvcdata))
if self.prepare_bacula_pod(pvcdata, namespace=namespace, mode='backup'):
logging.debug("Skip pvc. Cause Terminating status")
self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvcname))
continue
+ if self._plugin.pvc_is_pending(namespace, original_pvc):
+ logging.debug("Skip pvc. Cause Pending status")
+ self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvcname))
+ continue
if backupmode == BaculaBackupMode.Snapshot:
logging.debug('Snapshot mode chosen')
logging.debug("Skip pvc. Cause Terminating status")
self._io.send_warning("Skip pvc of second try `{}` because it is in Terminating status.".format(pvc))
continue
+ if self._plugin.pvc_is_pending(nsname, pvcdata):
+ logging.debug("Skip pvc. Cause Pending status")
+ self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc))
+ continue
self._io.send_warning(SECOND_TRY_PVCDATA_INFO.format(pvc))
self._io.send_info(PROCESSING_PVCDATA_START_INFO.format(pvc=pvc))
if self._plugin.pvc_is_terminating(nsname, pvcdata):
logging.debug("Skip pvc. Cause Terminating status")
self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvc))
continue
+ if self._plugin.pvc_is_pending(nsname, pvcdata):
+ logging.debug("Skip pvc. Cause Pending status")
+ self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc))
+ continue
status = self.process_pvcdata(nsname, pvcdata)
if status is None:
# None means unable to prepare listening service during backup
logging.debug("Skip pvc. Cause Terminating status")
self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvc))
continue
+ if self._plugin.pvc_is_pending(namespace, pvc):
+ logging.debug("Skip pvc. Cause Pending status")
+ self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc))
+ continue
# get pvcdata for this volume
pvcdata = self._plugin.get_pvcdata_namespaced(namespace, pvc)
if isinstance(pvcdata, dict) and 'exception' in pvcdata:
ImagePullPolicy,
prepare_backup_pod_yaml)
from baculak8s.plugins.k8sbackend.pvcclone import prepare_backup_clone_yaml
+from baculak8s.plugins.k8sbackend.baculaannotations import BaculaBackupMode
from baculak8s.util.respbody import parse_json_descr
from baculak8s.util.sslserver import DEFAULTTIMEOUT, ConnectionServer
from baculak8s.util.token import generate_token
self.imagepullpolicy = ImagePullPolicy.process_param(params.get('imagepullpolicy'))
self.backup_clone_compatibility = True
self.debug = params.get('debug', 0)
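+ # Track the backup mode actually applied to the PVC currently being processed; defaults to Standard.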
+ self.current_backup_mode = BaculaBackupMode.Standard
def handle_pod_logs(self, connstream):
logmode = ''
continue
logging.debug('Bytes/files in backup: {}/{}'.format(bytes_count, file_count))
logging.debug('Type of job:' + str(self._params.get('type')))
- if self._params.get('type') == 'b' and bytes_count == 0 and file_count < 3:
+ # When the backup mode is Standard, an empty backup (0 bytes) is acceptable, so do not treat it as a failed clone backup.
+ if self._params.get('type') == 'b' and self.current_backup_mode != BaculaBackupMode.Standard and bytes_count == 0 and file_count < 3:
self._io.send_non_fatal_error(WARNING_CLONED_PVC_WAS_NOT_WORKED)
self.backup_clone_compatibility = False
for pvc in pvcs.items:
pvcdata = persistentvolumeclaims_read_namespaced(corev1api, namespace, pvc.metadata.name)
spec = encoder_dump(pvcdata)
- # logging.debug("PVCDATA-OBJ:{}".format(pvcdata))
- pvcsize = k8s_size_to_int(pvcdata.status.capacity['storage'])
+ logging.debug("PVCDATA-OBJ:{}".format(pvcdata))
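+ # A PVC that has not been bound yet (for example one still in Pending status) reports no status.capacity, so count its size as 0.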
+ pvcsize = 0
+ if pvcdata.status.capacity is not None:
+ pvcsize = k8s_size_to_int(pvcdata.status.capacity['storage'])
pvcstotalsize += pvcsize
# logging.debug("PVCDATA-SIZE:{} {}".format(pvcdata.status.capacity['storage'], pvcsize))
# logging.debug("PVCDATA-ENC:{}".format(spec))
logging.exception(ex)
logging.error('Had a error when try to get deletion_timestamp of pvc status')
return True
+
+ # Check whether the PVC is in Pending status.
+ # A PVC is pending when its 'status.phase' field equals 'Pending'.
+ def pvc_is_pending(self, namespace, pvc):
+ if not isinstance(pvc, dict):
+ raise Exception('Error while getting PVC status: pvc must be a `dict`')
+ logging.debug('Checking whether PVC is in Pending status. Namespace: {}, PVC name: {}'.format(namespace, pvc.get('name')))
+
+ pvc_status = self._check_persistentvolume_claim_status(namespace, pvc.get('name'))
+ # logging.debug('Pvc status:{}'.format(pvc_status))
+ try:
+ current_status = pvc_status.status.phase
+ if current_status == 'Pending':
+ return True
+ return False
+ except Exception as ex:
+ logging.debug('Exception occurred: {}'.format(ex))
+ logging.exception(ex)
+ logging.error('Error while getting PVC status')
+ return True
def remove_backup_pod(self, namespace, podname=BACULABACKUPPODNAME):
logging.debug('remove_backup_pod')
Name = "Test-K8S-0001-6"
JobDefs = Default
FileSet = Test-K8S-Set-0001-6
+}
+
+### 07 Test a standard backup of an empty PVC (0 bytes); the backup must not be run twice.
+FileSet {
+ Name = "Test-K8S-Set-0001-7"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_4@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-7"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-7
+}
+
+### 08 Test that PVCs in `Pending` status are skipped
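+### The PVC referenced by @PVC_N1_0001_5@ has no consuming pod, so with the WaitForFirstConsumer storage class it remains in Pending status.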
+FileSet {
+ Name = "Test-K8S-Set-0001-8"
+ Include { Options { signature=SHA1 }
+ Plugin = "@LPLUG@ backup_mode=standard pvcdata=@PVC_N1_0001_5@ namespace=@K8S_NAMESPACE_1@"
+ }
+}
+Job {
+ Name = "Test-K8S-0001-8"
+ JobDefs = Default
+ FileSet = Test-K8S-Set-0001-8
}
\ No newline at end of file
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local-path
+provisioner: rancher.io/local-path
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: testing-ns-0001-1
spec:
volumes:
- - name: pvc-dc7cdd05-a92d-416a-b584-fd7043b5bf48
+ - name: pvc-0001-1
persistentVolumeClaim:
claimName: test-pvc-0001-1
containers:
name: "http-server"
volumeMounts:
- mountPath: "/pvc"
- name: pvc-dc7cdd05-a92d-416a-b584-fd7043b5bf48
+ name: pvc-0001-1
---
apiVersion: v1
kind: PersistentVolumeClaim
namespace: testing-ns-0001-1
spec:
volumes:
- - name: pvc-dc7cdd05-a92d-416a-b584-fd7083b5bf48
+ - name: pvc-0001-2
persistentVolumeClaim:
claimName: test-pvc-0001-2
containers:
name: "http-server"
volumeMounts:
- mountPath: "/pvc"
- name: pvc-dc7cdd05-a92d-416a-b584-fd7083b5bf48
+ name: pvc-0001-2
---
apiVersion: v1
kind: Namespace
name: "http-server"
volumeMounts:
- mountPath: "/pvc"
- name: pvc-0001-3
\ No newline at end of file
+ name: pvc-0001-3
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: test-pvc-0001-4
+ namespace: testing-ns-0001-1
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod-0001-4
+ namespace: testing-ns-0001-1
+spec:
+ volumes:
+ - name: pvc-0001-4
+ persistentVolumeClaim:
+ claimName: test-pvc-0001-4
+ containers:
+ - name: test-nginx-container
+ image: nginx
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 80
+ name: "http-server"
+ volumeMounts:
+ - mountPath: "/pvc"
+ name: pvc-0001-4
+---
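+# test-pvc-0001-5 is not mounted by any pod; with the WaitForFirstConsumer local-path class it therefore stays in Pending status (used by backup test 08).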
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: test-pvc-0001-5
+ namespace: testing-ns-0001-1
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: local-path
\ No newline at end of file
metadata:
name: testing-ns-0003-1
---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local-path
+provisioner: rancher.io/local-path
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+---
apiVersion: v1
kind: ConfigMap
metadata:
reclaimPolicy: Delete
volumeBindingMode: Immediate
---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local-path
+provisioner: rancher.io/local-path
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
PVC_N1_0001_1="test-pvc-0001-1"
PVC_N1_0001_2="test-pvc-0001-2"
PVC_N2_0001_3="test-pvc-0001-3"
+PVC_N1_0001_4="test-pvc-0001-4"
+PVC_N1_0001_5="test-pvc-0001-5"
POD_N1_0001_1="test-pod-0001-1"
POD_N1_0001_2="test-pod-0001-2"
POD_N2_0001_3="test-pod-0001-3"
+POD_N1_0001_4="test-pod-0001-4"
PVC_PATH_IN_POD="/pvc"
. scripts/functions
printf "\n ... Waiting to ready ... \n"
while true
do
- # TODO: Check also the pods in namespace_2 are running
- kstat=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
- if [ $kstat -eq 0 ]
- then
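+ # Wait until every pod in both test namespaces reports Running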
+ kstat_n1=`${KUBECTL} -n ${K8S_NAMESPACE_1} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+ kstat_n2=`${KUBECTL} -n ${K8S_NAMESPACE_2} get pods -o go-template='{{range .items}}{{.status.phase}}{{"\n"}}{{end}}' | grep -v Running | wc -l`
+
+ if [ $kstat_n1 -eq 0 ] && [ $kstat_n2 -eq 0 ]; then
break
fi;
w=1
printf "\n ... Get Environment Variables ...\n"
${KUBECTL} get ns -o name > ${tmp}/allns.log
${KUBECTL} get pv -o name > ${tmp}/allpv.log
+${KUBECTL} get storageclass -o name > ${tmp}/allsc.log
# Prepare bacula dir configuration
echo "s%@PVC_N1_0001_1@%${PVC_N1_0001_1}%" >> ${out_sed}
echo "s%@PVC_N1_0001_2@%${PVC_N1_0001_2}%" >> ${out_sed}
echo "s%@PVC_N2_0001_3@%${PVC_N2_0001_3}%" >> ${out_sed}
+echo "s%@PVC_N1_0001_4@%${PVC_N1_0001_4}%" >> ${out_sed}
+echo "s%@PVC_N1_0001_5@%${PVC_N1_0001_5}%" >> ${out_sed}
echo "s%@CONNECTION_ARGS@%${CONNECTION_ARGS}%" >> ${out_sed}
echo "s%@BACKUP_PROXY_WITHOUT_PVC@%${BACKUP_PROXY_WITHOUT_PVC}%" >> ${out_sed}
F_2=0
RET=`grep "As clone backup is empty" ${OUTPUT_FILE} | grep "standard mode" | wc -l`
RES=1
- RET2=`grep "As clone backup is empty" ${OUTPUT_FILE} | grep "standard mode" | wc -l`
- printf "%s\n%s\n%s\n" "+++++++++++++++++++" "Results: ${RET}" "Command:${RET2}"
+ # printf "%s\n%s\n%s\n" "+++++++++++++++++++" "Expected Result: ${RES}" "Command:${RET}"
if [ $RET -ne $RES ]
then
F_2=1
}
+#############
+## BTEST 7 ##
+#############
+btest7 () {
+ # Test 7
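+ # Standard-mode backup of an empty PVC: the empty-clone warning must not appear and the backup must not be repeated.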
+ TEST=7
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check that the pvc data was backed up
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_4}.tar" | wc -l`
+ RES=1
+ printf "\n%s\n" "RES: ${RES} RET: ${RET}"
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check that the "clone backup is empty" warning does not appear in the job log
+ F_2=0
+ RET=`grep "As clone backup is empty" ${OUTPUT_FILE} | grep "standard mode" | wc -l`
+ RES=0
+ # printf "%s\n%s\n%s\n" "+++++++++++++++++++" "Expected Result: ${RES}" "Command:${RET}"
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_4}' in namespace '${K8S_NAMESPACE_1}' was backed up: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The backup mode is 'Standard', so the backup is not repeated even though the pvc data size is 0: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
+
+#############
+## BTEST 8 ##
+#############
+btest8 () {
+ # Test 8
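+ # Backup with a PVC stuck in Pending status: the PVC must be skipped and a warning reported.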
+ TEST=8
+ OUTPUT_FILE=${tmp}/blog${TEST}.out
+ do_regress_backup_test ${TEST}
+ check_regress_backup_statusT ${TEST}
+ F=$?
+ # Check that the pvc data was not backed up
+ F_1=0
+ RET=`grep "@kubernetes" ${OUTPUT_FILE} | grep "${PVC_N1_0001_5}.tar" | wc -l`
+ RES=0
+ printf "\n%s\n" "RES: ${RES} RET: ${RET}"
+ if [ $RET -ne $RES ]
+ then
+ F_1=1
+ ((bstat++))
+ fi
+ # Check that the "Skip pvc ... Pending status" warning appears in the job log
+ F_2=0
+ RET=`grep "Skip pvc" ${OUTPUT_FILE} | grep "in Pending status" | wc -l`
+ RES=1
+ # printf "%s\n%s\n%s\n" "+++++++++++++++++++" "Expected Result: ${RES}" "Command:${RET}"
+ if [ $RET -ne $RES ]
+ then
+ F_2=1
+ ((bstat++))
+ fi
+
+ printf "%s\n" "--------"
+ printf "Results backup test ${TEST}:\n"
+ printf "%s%s\n" " -> StatusT: " $(regress_test_result ${F})
+ printf "%s%s\n" " -> The pvc data of '${PVC_N1_0001_5}' in namespace '${K8S_NAMESPACE_1}' was not backed up: " $(regress_test_result ${F_1})
+ printf "%s%s\n" " -> The pvc '${PVC_N1_0001_5}' was skipped because it is in Pending status: " $(regress_test_result ${F_2})
+ printf "%s\n" "--------"
+}
+
#############
## RTEST 1 ##
estat=0
etest1
-# etest2
+etest2
bstat=0
JOBID=1
btest5
btest5-post
btest6
+btest7
+btest8
rstat=0
rtest1
rtest2
stop_bacula
end_test
\ No newline at end of file