git.ipfire.org Git - thirdparty/paperless-ngx.git/commitdiff
Let ruff autofix some things from the newest version
author	Trenton H <797416+stumpylog@users.noreply.github.com>
Tue, 13 Jun 2023 17:55:52 +0000 (10:55 -0700)
committer	Trenton H <797416+stumpylog@users.noreply.github.com>
Wed, 14 Jun 2023 03:15:18 +0000 (20:15 -0700)
docker/wait-for-redis.py
src/documents/barcodes.py
src/documents/signals/handlers.py
src/documents/tasks.py
src/documents/views.py
src/paperless_mail/mail.py
src/paperless_tesseract/parsers.py
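The bulk of the diff is one mechanical fix, applied everywhere ruff found it: a str(...) call inside an f-string placeholder is replaced by the !s conversion flag (this looks like ruff's RUF010, explicit-f-string-type-conversion). The two spellings produce identical output; !s just asks the f-string machinery to call str() itself. A minimal sketch of the equivalence, using a made-up error rather than anything from the commit:

try:
    raise ValueError("redis unreachable")  # hypothetical error, for illustration only
except Exception as e:
    # All three placeholders render the same text: "!s" applies str(),
    # and a bare placeholder falls back to str() via the default __format__.
    assert f"Error: {str(e)}" == f"Error: {e!s}" == f"Error: {e}"
    print(f"Error: {e!s}")  # -> Error: redis unreachable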

index d6ce5c639a7a5cabd01c8729c7871a112edf707d..41df5a08b6d5ff468d01bcf4468f6115e89db936 100755 (executable)
@@ -28,7 +28,7 @@ if __name__ == "__main__":
             except Exception as e:
                 print(
                     f"Redis ping #{attempt} failed.\n"
-                    f"Error: {str(e)}.\n"
+                    f"Error: {e!s}.\n"
                     f"Waiting {RETRY_SLEEP_SECONDS}s",
                     flush=True,
                 )
index dfeac545b2244580f64be0b473a24417fd357b6d..f3d59bc5b2b938b5ad9c90206feb14fe21f9ebc7 100644 (file)
@@ -121,7 +121,7 @@ class BarcodeReader:
             if barcode.text:
                 barcodes.append(barcode.text)
                 logger.debug(
-                    f"Barcode of type {str(barcode.format)} found: {barcode.text}",
+                    f"Barcode of type {barcode.format} found: {barcode.text}",
                 )
 
         return barcodes
@@ -141,7 +141,7 @@ class BarcodeReader:
                 decoded_barcode = barcode.data.decode("utf-8")
                 barcodes.append(decoded_barcode)
                 logger.debug(
-                    f"Barcode of type {str(barcode.type)} found: {decoded_barcode}",
+                    f"Barcode of type {barcode.type} found: {decoded_barcode}",
                 )
 
         return barcodes
@@ -348,7 +348,7 @@ class BarcodeReader:
 
         for idx, document_path in enumerate(doc_paths):
             if override_name is not None:
-                newname = f"{str(idx)}_{override_name}"
+                newname = f"{idx}_{override_name}"
                 dest = save_to_dir / newname
             else:
                 dest = save_to_dir
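A few placeholders instead drop the str(...) call entirely, for example {str(barcode.format)} above and {str(criterias)} further down. That is usually a no-op as well, because a placeholder with no conversion flag and no format spec calls format(obj, ""), which for objects relying on the default __format__ is the same as str(obj). A minimal sketch with hypothetical classes standing in for the real objects:

import enum


class Plain:
    # Stand-in for an object that uses the default __format__.
    def __str__(self):
        return "plain-object"


assert f"{Plain()}" == f"{str(Plain())}" == "plain-object"


class Format(enum.IntEnum):
    # Caveat: types that override __format__, such as int-backed enums on
    # some Python versions, can render differently without an explicit str()
    # or !s, so the dropped calls rely on the default formatting behaviour.
    QR_CODE = 1


print(f"{Format.QR_CODE}", str(Format.QR_CODE))  # output may differ by Python version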
index 40765443b6ed8d63e2e775d85b9d7abff0a71197..4a39d98eab5f469b87d569731f703f63f32d4b0a 100644 (file)
@@ -346,7 +346,7 @@ def cleanup_document_deletion(sender, instance, using, **kwargs):
                     logger.debug(f"Deleted file {filename}.")
                 except OSError as e:
                     logger.warning(
-                        f"While deleting document {str(instance)}, the file "
+                        f"While deleting document {instance!s}, the file "
                         f"{filename} could not be deleted: {e}",
                     )
 
@@ -369,13 +369,13 @@ class CannotMoveFilesException(Exception):
 def validate_move(instance, old_path, new_path):
     if not os.path.isfile(old_path):
         # Can't do anything if the old file does not exist anymore.
-        logger.fatal(f"Document {str(instance)}: File {old_path} has gone.")
+        logger.fatal(f"Document {instance!s}: File {old_path} has gone.")
         raise CannotMoveFilesException
 
     if os.path.isfile(new_path):
         # Can't do anything if the new file already exists. Skip updating file.
         logger.warning(
-            f"Document {str(instance)}: Cannot rename file "
+            f"Document {instance!s}: Cannot rename file "
             f"since target path {new_path} already exists.",
         )
         raise CannotMoveFilesException
index 1603a13590a3c4293cfa1740d05be4539fd55777..d320875a042eda14ca5ce64ddf84d016cf6deeee 100644 (file)
@@ -116,7 +116,7 @@ def consume_file(
                         {"type": "status_update", "data": payload},
                     )
                 except ConnectionError as e:
-                    logger.warning(f"ConnectionError on status send: {str(e)}")
+                    logger.warning(f"ConnectionError on status send: {e!s}")
                 # consuming stops here, since the original document with
                 # the barcodes has been split and will be consumed separately
 
index d60cf459d1ebe269b833e1bbfc7af4ac8348b346..cd69095fea8e1117d64e81f55bbf89eb6861c1ec 100644 (file)
@@ -519,7 +519,7 @@ class DocumentViewSet(
             try:
                 return Response(self.getNotes(doc))
             except Exception as e:
-                logger.warning(f"An error occurred retrieving notes: {str(e)}")
+                logger.warning(f"An error occurred retrieving notes: {e!s}")
                 return Response(
                     {"error": "Error retreiving notes, check logs for more detail."},
                 )
@@ -538,7 +538,7 @@ class DocumentViewSet(
 
                 return Response(self.getNotes(doc))
             except Exception as e:
-                logger.warning(f"An error occurred saving note: {str(e)}")
+                logger.warning(f"An error occurred saving note: {e!s}")
                 return Response(
                     {
                         "error": "Error saving note, check logs for more detail.",
@@ -628,7 +628,7 @@ class UnifiedSearchViewSet(DocumentViewSet):
             except NotFound:
                 raise
             except Exception as e:
-                logger.warning(f"An error occurred listing search results: {str(e)}")
+                logger.warning(f"An error occurred listing search results: {e!s}")
                 return HttpResponseBadRequest(
                     "Error listing search results, check logs for more detail.",
                 )
@@ -699,7 +699,7 @@ class BulkEditView(GenericAPIView):
             result = method(documents, **parameters)
             return Response({"result": result})
         except Exception as e:
-            logger.warning(f"An error occurred performing bulk edit: {str(e)}")
+            logger.warning(f"An error occurred performing bulk edit: {e!s}")
             return HttpResponseBadRequest(
                 "Error performing bulk edit, check logs for more detail.",
             )
index bfb306e5abcbbe81e37a1b818a3772cfc1dbb883..65768d3c24c952999de8a9af8aabeb924fc20796 100644 (file)
@@ -544,7 +544,7 @@ class MailAccountHandler(LoggingMixin):
         criterias = make_criterias(rule, supports_gmail_labels)
 
         self.log.debug(
-            f"Rule {rule}: Searching folder with criteria {str(criterias)}",
+            f"Rule {rule}: Searching folder with criteria {criterias}",
         )
 
         try:
index b6a721b2f0e78636cd627a8d23416b4361b604c3..c6d066fbe197c5bc9ba19cc10d7f1d1352593bba 100644 (file)
@@ -335,7 +335,7 @@ class RasterisedDocumentParser(DocumentParser):
                 self.text = text_original
         except (NoTextFoundException, InputFileError) as e:
             self.log.warning(
-                f"Encountered an error while running OCR: {str(e)}. "
+                f"Encountered an error while running OCR: {e!s}. "
                 f"Attempting force OCR to get the text.",
             )
 
@@ -370,11 +370,11 @@ class RasterisedDocumentParser(DocumentParser):
 
             except Exception as e:
                 # If this fails, we have a serious issue at hand.
-                raise ParseError(f"{e.__class__.__name__}: {str(e)}") from e
+                raise ParseError(f"{e.__class__.__name__}: {e!s}") from e
 
         except Exception as e:
             # Anything else is probably serious.
-            raise ParseError(f"{e.__class__.__name__}: {str(e)}") from e
+            raise ParseError(f"{e.__class__.__name__}: {e!s}") from e
 
         # As a last resort, if we still don't have any text for any reason,
         # try to extract the text from the original document.