]> git.ipfire.org Git - thirdparty/paperless-ngx.git/commitdiff
Variable refactoring
authorshamoon <4887959+shamoon@users.noreply.github.com>
Fri, 8 Aug 2025 12:06:25 +0000 (08:06 -0400)
committershamoon <4887959+shamoon@users.noreply.github.com>
Fri, 8 Aug 2025 12:06:25 +0000 (08:06 -0400)
docs/configuration.md
src-ui/src/app/data/paperless-config.ts
src/documents/tests/test_api_app_config.py
src/paperless/config.py
src/paperless/migrations/0005_applicationconfiguration_ai_enabled_and_more.py
src/paperless/models.py
src/paperless/settings.py
src/paperless_ai/base_model.py [moved from src/paperless_ai/tools.py with 100% similarity]
src/paperless_ai/client.py
src/paperless_ai/tests/test_client.py

index b0dded52a6d8c358f09ad45f7433a2f83e014fd0..37dd24ffaa5f9ba5e8e18b1f8c5d6feed9534e02 100644 (file)
@@ -1779,20 +1779,20 @@ password. All of these options come from their similarly-named [Django settings]
 
 ## AI {#ai}
 
-#### [`PAPERLESS_ENABLE_AI=<bool>`](#PAPERLESS_ENABLE_AI) {#PAPERLESS_ENABLE_AI}
+#### [`PAPERLESS_AI_ENABLED=<bool>`](#PAPERLESS_AI_ENABLED) {#PAPERLESS_AI_ENABLED}
 
 : Enables the AI features in Paperless. This includes the AI-based
 suggestions. This setting is required to be set to true in order to use the AI features.
 
     Defaults to false.
 
-#### [`PAPERLESS_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_LLM_EMBEDDING_BACKEND) {#PAPERLESS_LLM_EMBEDDING_BACKEND}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_BACKEND) {#PAPERLESS_AI_LLM_EMBEDDING_BACKEND}
 
 : The embedding backend to use for RAG. This can be either "openai" or "huggingface".
 
     Defaults to None.
 
-#### [`PAPERLESS_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_LLM_EMBEDDING_MODEL) {#PAPERLESS_LLM_EMBEDDING_MODEL}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_MODEL) {#PAPERLESS_AI_LLM_EMBEDDING_MODEL}
 
 : The model to use for the embedding backend for RAG. This can be set to any of the embedding models supported by the current embedding backend. If not supplied, defaults to "text-embedding-3-small" for OpenAI and "sentence-transformers/all-MiniLM-L6-v2" for Huggingface.
 
@@ -1815,26 +1815,26 @@ using the OpenAI API. This setting is required to be set to use the AI features.
 
         Refer to the OpenAI terms of service, and use at your own risk.
 
-#### [`PAPERLESS_LLM_MODEL=<str>`](#PAPERLESS_LLM_MODEL) {#PAPERLESS_LLM_MODEL}
+#### [`PAPERLESS_AI_LLM_MODEL=<str>`](#PAPERLESS_AI_LLM_MODEL) {#PAPERLESS_AI_LLM_MODEL}
 
 : The model to use for the AI backend, i.e. "gpt-3.5-turbo", "gpt-4" or any of the models supported by the
 current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3" for Ollama.
 
     Defaults to None.
 
-#### [`PAPERLESS_LLM_API_KEY=<str>`](#PAPERLESS_LLM_API_KEY) {#PAPERLESS_LLM_API_KEY}
+#### [`PAPERLESS_AI_LLM_API_KEY=<str>`](#PAPERLESS_AI_LLM_API_KEY) {#PAPERLESS_AI_LLM_API_KEY}
 
 : The API key to use for the AI backend. This is required for the OpenAI backend only.
 
     Defaults to None.
 
-#### [`PAPERLESS_LLM_URL=<str>`](#PAPERLESS_LLM_URL) {#PAPERLESS_LLM_URL}
+#### [`PAPERLESS_AI_LLM_ENDPOINT=<str>`](#PAPERLESS_AI_LLM_ENDPOINT) {#PAPERLESS_AI_LLM_ENDPOINT}
 
-: The URL to use for the AI backend. This is required for the Ollama backend only.
+: The endpoint / URL to use for the AI backend. This is required for the Ollama backend only.
 
     Defaults to None.
 
-#### [`PAPERLESS_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_LLM_INDEX_TASK_CRON) {#PAPERLESS_LLM_INDEX_TASK_CRON}
+#### [`PAPERLESS_AI_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_AI_LLM_INDEX_TASK_CRON) {#PAPERLESS_AI_LLM_INDEX_TASK_CRON}
 
 : Configures the schedule to update the AI embeddings of text content and metadata for all documents. Only performed if
 AI is enabled and the LLM embedding backend is set.
index 7236eae081d9dbe265fec7b9e27988749ceb5152..fd151002dd30faf555c809b8012fe3635eaf04af 100644 (file)
@@ -284,14 +284,14 @@ export const PaperlessConfigOptions: ConfigOption[] = [
     title: $localize`LLM Embedding Backend`,
     type: ConfigOptionType.Select,
     choices: mapToItems(LLMEmbeddingBackendConfig),
-    config_key: 'PAPERLESS_LLM_EMBEDDING_BACKEND',
+    config_key: 'PAPERLESS_AI_LLM_EMBEDDING_BACKEND',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_embedding_model',
     title: $localize`LLM Embedding Model`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_EMBEDDING_MODEL',
+    config_key: 'PAPERLESS_AI_LLM_EMBEDDING_MODEL',
     category: ConfigCategory.AI,
   },
   {
@@ -299,28 +299,28 @@ export const PaperlessConfigOptions: ConfigOption[] = [
     title: $localize`LLM Backend`,
     type: ConfigOptionType.Select,
     choices: mapToItems(LLMBackendConfig),
-    config_key: 'PAPERLESS_LLM_BACKEND',
+    config_key: 'PAPERLESS_AI_LLM_BACKEND',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_model',
     title: $localize`LLM Model`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_MODEL',
+    config_key: 'PAPERLESS_AI_LLM_MODEL',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_api_key',
     title: $localize`LLM API Key`,
     type: ConfigOptionType.Password,
-    config_key: 'PAPERLESS_LLM_API_KEY',
+    config_key: 'PAPERLESS_AI_LLM_API_KEY',
     category: ConfigCategory.AI,
   },
   {
-    key: 'llm_url',
-    title: $localize`LLM URL`,
+    key: 'llm_endpoint',
+    title: $localize`LLM Endpoint`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_URL',
+    config_key: 'PAPERLESS_AI_LLM_ENDPOINT',
     category: ConfigCategory.AI,
   },
 ]
@@ -358,5 +358,5 @@ export interface PaperlessConfig extends ObjectWithId {
   llm_backend: string
   llm_model: string
   llm_api_key: string
-  llm_url: string
+  llm_endpoint: string
 }
index 914f42998443b791b9f2f1a75909234796b7d577..dea8238a743ea238b6cfcf3eba8ba8259b3d4c7e 100644 (file)
@@ -71,7 +71,7 @@ class TestApiAppConfig(DirectoriesMixin, APITestCase):
                 "llm_backend": None,
                 "llm_model": None,
                 "llm_api_key": None,
-                "llm_url": None,
+                "llm_endpoint": None,
             },
         )
 
index c263ed6feebb3ed8f67dbeeaa53e6da175ca6db8..f0d0d1799e75c40ed1ee376d5f2d14e7a07ec7b3 100644 (file)
@@ -183,7 +183,7 @@ class AIConfig(BaseConfig):
     llm_backend: str = dataclasses.field(init=False)
     llm_model: str = dataclasses.field(init=False)
     llm_api_key: str = dataclasses.field(init=False)
-    llm_url: str = dataclasses.field(init=False)
+    llm_endpoint: str = dataclasses.field(init=False)
 
     def __post_init__(self) -> None:
         app_config = self._get_config_instance()
@@ -198,7 +198,7 @@ class AIConfig(BaseConfig):
         self.llm_backend = app_config.llm_backend or settings.LLM_BACKEND
         self.llm_model = app_config.llm_model or settings.LLM_MODEL
         self.llm_api_key = app_config.llm_api_key or settings.LLM_API_KEY
-        self.llm_url = app_config.llm_url or settings.LLM_URL
+        self.llm_endpoint = app_config.llm_endpoint or settings.LLM_ENDPOINT
 
     def llm_index_enabled(self) -> bool:
         return self.ai_enabled and self.llm_embedding_backend
index d9f59b96b6d17357c4d41ebe471726b8f9cb17c5..a0d46fc812a79c157b26e6c05635617c7cef9d42 100644 (file)
@@ -73,12 +73,12 @@ class Migration(migrations.Migration):
         ),
         migrations.AddField(
             model_name="applicationconfiguration",
-            name="llm_url",
+            name="llm_endpoint",
             field=models.CharField(
                 blank=True,
                 max_length=128,
                 null=True,
-                verbose_name="Sets the LLM URL, optional",
+                verbose_name="Sets the LLM endpoint, optional",
             ),
         ),
     ]
index 54fcacd7ba4b9dc8f0fea3f45675b840f982524f..61cb5f59560eef4bb6cab35726f234284468a4c9 100644 (file)
@@ -326,8 +326,8 @@ class ApplicationConfiguration(AbstractSingletonModel):
         max_length=128,
     )
 
-    llm_url = models.CharField(
-        verbose_name=_("Sets the LLM URL, optional"),
+    llm_endpoint = models.CharField(
+        verbose_name=_("Sets the LLM endpoint, optional"),
         null=True,
         blank=True,
         max_length=128,
index f7933081a2dd33e85ca3a871ff432fa94e3001c3..062f47e8efe2df4eb672761c75873d07ea90632a 100644 (file)
@@ -1460,10 +1460,10 @@ OUTLOOK_OAUTH_ENABLED = bool(
 ################################################################################
 AI_ENABLED = __get_boolean("PAPERLESS_AI_ENABLED", "NO")
 LLM_EMBEDDING_BACKEND = os.getenv(
-    "PAPERLESS_LLM_EMBEDDING_BACKEND",
+    "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
 )  # "huggingface" or "openai"
-LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
-LLM_BACKEND = os.getenv("PAPERLESS_LLM_BACKEND")  # "ollama" or "openai"
-LLM_MODEL = os.getenv("PAPERLESS_LLM_MODEL")
-LLM_API_KEY = os.getenv("PAPERLESS_LLM_API_KEY")
-LLM_URL = os.getenv("PAPERLESS_LLM_URL")
+LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_AI_LLM_EMBEDDING_MODEL")
+LLM_BACKEND = os.getenv("PAPERLESS_AI_LLM_BACKEND")  # "ollama" or "openai"
+LLM_MODEL = os.getenv("PAPERLESS_AI_LLM_MODEL")
+LLM_API_KEY = os.getenv("PAPERLESS_AI_LLM_API_KEY")
+LLM_ENDPOINT = os.getenv("PAPERLESS_AI_LLM_ENDPOINT")
index 651ca70229a261b2d06228e238f4f9e84aae392d..5e6b9162e54a50927c8997119a200362cfc4b43f 100644 (file)
@@ -6,7 +6,7 @@ from llama_index.llms.ollama import Ollama
 from llama_index.llms.openai import OpenAI
 
 from paperless.config import AIConfig
-from paperless_ai.tools import DocumentClassifierSchema
+from paperless_ai.base_model import DocumentClassifierSchema
 
 logger = logging.getLogger("paperless_ai.client")
 
@@ -24,7 +24,7 @@ class AIClient:
         if self.settings.llm_backend == "ollama":
             return Ollama(
                 model=self.settings.llm_model or "llama3",
-                base_url=self.settings.llm_url or "http://localhost:11434",
+                base_url=self.settings.llm_endpoint or "http://localhost:11434",
                 request_timeout=120,
             )
         elif self.settings.llm_backend == "openai":
index 6ef7b332b44b77327cc9e67cf21cf26e904fa1d8..62327c24aee46561e59e9cd1ab9257f0b217f9f1 100644 (file)
@@ -31,7 +31,7 @@ def mock_openai_llm():
 def test_get_llm_ollama(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     client = AIClient()
 
@@ -67,7 +67,7 @@ def test_get_llm_unsupported_backend(mock_ai_config):
 def test_run_llm_query(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     mock_llm_instance = mock_ollama_llm.return_value
 
@@ -96,7 +96,7 @@ def test_run_llm_query(mock_ai_config, mock_ollama_llm):
 def test_run_chat(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     mock_llm_instance = mock_ollama_llm.return_value
     mock_llm_instance.chat.return_value = "test_chat_result"