## AI {#ai}
-#### [`PAPERLESS_ENABLE_AI=<bool>`](#PAPERLESS_ENABLE_AI) {#PAPERLESS_ENABLE_AI}
+#### [`PAPERLESS_AI_ENABLED=<bool>`](#PAPERLESS_AI_ENABLED) {#PAPERLESS_AI_ENABLED}
: Enables the AI features in Paperless. This includes the AI-based
suggestions. This setting must be set to true in order to use the AI features.
Defaults to false.
-#### [`PAPERLESS_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_LLM_EMBEDDING_BACKEND) {#PAPERLESS_LLM_EMBEDDING_BACKEND}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_BACKEND) {#PAPERLESS_AI_LLM_EMBEDDING_BACKEND}
: The embedding backend to use for RAG. This can be either "openai" or "huggingface".
Defaults to None.
-#### [`PAPERLESS_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_LLM_EMBEDDING_MODEL) {#PAPERLESS_LLM_EMBEDDING_MODEL}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_MODEL) {#PAPERLESS_AI_LLM_EMBEDDING_MODEL}
: The model to use for the embedding backend for RAG. This can be set to any of the embedding models supported by the current embedding backend. If not supplied, defaults to "text-embedding-3-small" for OpenAI and "sentence-transformers/all-MiniLM-L6-v2" for Huggingface.
Refer to the OpenAI terms of service, and use at your own risk.
-#### [`PAPERLESS_LLM_MODEL=<str>`](#PAPERLESS_LLM_MODEL) {#PAPERLESS_LLM_MODEL}
+#### [`PAPERLESS_AI_LLM_MODEL=<str>`](#PAPERLESS_AI_LLM_MODEL) {#PAPERLESS_AI_LLM_MODEL}
: The model to use for the AI backend, e.g. "gpt-3.5-turbo", "gpt-4" or any of the models supported by the
current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3" for Ollama.
Defaults to None.
-#### [`PAPERLESS_LLM_API_KEY=<str>`](#PAPERLESS_LLM_API_KEY) {#PAPERLESS_LLM_API_KEY}
+#### [`PAPERLESS_AI_LLM_API_KEY=<str>`](#PAPERLESS_AI_LLM_API_KEY) {#PAPERLESS_AI_LLM_API_KEY}
: The API key to use for the AI backend. This is required for the OpenAI backend only.
Defaults to None.
-#### [`PAPERLESS_LLM_URL=<str>`](#PAPERLESS_LLM_URL) {#PAPERLESS_LLM_URL}
+#### [`PAPERLESS_AI_LLM_ENDPOINT=<str>`](#PAPERLESS_AI_LLM_ENDPOINT) {#PAPERLESS_AI_LLM_ENDPOINT}
-: The URL to use for the AI backend. This is required for the Ollama backend only.
+: The endpoint / URL to use for the AI backend. This is required for the Ollama backend only.
Defaults to None.
-#### [`PAPERLESS_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_LLM_INDEX_TASK_CRON) {#PAPERLESS_LLM_INDEX_TASK_CRON}
+#### [`PAPERLESS_AI_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_AI_LLM_INDEX_TASK_CRON) {#PAPERLESS_AI_LLM_INDEX_TASK_CRON}
: Configures the schedule to update the AI embeddings of text content and metadata for all documents. Only performed if
AI is enabled and the LLM embedding backend is set.
title: $localize`LLM Embedding Backend`,
type: ConfigOptionType.Select,
choices: mapToItems(LLMEmbeddingBackendConfig),
- config_key: 'PAPERLESS_LLM_EMBEDDING_BACKEND',
+ config_key: 'PAPERLESS_AI_LLM_EMBEDDING_BACKEND',
category: ConfigCategory.AI,
},
{
key: 'llm_embedding_model',
title: $localize`LLM Embedding Model`,
type: ConfigOptionType.String,
- config_key: 'PAPERLESS_LLM_EMBEDDING_MODEL',
+ config_key: 'PAPERLESS_AI_LLM_EMBEDDING_MODEL',
category: ConfigCategory.AI,
},
{
title: $localize`LLM Backend`,
type: ConfigOptionType.Select,
choices: mapToItems(LLMBackendConfig),
- config_key: 'PAPERLESS_LLM_BACKEND',
+ config_key: 'PAPERLESS_AI_LLM_BACKEND',
category: ConfigCategory.AI,
},
{
key: 'llm_model',
title: $localize`LLM Model`,
type: ConfigOptionType.String,
- config_key: 'PAPERLESS_LLM_MODEL',
+ config_key: 'PAPERLESS_AI_LLM_MODEL',
category: ConfigCategory.AI,
},
{
key: 'llm_api_key',
title: $localize`LLM API Key`,
type: ConfigOptionType.Password,
- config_key: 'PAPERLESS_LLM_API_KEY',
+ config_key: 'PAPERLESS_AI_LLM_API_KEY',
category: ConfigCategory.AI,
},
{
- key: 'llm_url',
- title: $localize`LLM URL`,
+ key: 'llm_endpoint',
+ title: $localize`LLM Endpoint`,
type: ConfigOptionType.String,
- config_key: 'PAPERLESS_LLM_URL',
+ config_key: 'PAPERLESS_AI_LLM_ENDPOINT',
category: ConfigCategory.AI,
},
]
llm_backend: string
llm_model: string
llm_api_key: string
- llm_url: string
+ llm_endpoint: string
}
"llm_backend": None,
"llm_model": None,
"llm_api_key": None,
- "llm_url": None,
+ "llm_endpoint": None,
},
)
llm_backend: str = dataclasses.field(init=False)
llm_model: str = dataclasses.field(init=False)
llm_api_key: str = dataclasses.field(init=False)
- llm_url: str = dataclasses.field(init=False)
+ llm_endpoint: str = dataclasses.field(init=False)
def __post_init__(self) -> None:
app_config = self._get_config_instance()
self.llm_backend = app_config.llm_backend or settings.LLM_BACKEND
self.llm_model = app_config.llm_model or settings.LLM_MODEL
self.llm_api_key = app_config.llm_api_key or settings.LLM_API_KEY
- self.llm_url = app_config.llm_url or settings.LLM_URL
+ self.llm_endpoint = app_config.llm_endpoint or settings.LLM_ENDPOINT
def llm_index_enabled(self) -> bool:
return self.ai_enabled and self.llm_embedding_backend
),
migrations.AddField(
model_name="applicationconfiguration",
- name="llm_url",
+ name="llm_endpoint",
field=models.CharField(
blank=True,
max_length=128,
null=True,
- verbose_name="Sets the LLM URL, optional",
+ verbose_name="Sets the LLM endpoint, optional",
),
),
]
max_length=128,
)
- llm_url = models.CharField(
- verbose_name=_("Sets the LLM URL, optional"),
+ llm_endpoint = models.CharField(
+ verbose_name=_("Sets the LLM endpoint, optional"),
null=True,
blank=True,
max_length=128,
################################################################################
AI_ENABLED = __get_boolean("PAPERLESS_AI_ENABLED", "NO")
LLM_EMBEDDING_BACKEND = os.getenv(
- "PAPERLESS_LLM_EMBEDDING_BACKEND",
+ "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
) # "huggingface" or "openai"
-LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
-LLM_BACKEND = os.getenv("PAPERLESS_LLM_BACKEND") # "ollama" or "openai"
-LLM_MODEL = os.getenv("PAPERLESS_LLM_MODEL")
-LLM_API_KEY = os.getenv("PAPERLESS_LLM_API_KEY")
-LLM_URL = os.getenv("PAPERLESS_LLM_URL")
+LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_AI_LLM_EMBEDDING_MODEL")
+LLM_BACKEND = os.getenv("PAPERLESS_AI_LLM_BACKEND") # "ollama" or "openai"
+LLM_MODEL = os.getenv("PAPERLESS_AI_LLM_MODEL")
+LLM_API_KEY = os.getenv("PAPERLESS_AI_LLM_API_KEY")
+LLM_ENDPOINT = os.getenv("PAPERLESS_AI_LLM_ENDPOINT")
from llama_index.llms.openai import OpenAI
from paperless.config import AIConfig
-from paperless_ai.tools import DocumentClassifierSchema
+from paperless_ai.base_model import DocumentClassifierSchema
logger = logging.getLogger("paperless_ai.client")
if self.settings.llm_backend == "ollama":
return Ollama(
model=self.settings.llm_model or "llama3",
- base_url=self.settings.llm_url or "http://localhost:11434",
+ base_url=self.settings.llm_endpoint or "http://localhost:11434",
request_timeout=120,
)
elif self.settings.llm_backend == "openai":
def test_get_llm_ollama(mock_ai_config, mock_ollama_llm):
mock_ai_config.llm_backend = "ollama"
mock_ai_config.llm_model = "test_model"
- mock_ai_config.llm_url = "http://test-url"
+ mock_ai_config.llm_endpoint = "http://test-url"
client = AIClient()
def test_run_llm_query(mock_ai_config, mock_ollama_llm):
mock_ai_config.llm_backend = "ollama"
mock_ai_config.llm_model = "test_model"
- mock_ai_config.llm_url = "http://test-url"
+ mock_ai_config.llm_endpoint = "http://test-url"
mock_llm_instance = mock_ollama_llm.return_value
def test_run_chat(mock_ai_config, mock_ollama_llm):
mock_ai_config.llm_backend = "ollama"
mock_ai_config.llm_model = "test_model"
- mock_ai_config.llm_url = "http://test-url"
+ mock_ai_config.llm_endpoint = "http://test-url"
mock_llm_instance = mock_ollama_llm.return_value
mock_llm_instance.chat.return_value = "test_chat_result"