def run_llm_query(prompt: str) -> str:
-    if settings.LLM_BACKEND == "ollama":
-        return _run_ollama_query(prompt)
-    return _run_openai_query(prompt)
+    logger.debug(
+        "Running LLM query against %s with model %s",
+        settings.LLM_BACKEND,
+        settings.LLM_MODEL,
+    )
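+    # Dispatch to the backend-specific query helper based on the configured
+    # LLM_BACKEND; unrecognized values fail loudly instead of silently defaulting.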
+    match settings.LLM_BACKEND:
+        case "openai":
+            result = _run_openai_query(prompt)
+        case "ollama":
+            result = _run_ollama_query(prompt)
+        case _:
+            raise ValueError(f"Unsupported LLM backend: {settings.LLM_BACKEND}")
+    logger.debug("LLM query result: %s", result)
+    return result
def get_ai_document_classification(document) -> dict:
    filename = document.filename or ""
    content = document.content or ""
+    # Truncate the content to the first 10,000 characters to bound the
+    # prompt size sent to the LLM backend.
+    content = content[:10000]
+
prompt = f"""
You are a document classification assistant. Based on the content below, return a JSON object suggesting the following classification fields:
- title: A descriptive title for the document
"""
    try:
-        logger.debug(f"LLM classification prompt: {prompt}")
        result = run_llm_query(prompt)
-        logger.debug(f"LLM classification result: {result}")
        suggestions = parse_llm_classification_response(result)
        return suggestions or {}
    except Exception: