doc_key = get_suggestion_cache_key(document_id)
data: SuggestionCacheData = cache.get(doc_key)
- if data and data.classifier_version == 1000 and data.classifier_hash == backend:
+ if data and data.classifier_hash == backend:
return data
return None
)
+def invalidate_llm_suggestions_cache(
+ document_id: int,
+) -> None:
+ """
+    Invalidate the LLM suggestions cache for a specific document.
+ """
+    doc_key = get_suggestion_cache_key(document_id)
+    # No need to fetch the entry first: cache.delete() is a no-op for
+    # missing keys.
+    cache.delete(doc_key)
+
+
def get_metadata_cache_key(document_id: int) -> str:
"""
Returns the basic key for a document's metadata
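
The hunks above are from the caching module; the ones below read as the signal handlers that consume it. Before moving on, a note on what the changed condition implies: a cache hit now requires only that the stored `classifier_hash` equal the requested backend name (so the setter presumably stores the backend identifier in that field), and invalidation is just a key delete. The sketch below walks through that round-trip against Django's locmem cache; the getter's name, the key format, and the `SuggestionCacheData` fields are assumptions for illustration, not the module's actual definitions.

```python
# Runnable sketch of the round-trip (requires only Django; uses the locmem
# cache). Function names, key format, and dataclass fields are assumptions
# for illustration; the real definitions live in the caching module above.
import dataclasses

from django.conf import settings

settings.configure(
    CACHES={
        "default": {
            "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        },
    },
)

from django.core.cache import cache  # noqa: E402


@dataclasses.dataclass(frozen=True)
class SuggestionCacheData:  # assumed field layout
    classifier_version: int
    classifier_hash: str  # for LLM suggestions, holds the backend name
    suggestions: dict


def get_suggestion_cache_key(document_id: int) -> str:
    return f"doc_{document_id}_suggest"  # assumed key shape


def get_llm_suggestion_cache(document_id: int, backend: str):
    # Mirrors the changed condition: a hit needs only a backend match now;
    # the classifier_version == 1000 check was dropped.
    data = cache.get(get_suggestion_cache_key(document_id))
    if data and data.classifier_hash == backend:
        return data
    return None


def invalidate_llm_suggestions_cache(document_id: int) -> None:
    # cache.delete() is a no-op for missing keys, so no lookup is needed.
    cache.delete(get_suggestion_cache_key(document_id))


cache.set(
    get_suggestion_cache_key(42),
    SuggestionCacheData(1000, "openai", {"tags": [1, 2]}),
    timeout=300,
)
assert get_llm_suggestion_cache(42, backend="openai") is not None
assert get_llm_suggestion_cache(42, backend="ollama") is None  # backend mismatch
invalidate_llm_suggestions_cache(42)
assert get_llm_suggestion_cache(42, backend="openai") is None
```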
from documents import matching
from documents.caching import clear_document_caches
+from documents.caching import invalidate_llm_suggestions_cache
from documents.file_handling import create_source_path_directory
from documents.file_handling import delete_empty_directories
from documents.file_handling import generate_unique_filename
)
+@receiver(models.signals.post_save, sender=Document)
+def update_llm_suggestions_cache(sender, instance, **kwargs):
+ """
+ Invalidate the LLM suggestions cache when a document is saved.
+ """
+    invalidate_llm_suggestions_cache(instance.pk)
+
+
# should be disabled in /src/documents/management/commands/document_importer.py handle
@receiver(models.signals.post_save, sender=CustomField)
def check_paths_and_prune_custom_fields(sender, instance: CustomField, **kwargs):
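
One caveat on the new receiver, in the same vein as the importer note above it: `post_save` fires only through `Model.save()` (and paths that call it, such as `create()`); queryset `.update()` and bulk operations skip signals, so suggestions cached for documents changed that way are not invalidated. A stand-in sketch of the dispatch wiring follows, using a plain `Signal` so it runs without a configured project; the real hook is `models.signals.post_save` with `sender=Document`.

```python
# Stand-in demonstration of the receiver wiring; a plain Signal is used so
# the snippet runs without a configured Django project. The real hook is
# models.signals.post_save with sender=Document.
from django.dispatch import Signal, receiver

document_saved = Signal()  # stand-in for models.signals.post_save


@receiver(document_saved)
def update_llm_suggestions_cache(sender, instance, **kwargs):
    # The real handler calls invalidate_llm_suggestions_cache(instance.pk).
    print(f"invalidating LLM suggestions cache for pk={instance}")


# Receivers run synchronously inside send(); Model.save() does the
# equivalent after writing the row, so the cache is cleared before
# save() returns.
document_saved.send(sender=None, instance=42)
```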