From: hunter-nl Date: Thu, 14 Aug 2025 12:57:04 +0000 (+0200) Subject: Update gpt.lua to handle OpenAI parallel old and new models X-Git-Tag: 3.13.0~29^2~7 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ba7df736e4e22dd06280f365ed5c8ff6f4e324dc;p=thirdparty%2Frspamd.git Update gpt.lua to handle OpenAI parallel old and new models When multiple models (old and new) are specified in rspamd_config, this is now handled correctly by setting the required attributes for each model's request. --- diff --git a/src/plugins/lua/gpt.lua b/src/plugins/lua/gpt.lua index 5a8bb10abb..72f61e6005 100644 --- a/src/plugins/lua/gpt.lua +++ b/src/plugins/lua/gpt.lua @@ -716,7 +716,7 @@ local function openai_check(task, content, sel_part) return true end - local body = { + local body_base = { model = settings.model, messages = { { @@ -741,21 +741,7 @@ local function openai_check(task, content, sel_part) } } } - - -- Set the correct token limit field - local token_field = get_max_tokens_field(settings.model) - body[token_field] = settings.max_tokens - - -- Set the temperature field if model supports it - if supports_temperature(settings.model) then - body.temperature = settings.temperature - end - - -- Conditionally add response_format - if settings.include_response_format then - body.response_format = { type = "json_object" } - end - + if type(settings.model) == 'string' then settings.model = { settings.model } end @@ -766,6 +752,21 @@ local function openai_check(task, content, sel_part) success = false, checked = false } + local body = body_base + -- Set the correct token limit field + local token_field = get_max_tokens_field(model) + body[token_field] = settings.max_tokens + + -- Set the temperature field if model supports it + if supports_temperature(model) then + body.temperature = settings.temperature + end + + -- Conditionally add response_format + if settings.include_response_format then + body.response_format = { type = "json_object" } + end + + body.model = model local 
http_params = { url = settings.url,