
The GPT4All API, which is OpenAI-compatible, does not support the response_format field, while this plugin sends response_format = { type = "json_object" } by default. This change adds the ability to set

include_response_format = false;

in the local.d/gpt.conf settings and exclude response_format from the request. No other changes are needed for the GPT4All API; this fixes the error:

 gpt.lua:365: got reply: {"error":{"code":null,"message":"'response_format' is not supported","param":null,"type":"invalid_request_error"}}
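
For example, a minimal local.d/gpt.conf override for a GPT4All endpoint (Rspamd merges local.d files into the plugin's configuration):

 # local.d/gpt.conf
 include_response_format = false;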
Author: John Quaglieri
Commit: d48fbaddc9
src/plugins/lua/gpt.lua | 16 +++++++++++++---
@@ -48,6 +48,8 @@ gpt {
   allow_passthrough = false;
   # Check messages that are apparent ham (no action and negative score)
   allow_ham = false;
+  # By default, send the response_format field { type = "json_object" }
+  include_response_format = true;
 }
 ]])
   return
@@ -393,7 +395,6 @@ local function default_llm_check(task)
     model = settings.model,
     max_tokens = settings.max_tokens,
     temperature = settings.temperature,
-    response_format = { type = "json_object" },
     messages = {
       {
         role = 'system',
@@ -418,6 +419,11 @@ local function default_llm_check(task)
     }
   }
+  -- Conditionally add response_format
+  if settings.include_response_format then
+    body.response_format = { type = "json_object" }
+  end
   upstream = settings.upstreams:get_upstream_round_robin()
   local http_params = {
     url = settings.url,
@@ -498,7 +504,6 @@ local function ollama_check(task)
     model = settings.model,
     max_tokens = settings.max_tokens,
     temperature = settings.temperature,
-    response_format = { type = "json_object" },
     messages = {
       {
         role = 'system',
@@ -523,6 +528,11 @@ local function ollama_check(task)
     }
   }
+  -- Conditionally add response_format
+  if settings.include_response_format then
+    body.response_format = { type = "json_object" }
+  end
   upstream = settings.upstreams:get_upstream_round_robin()
   local http_params = {
     url = settings.url,
@@ -618,4 +628,4 @@ if opts then
     parent = id,
     score = -2.0,
   })
-end
+end