Update language-inference-engine.js

- Add a response type check before parsing.
- Wrap JSON.parse in a try/catch to handle potentially malformed responses.
- Use the "system" role for the initial bias message in the Gemini model's wrap function.
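
For reference, the parsing guard described in the first two points boils down to the pattern below. This is a minimal standalone sketch, not code from this commit; the helper name safeParseResponse and the error wording are illustrative only.

// Hypothetical helper: parse only when the body is a string,
// and fall back to an error array on malformed JSON.
function safeParseResponse(response) {
    if (typeof response === 'string') {
        try {
            response = JSON.parse(response);
        } catch (e) {
            return ["Error: Malformed response"];
        }
    }
    return response;
}

Keeping the typeof check outside the try/catch means an already-parsed object is passed through untouched rather than being re-parsed.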
Jihoon Yi 2025-05-10 16:30:46 +09:00 committed by GitHub
parent b1078cd36c
commit d8a29eaeee


@@ -196,9 +196,10 @@ var ENGINE_PROFILES = {
         },
         "url": "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={apikey}",
         "wrap": function(model, message, temperature) {
+            const BIAS_MESSAGE = BIAS_MESSAGE
             return {
                 "contents": [{
-                    "role": "user", // Changed "developer" to "user" for the initial prompt. The overall prompt is still intended to guide the model, so this is reasonable.
+                    "role": "system",
                     "parts": [{
                         "text": BIAS_MESSAGE
                     }]
@@ -214,7 +215,6 @@ var ENGINE_PROFILES = {
             };
         },
         "callback": function(response) {
-            response = JSON.parse(response)
             if ("error" in response) {
                 return ["Error: " + response.error.message];
             } else {
@@ -531,6 +531,14 @@ function LanguageInferenceEngine() {
             .send()
             .responseBody;
+        if(typeof response === 'string'){
+            try {
+                response = JSON.parse(response)
+            } catch (e) {
+                return ["Error: Malformed response"];
+            }
+        }
         return callback(response);
     };
 }
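
A quick way to sanity-check the new behavior, reusing the safeParseResponse sketch above: the three values cover the shapes .responseBody can now take before callback is invoked (a valid JSON string, a malformed string, and an already-parsed object). The sample payloads are made up.

// Illustrative only: exercise the guard with the three response shapes.
var samples = [
    '{"error": {"message": "quota exceeded"}}', // valid JSON string -> parsed object with an error field
    'not-json<<<',                              // malformed string -> ["Error: Malformed response"]
    { "candidates": [] }                        // already an object -> passed through unchanged
];

for (var i = 0; i < samples.length; i++) {
    console.log(safeParseResponse(samples[i]));
}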