From 47bb893c49d28c4738f26b589decd1fd7cf35e35 Mon Sep 17 00:00:00 2001 From: "Namhyeon, Go" Date: Tue, 28 Jan 2025 09:38:23 +0900 Subject: [PATCH] Update language-inference-engine.js --- lib/language-inference-engine.js | 39 +++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/lib/language-inference-engine.js b/lib/language-inference-engine.js index 30697df..6d01a11 100644 --- a/lib/language-inference-engine.js +++ b/lib/language-inference-engine.js @@ -21,12 +21,13 @@ var biasMessage = "Write all future code examples in JavaScript ES3 using the ex var engineProfiles = { "openai": { + "defaultModel": "gpt-4o-mini", "headers": { "Content-Type": "application/json", "Authorization": "Bearer {apikey}" }, "url": "https://api.openai.com/v1/chat/completions", - "wrap": function(model, message) { + "wrap": function(model, message, temperature) { return { "model": model, "messages": [{ @@ -35,7 +36,8 @@ var engineProfiles = { }, { "role": "user", "content": message - }] + }], + "temperature": temperature }; }, "callback": function(response) { @@ -51,13 +53,14 @@ var engineProfiles = { } }, "anthropic": { + "defaultModel": "claude-3-5-sonnet-20241022", "headers": { "Content-Type": "application/json", "x-api-key": "{apikey}", "anthropic-version": "2023-06-01" }, "url": "https://api.anthropic.com/v1/messages", - "wrap": function(model, message) { + "wrap": function(model, message, temperature) { return { "model": model, "max_tokens": 1024, @@ -70,7 +73,8 @@ var engineProfiles = { "role": "user", "content": message } - ] + ], + "temperature": temperature }; }, "callback": function(response) { @@ -90,12 +94,13 @@ var engineProfiles = { } }, "groq": { + "defaultModel": "llama-3.1-8b-instant", "headers": { "Content-Type": "application/json", "Authorization": "Bearer {apikey}" }, "url": "https://api.groq.com/openai/v1/chat/completions", - "wrap": function(model, message) { + "wrap": function(model, message, temperature) { return { "model": 
model, "messages": [ @@ -107,7 +112,8 @@ var engineProfiles = { "role": "user", "content": message } - ] + ], + "temperature": temperature }; }, "callback": function(response) { @@ -123,13 +129,15 @@ var engineProfiles = { } }, "xai": { + "defaultModel": "grok-2-latest", "headers": { "Content-Type": "application/json", "Authorization": "Bearer {apikey}" }, "url": "https://api.x.ai/v1/chat/completions", - "wrap": function(model, message) { + "wrap": function(model, message, temperature) { return { + "model": model, "messages": [ { "role": "system", @@ -140,7 +148,7 @@ var engineProfiles = { "content": message } ], - "model": model + "temperature": temperature } }, "callback": function(response) { @@ -152,12 +160,13 @@ var engineProfiles = { } }, "google": { + "defaultModel": "gemini-1.5-flash", "headers": { "Content-Type": "application/json", "Authorization": "Bearer {apikey}" }, "url": "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={apikey}", - "warp": function(model, message) { + "wrap": function(model, message, temperature) { return { "contents": [ { @@ -189,12 +198,13 @@ var engineProfiles = { } }, "deepseek": { + "defaultModel": "deepseek-chat", "headers": { "Content-Type": "application/json", "Authorization": "Bearer {apikey}" }, "url": "https://api.deepseek.com/chat/completions", - "wrap": function(model, message) { + "wrap": function(model, message, temperature) { "model": model, "messages": [ { @@ -206,6 +216,7 @@ var engineProfiles = { "content": message } ], + "temperature": temperature, "stream": false } }, @@ -225,6 +236,7 @@ var engineProfiles = { function LanguageInferenceEngine() { this.type = "llm"; // e.g. 
legacy (Legacy NLP), llm (LLM) this.provider = ""; + this.model = ""; this.engineProfile = null; this.setProvider = function(provider) { @@ -232,6 +244,7 @@ function LanguageInferenceEngine() { if (provider in engineProfiles) { this.engineProfile = engineProfiles[provider]; + this.model = this.engineProfile.defaultModel; } return this; @@ -252,7 +265,7 @@ function LanguageInferenceEngine() { return this; } - this.inference = function(message) { + this.inference = function(message, temperature) { if (this.engineProfile == null) return this; @@ -267,7 +280,7 @@ function LanguageInferenceEngine() { "apikey": apikey }) .setHeaders(headers) - .setRequestBody(wrap(message)) + .setRequestBody(wrap(this.model, message, temperature)) .open("post", url) .send() .responseBody; @@ -281,7 +294,7 @@ exports.create = function() { return new LanguageInferenceEngine(); }; -exports.VERSIONINFO = "Language Inference Engine (NLP/LLM) integration version 0.1.1"; +exports.VERSIONINFO = "Language Inference Engine (NLP/LLM) integration version 0.1.2"; exports.AUTHOR = "abuse@catswords.net"; exports.global = global; exports.require = global.require;