diff --git a/README.md b/README.md index fa2b798..b1ff8cb 100644 --- a/README.md +++ b/README.md @@ -12,8 +12,7 @@ Do not connect this bot to public servers with coding enabled. This project allo - [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc) (up to v1.21.1, recommend v1.21.1) - [Node.js Installed](https://nodejs.org/) (at least v18) -- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) | [Cerebras API Key](https://cloud.cerebras.ai) - +- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) 
| [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) | [Cerebras API Key](https://cloud.cerebras.ai) | [Mercury API](https://platform.inceptionlabs.ai/docs) ## Install and Run @@ -66,6 +65,7 @@ You can configure the agent's name, model, and prompts in their profile like `an | `hyperbolic` | `HYPERBOLIC_API_KEY` | `hyperbolic/deepseek-ai/DeepSeek-V3` | [docs](https://docs.hyperbolic.xyz/docs/getting-started) | | `vllm` | n/a | `vllm/llama3` | n/a | | `cerebras` | `CEREBRAS_API_KEY` | `cerebras/llama-3.3-70b` | [docs](https://inference-docs.cerebras.ai/introduction) | +| `mercury` | `MERCURY_API_KEY` | `mercury-coder-small` | [docs](https://www.inceptionlabs.ai/) | If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command: `ollama pull llama3.1 && ollama pull nomic-embed-text` diff --git a/keys.example.json b/keys.example.json index 52204ae..fe68128 100644 --- a/keys.example.json +++ b/keys.example.json @@ -14,5 +14,6 @@ "HYPERBOLIC_API_KEY": "", "NOVITA_API_KEY": "", "OPENROUTER_API_KEY": "", - "CEREBRAS_API_KEY": "" + "CEREBRAS_API_KEY": "", + "MERCURY_API_KEY": "" } diff --git a/profiles/mercury.json b/profiles/mercury.json new file mode 100644 index 0000000..cac6d49 --- /dev/null +++ b/profiles/mercury.json @@ -0,0 +1,15 @@ +{ + "name": "Mercury", + + "cooldown": 5000, + + "model": { + "api": "mercury", + "url": "https://api.inceptionlabs.ai/v1", + "model": "mercury-coder-small" + }, + + "embedding": "openai", + + "description": "Official Website Introduction: The world’s first diffusion large language models" +} \ No newline at end of file diff --git a/settings.js b/settings.js index c0f7705..d9f0037 100644 --- a/settings.js +++ b/settings.js @@ -18,6 +18,7 @@ const settings = { // "./profiles/grok.json", // "./profiles/mistral.json", // "./profiles/deepseek.json", + //"./profiles/mercury.json", // using more than 
1 profile requires you to /msg each bot indivually // individual profiles override values from the base profile diff --git a/src/models/mercury.js b/src/models/mercury.js new file mode 100644 index 0000000..d4c4c3e --- /dev/null +++ b/src/models/mercury.js @@ -0,0 +1,92 @@ +import OpenAIApi from 'openai'; +import { getKey, hasKey } from '../utils/keys.js'; +import { strictFormat } from '../utils/text.js'; + +export class Mercury { + constructor(model_name, url, params) { + this.model_name = model_name; + this.params = params; + let config = {}; + if (url) + config.baseURL = url; + + config.apiKey = getKey('MERCURY_API_KEY'); + + this.openai = new OpenAIApi(config); + } + + async sendRequest(turns, systemMessage, stop_seq='***') { + if (typeof stop_seq === 'string') { + stop_seq = [stop_seq]; + } else if (!Array.isArray(stop_seq)) { + stop_seq = []; + } + let messages = [{'role': 'system', 'content': systemMessage}].concat(turns); + messages = strictFormat(messages); + const pack = { + model: this.model_name || "mercury-coder-small", + messages, + stop: stop_seq, + ...(this.params || {}) + }; + + + let res = null; + + try { + console.log('Awaiting mercury api response from model', this.model_name) + // console.log('Messages:', messages); + let completion = await this.openai.chat.completions.create(pack); + if (completion.choices[0].finish_reason == 'length') + throw new Error('Context length exceeded'); + console.log('Received.') + res = completion.choices[0].message.content; + } + catch (err) { + if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) { + console.log('Context length exceeded, trying again with shorter context.'); + return await this.sendRequest(turns.slice(1), systemMessage, stop_seq); + } else if (err.message.includes('image_url')) { + console.log(err); + res = 'Vision is only supported by certain models.'; + } else { + console.log(err); + res = 'My brain disconnected, try again.'; + } + } + 
return res; + } + + async sendVisionRequest(messages, systemMessage, imageBuffer) { + const imageMessages = [...messages]; + imageMessages.push({ + role: "user", + content: [ + { type: "text", text: systemMessage }, + { + type: "image_url", + image_url: { + url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}` + } + } + ] + }); + + return this.sendRequest(imageMessages, systemMessage); + } + + async embed(text) { + if (text.length > 8191) + text = text.slice(0, 8191); + const embedding = await this.openai.embeddings.create({ + model: this.model_name || "text-embedding-3-small", + input: text, + encoding_format: "float", + }); + return embedding.data[0].embedding; + } + +} + + + diff --git a/src/models/prompter.js b/src/models/prompter.js index a8c4db7..6ca9b60 100644 --- a/src/models/prompter.js +++ b/src/models/prompter.js @@ -228,7 +228,7 @@ export class Prompter { console.error('Error: Generated response is not a string', generation); throw new Error('Generated response is not a string'); } - console.log("Generated response:", generation); + console.log("Generated response:", generation); await this._saveLog(prompt, messages, generation, 'conversation'); } catch (error) { @@ -245,7 +245,7 @@ export class Prompter { if (current_msg_time !== this.most_recent_msg_time) { console.warn(`${this.agent.name} received new message while generating, discarding old response.`); return ''; - } + } if (generation?.includes('')) { const [_, afterThink] = generation.split('') @@ -282,7 +282,7 @@ export class Prompter { await this._saveLog(prompt, to_summarize, resp, 'memSaving'); if (resp?.includes('')) { const [_, afterThink] = resp.split('') - resp = afterThink + resp = afterThink; } return resp; }