Merge pull request #592 from mindcraft-bots/mercury

Mercury and azure
This commit is contained in:
Max Robinson 2025-08-23 16:16:45 -05:00 committed by GitHub
commit b7b57e2f04
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 169 additions and 10 deletions

View file

@ -12,8 +12,7 @@ Do not connect this bot to public servers with coding enabled. This project allo
- [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc) (up to v1.21.1, recommend v1.21.1)
- [Node.js Installed](https://nodejs.org/) (at least v18)
- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) | [Cerebras API Key](https://cloud.cerebras.ai)
- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) | [Cerebras API Key](https://cloud.cerebras.ai) | [Mercury API](https://platform.inceptionlabs.ai/docs)
## Install and Run
@ -66,10 +65,13 @@ You can configure the agent's name, model, and prompts in their profile like `an
| `hyperbolic` | `HYPERBOLIC_API_KEY` | `hyperbolic/deepseek-ai/DeepSeek-V3` | [docs](https://docs.hyperbolic.xyz/docs/getting-started) |
| `vllm` | n/a | `vllm/llama3` | n/a |
| `cerebras` | `CEREBRAS_API_KEY` | `cerebras/llama-3.3-70b` | [docs](https://inference-docs.cerebras.ai/introduction) |
| `mercury` | `MERCURY_API_KEY` | `mercury-coder-small` | [docs](https://platform.inceptionlabs.ai/docs) |
If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command:
`ollama pull llama3.1 && ollama pull nomic-embed-text`
To use Azure, you can reuse the `OPENAI_API_KEY` environment variable. You can get the key from the Azure portal. See [azure.json](profiles/azure.json) for an example.
### Online Servers
To connect to online servers your bot will need an official Microsoft/Minecraft account. You can use your own personal one, but you will need another account if you also want to connect and play alongside the bot. To connect, change these lines in `settings.js`:
```javascript

View file

@ -14,5 +14,6 @@
"HYPERBOLIC_API_KEY": "",
"NOVITA_API_KEY": "",
"OPENROUTER_API_KEY": "",
"CEREBRAS_API_KEY": ""
"CEREBRAS_API_KEY": "",
"MERCURY_API_KEY": ""
}

19
profiles/azure.json Normal file
View file

@ -0,0 +1,19 @@
{
"name": "azure",
"model": {
"api": "azure",
"url": "https://<your-resource>.openai.azure.com",
"model": "<chat-deployment-name>",
"params": {
"apiVersion": "2024-08-01-preview"
}
},
"embedding": {
"api": "azure",
"url": "https://<your-resource>.openai.azure.com",
"model": "<embedding-deployment-name>",
"params": {
"apiVersion": "2024-08-01-preview"
}
}
}

View file

@ -1,7 +1,7 @@
{
"name": "claude",
"model": "claude-4-sonnet-latest",
"model": "claude-sonnet-4-20250514",
"embedding": "openai"
}

View file

@ -2,7 +2,7 @@
"name": "claude_thinker",
"model": {
"model": "claude-4-sonnet-latest",
"model": "claude-sonnet-4-20250514",
"params": {
"thinking": {
"type": "enabled",

9
profiles/mercury.json Normal file
View file

@ -0,0 +1,9 @@
{
"name": "Mercury",
"cooldown": 5000,
"model": "mercury/mercury-coder-small",
"embedding": "openai"
}

View file

@ -18,6 +18,7 @@ const settings = {
// "./profiles/grok.json",
// "./profiles/mistral.json",
// "./profiles/deepseek.json",
// "./profiles/mercury.json",
// using more than 1 profile requires you to /msg each bot individually
// individual profiles override values from the base profile

32
src/models/azure.js Normal file
View file

@ -0,0 +1,32 @@
import { AzureOpenAI } from "openai";
import { getKey, hasKey } from '../utils/keys.js';
import { GPT } from './gpt.js'
/**
 * Azure OpenAI chat model wrapper. Reuses the GPT request logic but routes
 * through the AzureOpenAI client, which needs an endpoint, a deployment name,
 * and an apiVersion instead of a plain model name.
 */
export class AzureGPT extends GPT {
    static prefix = 'azure';

    /**
     * @param {string} model_name - The Azure deployment name to use for chat requests.
     * @param {string} url - The Azure resource endpoint, e.g. https://<resource>.openai.azure.com
     * @param {object} params - Extra request params; MUST include `apiVersion`.
     * @throws {Error} If `params.apiVersion` is missing.
     */
    constructor(model_name, url, params) {
        super(model_name, url);
        this.model_name = model_name;
        // Shallow-copy so removing apiVersion below does not mutate the caller's object.
        this.params = { ...(params || {}) };

        if (!this.params.apiVersion) {
            throw new Error('apiVersion is required in params for azure!');
        }

        const config = {};
        if (url)
            config.endpoint = url;
        // Prefer a dedicated Azure key; fall back to the shared OpenAI key
        // (the README documents reusing OPENAI_API_KEY for Azure).
        config.apiKey = hasKey('AZURE_OPENAI_API_KEY') ? getKey('AZURE_OPENAI_API_KEY') : getKey('OPENAI_API_KEY');
        config.deployment = model_name;
        config.apiVersion = this.params.apiVersion;
        delete this.params.apiVersion; // client-level setting; must not be sent with each chat request

        this.openai = new AzureOpenAI(config);
    }
}

View file

@ -21,7 +21,7 @@ export class Claude {
const messages = strictFormat(turns);
let res = null;
try {
console.log('Awaiting anthropic api response...')
console.log(`Awaiting anthropic response from ${this.model_name}...`)
if (!this.params.max_tokens) {
if (this.params.thinking?.budget_tokens) {
this.params.max_tokens = this.params.thinking.budget_tokens + 1000;
@ -31,7 +31,7 @@ export class Claude {
}
}
const resp = await this.anthropic.messages.create({
model: this.model_name || "claude-3-sonnet-20240229",
model: this.model_name || "claude-sonnet-4-20250514",
system: systemMessage,
messages: messages,
...(this.params || {})

95
src/models/mercury.js Normal file
View file

@ -0,0 +1,95 @@
import OpenAIApi from 'openai';
import { getKey, hasKey } from '../utils/keys.js';
import { strictFormat } from '../utils/text.js';
/**
 * Inception Labs Mercury model wrapper. Mercury exposes an OpenAI-compatible
 * API, so this reuses the OpenAI SDK pointed at the Mercury base URL.
 */
export class Mercury {
    static prefix = 'mercury';

    /**
     * @param {string} model_name - Mercury model id (defaults to "mercury-coder-small" per request).
     * @param {string} [url] - Optional API base URL override.
     * @param {object} [params] - Extra params spread into each chat request.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;
        const config = {};
        config.baseURL = url || "https://api.inceptionlabs.ai/v1";
        config.apiKey = getKey('MERCURY_API_KEY');
        this.openai = new OpenAIApi(config);
    }

    /**
     * Send a chat request. On context-length errors, retries with the oldest
     * turn dropped; other errors resolve to a friendly fallback string.
     * @param {Array<object>} turns - Conversation turns ({role, content}).
     * @param {string} systemMessage - System prompt prepended to the turns.
     * @param {string|string[]} [stop_seq='***'] - Stop sequence(s); normalized to an array.
     * @returns {Promise<string>} Model output, or an apology string on failure.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        if (typeof stop_seq === 'string') {
            stop_seq = [stop_seq];
        } else if (!Array.isArray(stop_seq)) {
            stop_seq = [];
        }
        let messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
        messages = strictFormat(messages);
        const pack = {
            model: this.model_name || "mercury-coder-small",
            messages,
            stop: stop_seq,
            ...(this.params || {})
        };

        let res = null;
        try {
            console.log('Awaiting mercury api response from model', this.model_name);
            let completion = await this.openai.chat.completions.create(pack);
            if (completion.choices[0].finish_reason == 'length')
                throw new Error('Context length exceeded');
            console.log('Received.');
            res = completion.choices[0].message.content;
        }
        catch (err) {
            // err.message may be undefined on some SDK/network errors, so use
            // optional chaining before calling string methods on it.
            if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
            } else if (err.message?.includes('image_url')) {
                console.log(err);
                res = 'Vision is only supported by certain models.';
            } else {
                console.log(err);
                res = 'My brain disconnected, try again.';
            }
        }
        return res;
    }

    /**
     * Send a request with an image attached as a base64 data URL.
     * @param {Array<object>} messages - Prior conversation turns (not mutated).
     * @param {string} systemMessage - Prompt text paired with the image.
     * @param {Buffer} imageBuffer - JPEG image bytes.
     * @returns {Promise<string>} Model output.
     */
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const imageMessages = [...messages];
        imageMessages.push({
            role: "user",
            content: [
                { type: "text", text: systemMessage },
                {
                    type: "image_url",
                    image_url: {
                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
                    }
                }
            ]
        });

        return this.sendRequest(imageMessages, systemMessage);
    }

    /**
     * Embed text via the embeddings endpoint, truncating to the 8191-char limit.
     * @param {string} text - Input text.
     * @returns {Promise<number[]>} Embedding vector.
     */
    async embed(text) {
        if (text.length > 8191)
            text = text.slice(0, 8191);
        const embedding = await this.openai.embeddings.create({
            model: this.model_name || "text-embedding-3-small",
            input: text,
            encoding_format: "float",
        });
        return embedding.data[0].embedding;
    }
}

View file

@ -228,7 +228,7 @@ export class Prompter {
console.error('Error: Generated response is not a string', generation);
throw new Error('Generated response is not a string');
}
console.log("Generated response:", generation);
console.log("Generated response:", generation);
await this._saveLog(prompt, messages, generation, 'conversation');
} catch (error) {
@ -245,7 +245,7 @@ export class Prompter {
if (current_msg_time !== this.most_recent_msg_time) {
console.warn(`${this.agent.name} received new message while generating, discarding old response.`);
return '';
}
}
if (generation?.includes('</think>')) {
const [_, afterThink] = generation.split('</think>')
@ -282,7 +282,7 @@ export class Prompter {
await this._saveLog(prompt, to_summarize, resp, 'memSaving');
if (resp?.includes('</think>')) {
const [_, afterThink] = resp.split('</think>')
resp = afterThink
resp = afterThink;
}
return resp;
}