Mirror of https://github.com/kolbytn/mindcraft.git (synced 2025-08-28 18:03:03 +02:00)
Commit 808c29b534: 34 changed files with 455 additions and 221 deletions
@@ -12,7 +12,7 @@ Do not connect this bot to public servers with coding enabled. This project allo
 
 - [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc) (up to v1.21.1, recommend v1.21.1)
 - [Node.js Installed](https://nodejs.org/) (at least v18)
-- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) |
+- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) | [Cerebras API Key](https://cloud.cerebras.ai) | [Mercury API](https://platform.inceptionlabs.ai/docs)
 
 ## Install and Run
@@ -64,10 +64,14 @@ You can configure the agent's name, model, and prompts in their profile like `an
 | `glhf.chat` | `GHLF_API_KEY` | `glhf/hf:meta-llama/Llama-3.1-405B-Instruct` | [docs](https://glhf.chat/user-settings/api) |
 | `hyperbolic` | `HYPERBOLIC_API_KEY` | `hyperbolic/deepseek-ai/DeepSeek-V3` | [docs](https://docs.hyperbolic.xyz/docs/getting-started) |
 | `vllm` | n/a | `vllm/llama3` | n/a |
+| `cerebras` | `CEREBRAS_API_KEY` | `cerebras/llama-3.3-70b` | [docs](https://inference-docs.cerebras.ai/introduction) |
+| `mercury` | `MERCURY_API_KEY` | `mercury-coder-small` | [docs](https://www.inceptionlabs.ai/) |
 
 If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command:
 `ollama pull llama3.1 && ollama pull nomic-embed-text`
 
+To use Azure, you can reuse the `OPENAI_API_KEY` environment variable. You can get the key from the Azure portal. See [azure.json](profiles/azure.json) for an example.
+
 ### Online Servers
 To connect to online servers your bot will need an official Microsoft/Minecraft account. You can use your own personal one, but you will need another account if you also want to connect and play with it. To connect, change these lines in `settings.js`:
 ```javascript
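The hunk above is truncated at the opening code fence, so the referenced settings block is not visible in this view. For orientation, the connection fields in `settings.js` look roughly like this sketch (the host and port values are illustrative placeholders, not from this diff):

```javascript
// Sketch of the connection settings the README points at (values are placeholders).
const settings = {
    "host": "111.222.333.444", // the IP address of the server, or "localhost"
    "port": 25565,             // the port the server listens on
    "auth": "microsoft",       // "microsoft" for online servers with a real account
};
```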
@@ -174,3 +178,5 @@ Some of the node modules that we depend on have bugs in them. To add a patch, ch
 url = {https://arxiv.org/abs/2504.17950},
 }
 ```
+
+
@@ -13,5 +13,7 @@
     "GHLF_API_KEY": "",
     "HYPERBOLIC_API_KEY": "",
     "NOVITA_API_KEY": "",
-    "OPENROUTER_API_KEY": ""
+    "OPENROUTER_API_KEY": "",
+    "CEREBRAS_API_KEY": "",
+    "MERCURY_API_KEY": ""
 }
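These entries live in a JSON keys file whose placeholders get filled with real keys. A minimal sketch of the lookup pattern such a file supports (a hypothetical mirror of the project's `getKey` helper in `src/utils/keys.js`; the environment-variable fallback is an assumption, not taken from this diff):

```javascript
// Hypothetical sketch: read an API key from keys.json, falling back to an
// environment variable of the same name (assumed behavior, not the actual helper).
import { readFileSync } from 'fs';

const keys = JSON.parse(readFileSync('./keys.json', 'utf8'));

function getKey(name) {
    const key = keys[name] || process.env[name];
    if (!key)
        throw new Error(`API key "${name}" not found in keys.json or environment`);
    return key;
}

const cerebrasKey = getKey('CEREBRAS_API_KEY');
```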
@@ -2,6 +2,7 @@
     "type": "module",
     "dependencies": {
         "@anthropic-ai/sdk": "^0.17.1",
+        "@cerebras/cerebras_cloud_sdk": "^1.46.0",
         "@google/generative-ai": "^0.2.1",
         "@huggingface/inference": "^2.8.1",
         "@mistralai/mistralai": "^1.1.0",
profiles/azure.json (new file, 19 lines)
@@ -0,0 +1,19 @@
+{
+    "name": "azure",
+    "model": {
+        "api": "azure",
+        "url": "https://<your-resource>.openai.azure.com",
+        "model": "<chat-deployment-name>",
+        "params": {
+            "apiVersion": "2024-08-01-preview"
+        }
+    },
+    "embedding": {
+        "api": "azure",
+        "url": "https://<your-resource>.openai.azure.com",
+        "model": "<embedding-deployment-name>",
+        "params": {
+            "apiVersion": "2024-08-01-preview"
+        }
+    }
+}
@@ -1,7 +1,7 @@
 {
     "name": "claude",
 
-    "model": "claude-3-5-sonnet-latest",
+    "model": "claude-sonnet-4-20250514",
 
     "embedding": "openai"
 }
@@ -2,7 +2,7 @@
     "name": "claude_thinker",
 
     "model": {
-        "model": "claude-3-7-sonnet-latest",
+        "model": "claude-sonnet-4-20250514",
         "params": {
             "thinking": {
                 "type": "enabled",
@@ -1,7 +1,7 @@
 {
     "name": "gemini",
 
-    "model": "gemini-2.0-flash",
+    "model": "gemini-2.5-flash",
 
     "cooldown": 5000
 }
@@ -1,7 +1,7 @@
 {
     "name": "Grok",
 
-    "model": "grok-beta",
+    "model": "grok-3-mini-latest",
 
     "embedding": "openai"
 }
profiles/mercury.json (new file, 9 lines)
@@ -0,0 +1,9 @@
+{
+    "name": "Mercury",
+
+    "cooldown": 5000,
+
+    "model": "mercury/mercury-coder-small",
+
+    "embedding": "openai"
+}
@@ -7,7 +7,7 @@ const settings = {
     // the mindserver manages all agents and hosts the UI
     "mindserver_port": 8080,
 
-    "base_profile": "survival", // survival, creative, assistant, or god_mode
+    "base_profile": "survival", // survival, assistant, creative, or god_mode
     "profiles": [
         "./andy.json",
         // "./profiles/gpt.json",
@@ -18,6 +18,7 @@ const settings = {
         // "./profiles/grok.json",
         // "./profiles/mistral.json",
         // "./profiles/deepseek.json",
+        //"./profiles/mercury.json",
 
         // using more than 1 profile requires you to /msg each bot individually
         // individual profiles override values from the base profile
@@ -69,7 +69,7 @@ export class ActionManager {
         else {
             this.recent_action_counter = 0;
         }
-        if (this.recent_action_counter > 2) {
+        if (this.recent_action_counter > 3) {
             console.warn('Fast action loop detected, cancelling resume.');
             this.cancelResume(); // likely cause of repetition
         }
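The threshold bump from 2 to 3 lets one extra quick completion pass before the loop guard trips. Isolated as a sketch (the window length is an assumption; only the counter threshold comes from the diff):

```javascript
// Sketch of a fast-action-loop guard: count actions that finish in rapid
// succession; after too many in a row, assume repetition and cancel auto-resume.
class LoopGuard {
    constructor(windowMs = 3000, maxFastActions = 3) {
        this.windowMs = windowMs;             // "fast" means finishing within this window
        this.maxFastActions = maxFastActions;
        this.lastFinish = 0;
        this.fastCount = 0;
    }
    onActionFinished() {
        const now = Date.now();
        this.fastCount = (now - this.lastFinish < this.windowMs) ? this.fastCount + 1 : 0;
        this.lastFinish = now;
        return this.fastCount > this.maxFastActions; // true => cancel the resume
    }
}
```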
@@ -228,28 +228,33 @@ export async function smeltItem(bot, itemName, num=1) {
     await furnace.putInput(mc.getItemId(itemName), null, num);
     // wait for the items to smelt
     let total = 0;
-    let collected_last = true;
     let smelted_item = null;
    await new Promise(resolve => setTimeout(resolve, 200));
+    let last_collected = Date.now();
     while (total < num) {
-        await new Promise(resolve => setTimeout(resolve, 10000));
-        console.log('checking...');
-        let collected = false;
+        await new Promise(resolve => setTimeout(resolve, 1000));
         if (furnace.outputItem()) {
             smelted_item = await furnace.takeOutput();
             if (smelted_item) {
                 total += smelted_item.count;
-                collected = true;
+                last_collected = Date.now();
             }
         }
-        if (!collected && !collected_last) {
-            break; // if nothing was collected this time or last time
+        if (Date.now() - last_collected > 11000) {
+            break; // if nothing has been collected in 11 seconds, stop
         }
-        collected_last = collected;
         if (bot.interrupt_code) {
             break;
         }
     }
+    // take all remaining in input/fuel slots
+    if (furnace.inputItem()) {
+        await furnace.takeInput();
+    }
+    if (furnace.fuelItem()) {
+        await furnace.takeFuel();
+    }
+
     await bot.closeWindow(furnace);
 
     if (placedFurnace) {
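The rewrite replaces the old "nothing collected in two consecutive 10-second checks" heuristic with a wall-clock deadline: poll every second, and stop only once nothing has been collected for 11 seconds, slightly longer than the 10 seconds one furnace smelt cycle takes. The pattern in isolation, as a generic sketch:

```javascript
// Generic deadline-based polling loop mirroring the smeltItem change: poll
// frequently, remember when progress was last made, stop once progress stalls.
async function pollUntilDone(checkProgress, { intervalMs = 1000, stallMs = 11000 } = {}) {
    let lastProgress = Date.now();
    while (true) {
        await new Promise(resolve => setTimeout(resolve, intervalMs));
        const { madeProgress, done } = await checkProgress();
        if (madeProgress)
            lastProgress = Date.now();
        if (done)
            return true;                          // finished normally
        if (Date.now() - lastProgress > stallMs)
            return false;                         // stalled past the deadline
    }
}
```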
@@ -1040,7 +1045,7 @@ export async function goToGoal(bot, goal) {
         log(bot, `Found destructive path.`);
     }
     else {
-        log(bot, `Could not find a path to goal, attempting to navigate anyway using destructive movements.`);
+        log(bot, `Path not found, but attempting to navigate anyway using destructive movements.`);
     }
 
     const doorCheckInterval = startDoorInterval(bot);
@@ -1288,11 +1293,29 @@ export async function followPlayer(bot, username, distance=4) {
     while (!bot.interrupt_code) {
         await new Promise(resolve => setTimeout(resolve, 500));
         // in cheat mode, if the distance is too far, teleport to the player
-        if (bot.modes.isOn('cheat') && bot.entity.position.distanceTo(player.position) > 100 && player.isOnGround) {
+        const distance_from_player = bot.entity.position.distanceTo(player.position);
+
+        const teleport_distance = 100;
+        const ignore_modes_distance = 30;
+        const nearby_distance = distance + 2;
+
+        if (distance_from_player > teleport_distance && bot.modes.isOn('cheat')) {
+            // teleport with cheat mode
             await goToPlayer(bot, username);
         }
-        const is_nearby = bot.entity.position.distanceTo(player.position) <= distance + 2;
-        if (is_nearby) {
+        else if (distance_from_player > ignore_modes_distance) {
+            // these modes slow down the bot, and we want to catch up
+            bot.modes.pause('item_collecting');
+            bot.modes.pause('hunting');
+            bot.modes.pause('torch_placing');
+        }
+        else if (distance_from_player <= ignore_modes_distance) {
+            bot.modes.unpause('item_collecting');
+            bot.modes.unpause('hunting');
+            bot.modes.unpause('torch_placing');
+        }
+
+        if (distance_from_player <= nearby_distance) {
             clearInterval(doorCheckInterval);
             doorCheckInterval = null;
             bot.modes.pause('unstuck');
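The follow loop now classifies distance into bands: beyond 100 blocks it teleports (cheat mode only), beyond 30 blocks it pauses the modes that slow pursuit, within 30 it unpauses them, and "nearby" remains the follow distance plus 2. Note the final `else if` condition is the exact complement of the branch before it, so a plain `else` would behave identically. As a simplified single-band classification (band values copied from the diff; in the diff itself the nearby check runs independently of the pause/unpause logic):

```javascript
// Classify follower distance into the bands used by followPlayer above.
function followBand(distanceFromPlayer, followDistance = 4) {
    if (distanceFromPlayer > 100) return 'teleport';   // cheat mode only
    if (distanceFromPlayer > 30)  return 'catch_up';   // pause slow modes
    if (distanceFromPlayer <= followDistance + 2) return 'nearby';
    return 'normal';
}

console.log(followBand(150)); // 'teleport'
console.log(followBand(50));  // 'catch_up'
console.log(followBand(5));   // 'nearby'
```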
@@ -156,7 +156,7 @@ const modes_list = [
     {
         name: 'hunting',
         description: 'Hunt nearby animals when idle.',
-        interrupts: [],
+        interrupts: ['action:followPlayer'],
        on: true,
         active: false,
         update: async function (agent) {
src/models/_model_map.js (new file, 89 lines)
@@ -0,0 +1,89 @@
+import { promises as fs } from 'fs';
+import path from 'path';
+import { fileURLToPath, pathToFileURL } from 'url';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Dynamically discover model classes in this directory.
+// Each model class must export a static `prefix` string.
+const apiMap = await (async () => {
+    const map = {};
+    const files = (await fs.readdir(__dirname))
+        .filter(f => f.endsWith('.js') && f !== '_model_map.js' && f !== 'prompter.js');
+    for (const file of files) {
+        try {
+            const moduleUrl = pathToFileURL(path.join(__dirname, file)).href;
+            const mod = await import(moduleUrl);
+            for (const exported of Object.values(mod)) {
+                if (typeof exported === 'function' && Object.prototype.hasOwnProperty.call(exported, 'prefix')) {
+                    const prefix = exported.prefix;
+                    if (typeof prefix === 'string' && prefix.length > 0) {
+                        map[prefix] = exported;
+                    }
+                }
+            }
+        } catch (e) {
+            console.warn('Failed to load model module:', file, e?.message || e);
+        }
+    }
+    return map;
+})();
+
+export function selectAPI(profile) {
+    if (typeof profile === 'string' || profile instanceof String) {
+        profile = {model: profile};
+    }
+    // backwards compatibility with local->ollama
+    if (profile.api?.includes('local') || profile.model?.includes('local')) {
+        profile.api = 'ollama';
+        if (profile.model) {
+            profile.model = profile.model.replace('local', 'ollama');
+        }
+    }
+    if (!profile.api) {
+        const api = Object.keys(apiMap).find(key => profile.model?.startsWith(key));
+        if (api) {
+            profile.api = api;
+        }
+        else {
+            // check for some common models that do not require prefixes
+            if (profile.model.includes('gpt') || profile.model.includes('o1') || profile.model.includes('o3'))
+                profile.api = 'openai';
+            else if (profile.model.includes('claude'))
+                profile.api = 'anthropic';
+            else if (profile.model.includes('gemini'))
+                profile.api = 'google';
+            else if (profile.model.includes('grok'))
+                profile.api = 'xai'; // the Grok class registers under the 'xai' prefix
+            else if (profile.model.includes('mistral'))
+                profile.api = 'mistral';
+            else if (profile.model.includes('deepseek'))
+                profile.api = 'deepseek';
+            else if (profile.model.includes('qwen'))
+                profile.api = 'qwen';
+        }
+        if (!profile.api) {
+            throw new Error(`Unknown model: ${profile.model}`);
+        }
+    }
+    if (!apiMap[profile.api]) {
+        throw new Error(`Unknown api: ${profile.api}`);
+    }
+    let model_name = profile.model.replace(profile.api + '/', ''); // remove prefix
+    profile.model = model_name === "" ? null : model_name; // if model is empty, set to null
+    return profile;
+}
+
+export function createModel(profile) {
+    if (!!apiMap[profile.model]) {
+        // if the model value is an api (instead of a specific model name)
+        // then set model to null so it uses the default model for that api
+        profile.model = null;
+    }
+    if (!apiMap[profile.api]) {
+        throw new Error(`Unknown api: ${profile.api}`);
+    }
+    const model = new apiMap[profile.api](profile.model, profile.url, profile.params);
+    return model;
+}
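With this registry, adding a provider is just dropping a class with a static `prefix` into `src/models/`; nothing else needs registering by hand. Expected resolution behavior, sketched (assuming the model classes shown elsewhere in this commit are on disk):

```javascript
// Sketch: how profile strings resolve through the new registry.
import { selectAPI, createModel } from './_model_map.js';

// Prefix match: 'ollama/llama3.1' -> { api: 'ollama', model: 'llama3.1' }
const ollamaProfile = selectAPI('ollama/llama3.1');

// Unprefixed fallback: 'claude-sonnet-4-20250514' -> api 'anthropic'
const claudeProfile = selectAPI('claude-sonnet-4-20250514');

// Instantiates the class registered under profile.api with
// (model, url, params); a null model selects that api's default.
const chatModel = createModel(ollamaProfile);
```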
src/models/azure.js (new file, 32 lines)
@@ -0,0 +1,32 @@
+import { AzureOpenAI } from "openai";
+import { getKey, hasKey } from '../utils/keys.js';
+import { GPT } from './gpt.js';
+
+export class AzureGPT extends GPT {
+    static prefix = 'azure';
+    constructor(model_name, url, params) {
+        super(model_name, url);
+
+        this.model_name = model_name;
+        this.params = params || {};
+
+        const config = {};
+
+        if (url)
+            config.endpoint = url;
+
+        config.apiKey = hasKey('AZURE_OPENAI_API_KEY') ? getKey('AZURE_OPENAI_API_KEY') : getKey('OPENAI_API_KEY');
+
+        config.deployment = model_name;
+
+        if (this.params.apiVersion) {
+            config.apiVersion = this.params.apiVersion;
+            delete this.params.apiVersion; // remove from params so later requests don't send it
+        }
+        else {
+            throw new Error('apiVersion is required in params for azure!');
+        }
+
+        this.openai = new AzureOpenAI(config);
+    }
+}
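The wrapper pairs with the new `profiles/azure.json`: the profile's `url`, `model`, and `params.apiVersion` land in the Azure client config as endpoint, deployment, and API version. Constructed directly, it looks like this (a sketch; resource and deployment names are placeholders):

```javascript
// Sketch: constructing AzureGPT directly. Normally createModel() does this
// from the profile; all values here are placeholders.
import { AzureGPT } from './azure.js';

const azure = new AzureGPT(
    '<chat-deployment-name>',                    // becomes config.deployment
    'https://<your-resource>.openai.azure.com',  // becomes config.endpoint
    { apiVersion: '2024-08-01-preview' }         // required; constructor throws without it
);
```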
src/models/cerebras.js (new file, 61 lines)
@@ -0,0 +1,61 @@
+import CerebrasSDK from '@cerebras/cerebras_cloud_sdk';
+import { strictFormat } from '../utils/text.js';
+import { getKey } from '../utils/keys.js';
+
+export class Cerebras {
+    static prefix = 'cerebras';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.url = url;
+        this.params = params;
+
+        // Initialize client with API key
+        this.client = new CerebrasSDK({ apiKey: getKey('CEREBRAS_API_KEY') });
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq = '***') {
+        // Format messages array
+        const messages = strictFormat(turns);
+        messages.unshift({ role: 'system', content: systemMessage });
+
+        const pack = {
+            model: this.model_name || 'gpt-oss-120b',
+            messages,
+            stream: false,
+            ...(this.params || {}),
+        };
+
+        let res;
+        try {
+            const completion = await this.client.chat.completions.create(pack);
+            // OpenAI-compatible shape
+            res = completion.choices?.[0]?.message?.content || '';
+        } catch (err) {
+            console.error('Cerebras API error:', err);
+            res = 'My brain disconnected, try again.';
+        }
+        return res;
+    }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = [...messages];
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "text", text: systemMessage },
+                {
+                    type: "image_url",
+                    image_url: {
+                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                    }
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages, systemMessage);
+    }
+
+    async embed(text) {
+        throw new Error('Embeddings are not supported by Cerebras.');
+    }
+}
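The class follows the same shape as the other wrappers: OpenAI-style chat completions, no streaming, no embeddings. A usage sketch through that common interface (the turn format mirrors the other wrappers in this directory):

```javascript
// Sketch: one round-trip through the Cerebras wrapper.
import { Cerebras } from './cerebras.js';

const model = new Cerebras('llama-3.3-70b'); // null would fall back to 'gpt-oss-120b'
const turns = [{ role: 'user', content: 'Say hello in five words.' }];
const reply = await model.sendRequest(turns, 'You are a helpful Minecraft bot.');
console.log(reply);
```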
@@ -3,6 +3,7 @@ import { strictFormat } from '../utils/text.js';
 import { getKey } from '../utils/keys.js';
 
 export class Claude {
+    static prefix = 'anthropic';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params || {};
@@ -20,7 +21,7 @@ export class Claude {
         const messages = strictFormat(turns);
         let res = null;
         try {
-            console.log('Awaiting anthropic api response...')
+            console.log(`Awaiting anthropic response from ${this.model_name}...`)
             if (!this.params.max_tokens) {
                 if (this.params.thinking?.budget_tokens) {
                     this.params.max_tokens = this.params.thinking.budget_tokens + 1000;
@@ -30,7 +31,7 @@ export class Claude {
             }
         }
         const resp = await this.anthropic.messages.create({
-            model: this.model_name || "claude-3-sonnet-20240229",
+            model: this.model_name || "claude-sonnet-4-20250514",
             system: systemMessage,
             messages: messages,
             ...(this.params || {})
@@ -3,6 +3,7 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class DeepSeek {
+    static prefix = 'deepseek';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params;
@@ -3,6 +3,7 @@ import { toSinglePrompt, strictFormat } from '../utils/text.js';
 import { getKey } from '../utils/keys.js';
 
 export class Gemini {
+    static prefix = 'google';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params;
@@ -36,7 +37,7 @@ export class Gemini {
     async sendRequest(turns, systemMessage) {
         let model;
         const modelConfig = {
-            model: this.model_name || "gemini-1.5-flash",
+            model: this.model_name || "gemini-2.5-flash",
             // systemInstruction does not work bc google is trash
         };
         if (this.url) {
@@ -142,15 +143,15 @@ export class Gemini {
     }
 
     async embed(text) {
-        let model;
+        let model = this.model_name || "text-embedding-004";
         if (this.url) {
             model = this.genAI.getGenerativeModel(
-                { model: "text-embedding-004" },
+                { model },
                 { baseUrl: this.url }
             );
         } else {
             model = this.genAI.getGenerativeModel(
-                { model: "text-embedding-004" }
+                { model }
             );
         }
@@ -2,6 +2,7 @@ import OpenAIApi from 'openai';
 import { getKey } from '../utils/keys.js';
 
 export class GLHF {
+    static prefix = 'glhf';
     constructor(model_name, url) {
         this.model_name = model_name;
         const apiKey = getKey('GHLF_API_KEY');
@@ -3,6 +3,7 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class GPT {
+    static prefix = 'openai';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params;
@@ -22,20 +23,21 @@ export class GPT {
     async sendRequest(turns, systemMessage, stop_seq='***') {
         let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
         messages = strictFormat(messages);
+        let model = this.model_name || "gpt-4o-mini";
         const pack = {
-            model: this.model_name || "gpt-3.5-turbo",
+            model: model,
             messages,
             stop: stop_seq,
             ...(this.params || {})
         };
-        if (this.model_name.includes('o1') || this.model_name.includes('o3') || this.model_name.includes('5')) {
+        if (model.includes('o1') || model.includes('o3') || model.includes('5')) {
             delete pack.stop;
         }
 
         let res = null;
 
         try {
-            console.log('Awaiting openai api response from model', this.model_name)
+            console.log('Awaiting openai api response from model', model)
             // console.log('Messages:', messages);
             let completion = await this.openai.chat.completions.create(pack);
             if (completion.choices[0].finish_reason == 'length')
@@ -88,6 +90,3 @@ export class GPT {
     }
 }
-
-
-
@@ -3,6 +3,7 @@ import { getKey } from '../utils/keys.js';
 
 // xAI doesn't supply a SDK for their models, but fully supports OpenAI and Anthropic SDKs
 export class Grok {
+    static prefix = 'xai';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.url = url;
@@ -19,13 +20,12 @@ export class Grok {
         this.openai = new OpenAIApi(config);
     }
 
-    async sendRequest(turns, systemMessage, stop_seq='***') {
+    async sendRequest(turns, systemMessage) {
         let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
 
         const pack = {
-            model: this.model_name || "grok-beta",
+            model: this.model_name || "grok-3-mini-latest",
             messages,
-            stop: [stop_seq],
             ...(this.params || {})
         };
 
@@ -42,7 +42,7 @@ export class Grok {
         catch (err) {
             if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
                 console.log('Context length exceeded, trying again with shorter context.');
-                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+                return await this.sendRequest(turns.slice(1), systemMessage);
             } else if (err.message.includes('The model expects a single `text` element per message.')) {
                 console.log(err);
                 res = 'Vision is only supported by certain models.';
@@ -6,6 +6,7 @@ import { getKey } from '../utils/keys.js';
 
 // Umbrella class for everything under the sun... That GroqCloud provides, that is.
 export class GroqCloudAPI {
+    static prefix = 'groq';
 
     constructor(model_name, url, params) {
 
@@ -49,7 +50,7 @@ export class GroqCloudAPI {
 
             let completion = await this.groq.chat.completions.create({
                 "messages": messages,
-                "model": this.model_name || "llama-3.3-70b-versatile",
+                "model": this.model_name || "qwen/qwen3-32b",
                 "stream": false,
                 "stop": stop_seq,
                 ...(this.params || {})
@@ -63,7 +64,6 @@ export class GroqCloudAPI {
             if (err.message.includes("content must be a string")) {
                 res = "Vision is only supported by certain models.";
             } else {
-                console.log(this.model_name);
                 res = "My brain disconnected, try again.";
             }
             console.log(err);
@@ -3,6 +3,7 @@ import { getKey } from '../utils/keys.js';
 import { HfInference } from "@huggingface/inference";
 
 export class HuggingFace {
+    static prefix = 'huggingface';
     constructor(model_name, url, params) {
         // Remove 'huggingface/' prefix if present
         this.model_name = model_name.replace('huggingface/', '');
@@ -1,6 +1,7 @@
 import { getKey } from '../utils/keys.js';
 
 export class Hyperbolic {
+    static prefix = 'hyperbolic';
     constructor(modelName, apiUrl) {
         this.modelName = modelName || "deepseek-ai/DeepSeek-V3";
         this.apiUrl = apiUrl || "https://api.hyperbolic.xyz/v1/chat/completions";
src/models/mercury.js (new file, 95 lines)
@@ -0,0 +1,95 @@
+import OpenAIApi from 'openai';
+import { getKey, hasKey } from '../utils/keys.js';
+import { strictFormat } from '../utils/text.js';
+
+export class Mercury {
+    static prefix = 'mercury';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
+        let config = {};
+        if (url)
+            config.baseURL = url;
+        else
+            config.baseURL = "https://api.inceptionlabs.ai/v1";
+
+        config.apiKey = getKey('MERCURY_API_KEY');
+
+        this.openai = new OpenAIApi(config);
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq='***') {
+        if (typeof stop_seq === 'string') {
+            stop_seq = [stop_seq];
+        } else if (!Array.isArray(stop_seq)) {
+            stop_seq = [];
+        }
+        let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
+        messages = strictFormat(messages);
+        const pack = {
+            model: this.model_name || "mercury-coder-small",
+            messages,
+            stop: stop_seq,
+            ...(this.params || {})
+        };
+
+        let res = null;
+
+        try {
+            console.log('Awaiting mercury api response from model', this.model_name)
+            // console.log('Messages:', messages);
+            let completion = await this.openai.chat.completions.create(pack);
+            if (completion.choices[0].finish_reason == 'length')
+                throw new Error('Context length exceeded');
+            console.log('Received.')
+            res = completion.choices[0].message.content;
+        }
+        catch (err) {
+            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
+                console.log('Context length exceeded, trying again with shorter context.');
+                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+            } else if (err.message.includes('image_url')) {
+                console.log(err);
+                res = 'Vision is only supported by certain models.';
+            } else {
+                console.log(err);
+                res = 'My brain disconnected, try again.';
+            }
+        }
+        return res;
+    }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = [...messages];
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "text", text: systemMessage },
+                {
+                    type: "image_url",
+                    image_url: {
+                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                    }
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages, systemMessage);
+    }
+
+    async embed(text) {
+        if (text.length > 8191)
+            text = text.slice(0, 8191);
+        const embedding = await this.openai.embeddings.create({
+            model: this.model_name || "text-embedding-3-small",
+            input: text,
+            encoding_format: "float",
+        });
+        return embedding.data[0].embedding;
+    }
+}
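Mercury exposes an OpenAI-compatible endpoint, so this wrapper mostly mirrors gpt.js; the one local wrinkle is that it normalizes the stop sequence to an array before sending. That normalization in isolation:

```javascript
// Sketch of Mercury.sendRequest's stop-sequence normalization:
// a lone string becomes a one-element array; anything non-array becomes [].
function normalizeStop(stop_seq) {
    if (typeof stop_seq === 'string') return [stop_seq];
    return Array.isArray(stop_seq) ? stop_seq : [];
}

console.log(normalizeStop('***'));      // ['***']
console.log(normalizeStop(['a', 'b'])); // ['a', 'b']
console.log(normalizeStop(undefined));  // []
```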
@@ -3,6 +3,7 @@ import { getKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class Mistral {
+    static prefix = 'mistral';
     #client;
 
     constructor(model_name, url, params) {
@@ -4,8 +4,9 @@ import { strictFormat } from '../utils/text.js';
 
 // llama, mistral
 export class Novita {
+    static prefix = 'novita';
     constructor(model_name, url, params) {
-        this.model_name = model_name.replace('novita/', '');
+        this.model_name = model_name;
         this.url = url || 'https://api.novita.ai/v3/openai';
         this.params = params;
@@ -25,7 +26,7 @@ export class Novita {
         messages = strictFormat(messages);
 
         const pack = {
-            model: this.model_name || "meta-llama/llama-3.1-70b-instruct",
+            model: this.model_name || "meta-llama/llama-4-scout-17b-16e-instruct",
             messages,
             stop: [stop_seq],
             ...(this.params || {})
@@ -1,6 +1,7 @@
 import { strictFormat } from '../utils/text.js';
 
-export class Local {
+export class Ollama {
+    static prefix = 'ollama';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params;
@@ -10,11 +11,9 @@ export class Local {
     }
 
     async sendRequest(turns, systemMessage) {
-        let model = this.model_name || 'llama3.1'; // Updated to llama3.1, as it is more performant than llama3
+        let model = this.model_name || 'sweaterdog/andy-4:micro-q8_0';
         let messages = strictFormat(turns);
         messages.unshift({ role: 'system', content: systemMessage });
 
-        // We'll attempt up to 5 times for models with deepseek-r1-esk reasoning if the <think> tags are mismatched.
         const maxAttempts = 5;
         let attempt = 0;
         let finalRes = null;
@@ -24,14 +23,14 @@ export class Local {
             console.log(`Awaiting local response... (model: ${model}, attempt: ${attempt})`);
             let res = null;
             try {
-                res = await this.send(this.chat_endpoint, {
+                let apiResponse = await this.send(this.chat_endpoint, {
                     model: model,
                     messages: messages,
                     stream: false,
                     ...(this.params || {})
                 });
-                if (res) {
-                    res = res['message']['content'];
+                if (apiResponse) {
+                    res = apiResponse['message']['content'];
                 } else {
                     res = 'No response data.';
                 }
@@ -43,36 +42,27 @@ export class Local {
                     console.log(err);
                     res = 'My brain disconnected, try again.';
                 }
             }
 
-            // If the model name includes "deepseek-r1" or "Andy-3.5-reasoning", then handle the <think> block.
             const hasOpenTag = res.includes("<think>");
             const hasCloseTag = res.includes("</think>");
 
-            // If there's a partial mismatch, retry to get a complete response.
             if ((hasOpenTag && !hasCloseTag)) {
                 console.warn("Partial <think> block detected. Re-generating...");
-                continue;
+                if (attempt < maxAttempts) continue;
             }
 
-            // If </think> is present but <think> is not, prepend <think>
             if (hasCloseTag && !hasOpenTag) {
                 res = '<think>' + res;
             }
-            // Changed this so if the model reasons, using <think> and </think> but doesn't start the message with <think>, <think> ges prepended to the message so no error occur.
 
-            // If both tags appear, remove them (and everything inside).
             if (hasOpenTag && hasCloseTag) {
-                res = res.replace(/<think>[\s\S]*?<\/think>/g, '');
+                res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
             }
 
             finalRes = res;
-            break; // Exit the loop if we got a valid response.
+            break;
         }
 
         if (finalRes == null) {
-            console.warn("Could not get a valid <think> block or normal response after max attempts.");
+            console.warn("Could not get a valid response after max attempts.");
             finalRes = 'I thought too hard, sorry, try again.';
         }
         return finalRes;
@@ -104,4 +94,22 @@ export class Local {
         }
         return data;
     }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = [...messages];
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "text", text: systemMessage },
+                {
+                    type: "image_url",
+                    image_url: {
+                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                    }
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages, systemMessage);
+    }
 }
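The `<think>` handling now tolerates reasoning models that emit a closing tag without an opener, trims the stripped result, and only retries while attempts remain. The same logic as a standalone helper (a sketch mirroring the diff, including the detail that the prepended opener is not re-stripped because the flags were computed first):

```javascript
// Sketch of the <think>-block handling from the Ollama wrapper.
// Returns { retry: true } when reasoning was truncated mid-block.
function handleThinkTags(res) {
    const hasOpenTag = res.includes('<think>');
    const hasCloseTag = res.includes('</think>');
    if (hasOpenTag && !hasCloseTag)
        return { retry: true, text: res };       // partial block: regenerate
    if (hasCloseTag && !hasOpenTag)
        res = '<think>' + res;                   // closing tag only: prepend the opener
    if (hasOpenTag && hasCloseTag)               // note: flags predate the prepend
        res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
    return { retry: false, text: res };
}
```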
@@ -3,6 +3,7 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class OpenRouter {
+    static prefix = 'openrouter';
     constructor(model_name, url) {
         this.model_name = model_name;
 
@@ -5,26 +5,10 @@ import { SkillLibrary } from "../agent/library/skill_library.js";
 import { stringifyTurns } from '../utils/text.js';
 import { getCommand } from '../agent/commands/index.js';
 import settings from '../agent/settings.js';
 
-import { Gemini } from './gemini.js';
-import { GPT } from './gpt.js';
-import { Claude } from './claude.js';
-import { Mistral } from './mistral.js';
-import { ReplicateAPI } from './replicate.js';
-import { Local } from './local.js';
-import { Novita } from './novita.js';
-import { GroqCloudAPI } from './groq.js';
-import { HuggingFace } from './huggingface.js';
-import { Qwen } from "./qwen.js";
-import { Grok } from "./grok.js";
-import { DeepSeek } from './deepseek.js';
-import { Hyperbolic } from './hyperbolic.js';
-import { GLHF } from './glhf.js';
-import { OpenRouter } from './openrouter.js';
-import { VLLM } from './vllm.js';
 import { promises as fs } from 'fs';
 import path from 'path';
 import { fileURLToPath } from 'url';
+import { selectAPI, createModel } from './_model_map.js';
 
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
@@ -66,70 +50,46 @@ export class Prompter {
         this.last_prompt_time = 0;
         this.awaiting_coding = false;
 
-        // try to get "max_tokens" parameter, else null
+        // for backwards compatibility, move max_tokens to params
         let max_tokens = null;
         if (this.profile.max_tokens)
             max_tokens = this.profile.max_tokens;
 
-        let chat_model_profile = this._selectAPI(this.profile.model);
-        this.chat_model = this._createModel(chat_model_profile);
+        let chat_model_profile = selectAPI(this.profile.model);
+        this.chat_model = createModel(chat_model_profile);
 
         if (this.profile.code_model) {
-            let code_model_profile = this._selectAPI(this.profile.code_model);
-            this.code_model = this._createModel(code_model_profile);
+            let code_model_profile = selectAPI(this.profile.code_model);
+            this.code_model = createModel(code_model_profile);
         }
         else {
             this.code_model = this.chat_model;
         }
 
         if (this.profile.vision_model) {
-            let vision_model_profile = this._selectAPI(this.profile.vision_model);
-            this.vision_model = this._createModel(vision_model_profile);
+            let vision_model_profile = selectAPI(this.profile.vision_model);
+            this.vision_model = createModel(vision_model_profile);
        }
         else {
             this.vision_model = this.chat_model;
         }
 
-        let embedding = this.profile.embedding;
-        if (embedding === undefined) {
-            if (chat_model_profile.api !== 'ollama')
-                embedding = {api: chat_model_profile.api};
-            else
-                embedding = {api: 'none'};
-        }
-        else if (typeof embedding === 'string' || embedding instanceof String)
-            embedding = {api: embedding};
-
-        console.log('Using embedding settings:', embedding);
-
-        try {
-            if (embedding.api === 'google')
-                this.embedding_model = new Gemini(embedding.model, embedding.url);
-            else if (embedding.api === 'openai')
-                this.embedding_model = new GPT(embedding.model, embedding.url);
-            else if (embedding.api === 'replicate')
-                this.embedding_model = new ReplicateAPI(embedding.model, embedding.url);
-            else if (embedding.api === 'ollama')
-                this.embedding_model = new Local(embedding.model, embedding.url);
-            else if (embedding.api === 'qwen')
-                this.embedding_model = new Qwen(embedding.model, embedding.url);
-            else if (embedding.api === 'mistral')
-                this.embedding_model = new Mistral(embedding.model, embedding.url);
-            else if (embedding.api === 'huggingface')
-                this.embedding_model = new HuggingFace(embedding.model, embedding.url);
-            else if (embedding.api === 'novita')
-                this.embedding_model = new Novita(embedding.model, embedding.url);
-            else {
-                this.embedding_model = null;
-                let embedding_name = embedding ? embedding.api : '[NOT SPECIFIED]'
-                console.warn('Unsupported embedding: ' + embedding_name + '. Using word-overlap instead, expect reduced performance. Recommend using a supported embedding model. See Readme.');
-            }
-        }
-        catch (err) {
-            console.warn('Warning: Failed to initialize embedding model:', err.message);
-            console.log('Continuing anyway, using word-overlap instead.');
-            this.embedding_model = null;
-        }
+        let embedding_model_profile = null;
+        if (this.profile.embedding) {
+            try {
+                embedding_model_profile = selectAPI(this.profile.embedding);
+            } catch (e) {
+                embedding_model_profile = null;
+            }
+        }
+        if (embedding_model_profile) {
+            this.embedding_model = createModel(embedding_model_profile);
+        }
+        else {
+            this.embedding_model = createModel({api: chat_model_profile.api});
+        }
 
         this.skill_libary = new SkillLibrary(agent, this.embedding_model);
         mkdirSync(`./bots/${name}`, { recursive: true });
         writeFileSync(`./bots/${name}/last_profile.json`, JSON.stringify(this.profile, null, 4), (err) => {
@@ -140,88 +100,6 @@ export class Prompter {
         });
     }
 
-    _selectAPI(profile) {
-        if (typeof profile === 'string' || profile instanceof String) {
-            profile = {model: profile};
-        }
-        if (!profile.api) {
-            if (profile.model.includes('openrouter/'))
-                profile.api = 'openrouter'; // must do first because shares names with other models
-            else if (profile.model.includes('ollama/'))
-                profile.api = 'ollama'; // also must do early because shares names with other models
-            else if (profile.model.includes('gemini'))
-                profile.api = 'google';
-            else if (profile.model.includes('vllm/'))
-                profile.api = 'vllm';
-            else if (profile.model.includes('gpt') || profile.model.includes('o1')|| profile.model.includes('o3'))
-                profile.api = 'openai';
-            else if (profile.model.includes('claude'))
-                profile.api = 'anthropic';
-            else if (profile.model.includes('huggingface/'))
-                profile.api = "huggingface";
-            else if (profile.model.includes('replicate/'))
-                profile.api = 'replicate';
-            else if (profile.model.includes('mistralai/') || profile.model.includes("mistral/"))
-                model_profile.api = 'mistral';
-            else if (profile.model.includes("groq/") || profile.model.includes("groqcloud/"))
-                profile.api = 'groq';
-            else if (profile.model.includes("glhf/"))
-                profile.api = 'glhf';
-            else if (profile.model.includes("hyperbolic/"))
-                profile.api = 'hyperbolic';
-            else if (profile.model.includes('novita/'))
-                profile.api = 'novita';
-            else if (profile.model.includes('qwen'))
-                profile.api = 'qwen';
-            else if (profile.model.includes('grok'))
-                profile.api = 'xai';
-            else if (profile.model.includes('deepseek'))
-                profile.api = 'deepseek';
-            else if (profile.model.includes('mistral'))
-                profile.api = 'mistral';
-            else
-                throw new Error('Unknown model:', profile.model);
-        }
-        return profile;
-    }
-    _createModel(profile) {
-        let model = null;
-        if (profile.api === 'google')
-            model = new Gemini(profile.model, profile.url, profile.params);
-        else if (profile.api === 'openai')
-            model = new GPT(profile.model, profile.url, profile.params);
-        else if (profile.api === 'anthropic')
-            model = new Claude(profile.model, profile.url, profile.params);
-        else if (profile.api === 'replicate')
-            model = new ReplicateAPI(profile.model.replace('replicate/', ''), profile.url, profile.params);
-        else if (profile.api === 'ollama')
-            model = new Local(profile.model.replace('ollama/', ''), profile.url, profile.params);
-        else if (profile.api === 'mistral')
-            model = new Mistral(profile.model, profile.url, profile.params);
-        else if (profile.api === 'groq')
-            model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, profile.params);
-        else if (profile.api === 'huggingface')
-            model = new HuggingFace(profile.model, profile.url, profile.params);
-        else if (profile.api === 'glhf')
-            model = new GLHF(profile.model.replace('glhf/', ''), profile.url, profile.params);
-        else if (profile.api === 'hyperbolic')
-            model = new Hyperbolic(profile.model.replace('hyperbolic/', ''), profile.url, profile.params);
-        else if (profile.api === 'novita')
-            model = new Novita(profile.model.replace('novita/', ''), profile.url, profile.params);
-        else if (profile.api === 'qwen')
-            model = new Qwen(profile.model, profile.url, profile.params);
-        else if (profile.api === 'xai')
-            model = new Grok(profile.model, profile.url, profile.params);
-        else if (profile.api === 'deepseek')
-            model = new DeepSeek(profile.model, profile.url, profile.params);
-        else if (profile.api === 'openrouter')
-            model = new OpenRouter(profile.model.replace('openrouter/', ''), profile.url, profile.params);
-        else if (profile.api === 'vllm')
-            model = new VLLM(profile.model.replace('vllm/', ''), profile.url, profile.params);
-        else
-            throw new Error('Unknown API:', profile.api);
-        return model;
-    }
     getName() {
         return this.profile.name;
     }
@@ -404,7 +282,7 @@ export class Prompter {
         await this._saveLog(prompt, to_summarize, resp, 'memSaving');
         if (resp?.includes('</think>')) {
             const [_, afterThink] = resp.split('</think>')
-            resp = afterThink
+            resp = afterThink;
         }
         return resp;
     }
@@ -482,6 +360,4 @@ export class Prompter {
         logFile = path.join(logDir, logFile);
         await fs.appendFile(logFile, String(logEntry), 'utf-8');
     }
-
-
 }
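The constructor refactor is the payoff of the registry: every `_selectAPI`/`_createModel` call becomes `selectAPI`/`createModel`, and embedding setup shrinks from a nine-way if/else chain to a two-step fallback, the configured embedding provider if it resolves, otherwise the chat model's API with that API's default model. The resulting resolution order, sketched (a behavioral summary, not code from the diff):

```javascript
// Sketch: embedding resolution after this refactor.
import { selectAPI, createModel } from './_model_map.js';

function resolveEmbeddingModel(profile, chatApi) {
    let embeddingProfile = null;
    if (profile.embedding) {
        try { embeddingProfile = selectAPI(profile.embedding); }
        catch { embeddingProfile = null; }       // unresolvable: fall through
    }
    return createModel(embeddingProfile ?? { api: chatApi });
}
```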
@@ -3,6 +3,7 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class Qwen {
+    static prefix = 'qwen';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.params = params;
@@ -4,6 +4,7 @@ import { getKey } from '../utils/keys.js';
 
 // llama, mistral
 export class ReplicateAPI {
+    static prefix = 'replicate';
     constructor(model_name, url, params) {
         this.model_name = model_name;
         this.url = url;
@@ -6,6 +6,7 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';
 
 export class VLLM {
+    static prefix = 'vllm';
     constructor(model_name, url) {
         this.model_name = model_name;
 
@@ -23,13 +24,14 @@ export class VLLM {
 
     async sendRequest(turns, systemMessage, stop_seq = '***') {
         let messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
+        let model = this.model_name || "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B";
 
-        if (this.model_name.includes('deepseek') || this.model_name.includes('qwen')) {
+        if (model.includes('deepseek') || model.includes('qwen')) {
             messages = strictFormat(messages);
         }
 
         const pack = {
-            model: this.model_name || "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            model: model,
             messages,
             stop: stop_seq,
         };