Merge branch 'develop' into TTS

Sweaterdog 2025-03-13 23:54:49 -07:00 committed by GitHub
commit 360b937237
22 changed files with 762 additions and 234 deletions

@@ -39,20 +39,23 @@ You can configure the agent's name, model, and prompts in their profile like `an
| API | Config Variable | Example Model name | Docs |
|------|------|------|------|
| `openai` | `OPENAI_API_KEY` | `gpt-4o-mini` | [docs](https://platform.openai.com/docs/models) |
| `google` | `GEMINI_API_KEY` | `gemini-2.0-flash` | [docs](https://ai.google.dev/gemini-api/docs/models/gemini) |
| `anthropic` | `ANTHROPIC_API_KEY` | `claude-3-haiku-20240307` | [docs](https://docs.anthropic.com/claude/docs/models-overview) |
| `xai` | `XAI_API_KEY` | `grok-2-1212` | [docs](https://docs.x.ai/docs) |
| `deepseek` | `DEEPSEEK_API_KEY` | `deepseek-chat` | [docs](https://api-docs.deepseek.com/) |
| `ollama` (local) | n/a | `llama3.1` | [docs](https://ollama.com/library) |
| `qwen` | `QWEN_API_KEY` | `qwen-max` | [Intl.](https://www.alibabacloud.com/help/en/model-studio/developer-reference/use-qwen-by-calling-api)/[cn](https://help.aliyun.com/zh/model-studio/getting-started/models) |
| `mistral` | `MISTRAL_API_KEY` | `mistral-large-latest` | [docs](https://docs.mistral.ai/getting-started/models/models_overview/) |
| `replicate` | `REPLICATE_API_KEY` | `replicate/meta/meta-llama-3-70b-instruct` | [docs](https://replicate.com/collections/language-models) |
| `groq` (not grok) | `GROQCLOUD_API_KEY` | `groq/mixtral-8x7b-32768` | [docs](https://console.groq.com/docs/models) |
| `huggingface` | `HUGGINGFACE_API_KEY` | `huggingface/mistralai/Mistral-Nemo-Instruct-2407` | [docs](https://huggingface.co/models) |
| `novita` | `NOVITA_API_KEY` | `novita/deepseek/deepseek-r1` | [docs](https://novita.ai/model-api/product/llm-api?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link) |
| `openrouter` | `OPENROUTER_API_KEY` | `openrouter/anthropic/claude-3.5-sonnet` | [docs](https://openrouter.ai/models) |
| `glhf.chat` | `GHLF_API_KEY` | `glhf/hf:meta-llama/Llama-3.1-405B-Instruct` | [docs](https://glhf.chat/user-settings/api) |
| `hyperbolic` | `HYPERBOLIC_API_KEY` | `hyperbolic/deepseek-ai/DeepSeek-V3` | [docs](https://docs.hyperbolic.xyz/docs/getting-started) |
If you use Ollama, run the following terminal command to install the default models used for generation and embedding:
`ollama pull llama3.1 && ollama pull nomic-embed-text`
### Online Servers
To connect to online servers your bot will need an official Microsoft/Minecraft account. You can use your own personal one, but you will need another account if you want to connect at the same time and play alongside the bot. To connect, change these lines in `settings.js`:
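For example (illustrative placeholder values; `host` and `port` come from the server you want to join):

```js
"host": "111.222.333.444", // the server's IP address or hostname
"port": 25565,             // the server's port (25565 is the Minecraft default)
"auth": "microsoft",       // "microsoft" for online servers, "offline" for local ones
```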

@@ -10,6 +10,8 @@
"XAI_API_KEY": "",
"MISTRAL_API_KEY": "",
"DEEPSEEK_API_KEY": "",
"GHLF_API_KEY": "",
"HYPERBOLIC_API_KEY": "",
"NOVITA_API_KEY": "",
"OPENROUTER_API_KEY": ""
}

@@ -6,6 +6,7 @@
"@huggingface/inference": "^2.8.1",
"@mistralai/mistralai": "^1.1.0",
"canvas": "^3.1.0",
"cheerio": "^1.0.0",
"express": "^4.18.2",
"google-translate-api-x": "^10.7.1",
"groq-sdk": "^0.5.0",

@@ -1,12 +1,13 @@
diff --git a/node_modules/@google/generative-ai/dist/index.mjs b/node_modules/@google/generative-ai/dist/index.mjs
index 23a175b..aab7e19 100644
--- a/node_modules/@google/generative-ai/dist/index.mjs
+++ b/node_modules/@google/generative-ai/dist/index.mjs
@@ -156,1 +156,1 @@
-const API_VERSION = "v1";
+const API_VERSION = "v1beta";
diff --git a/node_modules/@google/generative-ai/dist/index.js b/node_modules/@google/generative-ai/dist/index.js
--- a/node_modules/@google/generative-ai/dist/index.js
+++ b/node_modules/@google/generative-ai/dist/index.js
@@ -151,7 +151,7 @@ class GoogleGenerativeAIResponseError extends GoogleGenerativeAIError {
* limitations under the License.
*/
const BASE_URL = "https://generativelanguage.googleapis.com";
-const API_VERSION = "v1";
+const API_VERSION = "v1beta";
/**
* We can't `require` package.json if this runs on web. We will use rollup to
* swap in the version number here at build time.

@@ -0,0 +1,15 @@
{
"name": "claude_thinker",
"model": {
"model": "claude-3-7-sonnet-latest",
"params": {
"thinking": {
"type": "enabled",
"budget_tokens": 4000
}
}
},
"embedding": "openai"
}
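This profile's `params` are spread into the Anthropic request (see the `src/models/claude.js` change below, which also derives `max_tokens` when thinking is enabled). A minimal sketch of the resulting API call, assuming the official `@anthropic-ai/sdk` client:

```js
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const resp = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-latest',
    max_tokens: 5000, // must be greater than thinking.budget_tokens
    thinking: { type: 'enabled', budget_tokens: 4000 },
    messages: [{ role: 'user', content: 'Plan a small oak house.' }],
});
// Thinking models return multiple content blocks; take the first of type 'text'.
const text = resp.content.find(c => c.type === 'text')?.text;
```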

@@ -3,7 +3,7 @@
"conversing": "You are a playful Minecraft bot named $NAME that can converse with players, see, move, mine, build, and interact with the world by using commands.\n$SELF_PROMPT Act human-like as if you were a typical Minecraft player, rather than an AI. Be very brief in your responses, don't apologize constantly, don't give instructions or make lists unless asked, and don't refuse requests. Don't pretend to act, use commands immediately when requested. Do NOT say this: 'Sure, I've stopped.', instead say this: 'Sure, I'll stop. !stop'. Do NOT say this: 'On my way! Give me a moment.', instead say this: 'On my way! !goToPlayer(\"playername\", 3)'. Respond only as $NAME, never output '(FROM OTHER BOT)' or pretend to be someone else. If you have nothing to say or do, respond with an just a tab '\t'. This is extremely important to me, take a deep breath and have fun :)\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$COMMAND_DOCS\n$EXAMPLES\nConversation Begin:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation between you and the user, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will receive it's output. If you are satisfied with the response, respond without a codeblock in a conversational way. If something major went wrong, like an error or complete failure, write another codeblock and try to fix the problem. Minor mistakes are acceptable. Be maximally efficient, creative, and clear. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST CALL AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` Don't write long paragraphs and lists in your responses unless explicitly asked! Only summarize the code you write with a sentence or two when done. This is extremely important to me, think step-by-step, take a deep breath and good luck! \n$SELF_PROMPT\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will receive it's output. If an error occurs, write another codeblock and try to fix the problem. Be maximally efficient, creative, and correct. Be mindful of previous actions. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST USE AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` You have `Vec3`, `skills`, and `world` imported, and the mineflayer `bot` is given. Do not use setTimeout or setInterval, instead use `await skills.wait(bot, ms)`. Do not speak conversationally, only use codeblocks. Do any planning in comments. This is extremely important to me, think step-by-step, take a deep breath and good luck! \n$SELF_PROMPT\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"saving_memory": "You are a minecraft bot named $NAME that has been talking and playing minecraft by using commands. Update your memory by summarizing the following conversation and your old memory in your next response. Prioritize preserving important facts, things you've learned, useful tips, and long term reminders. Do Not record stats, inventory, or docs! Only save transient information from your chat history. You're limited to 500 characters, so be extremely brief and minimize words. Compress useful information. \nOld Memory: '$MEMORY'\nRecent conversation: \n$TO_SUMMARIZE\nSummarize your old memory and recent conversation into a new memory, and respond only with the unwrapped memory text: ",
@@ -238,8 +238,9 @@
],
[
{"role": "user", "content": "brug: build a dirt house"},
{"role": "assistant", "content": "```const position = world.getPosition(bot);\nconst startX = position.x;\nconst startY = position.y;\nconst startZ = position.z;\nconst width = 7;\nconst depth = 7;\nconst height = 4;\n\n// Build the walls\nfor (let x = startX; x < startX + width; x++) {\n for (let y = startY; y < startY + height; y++) {\n for (let z = startZ; z < startZ + depth; z++) {\n if (x === startX || x === startX + width - 1 || y === startY || y === startY + height - 1 || z === startZ || z === startZ + depth - 1) {\n await skills.placeBlock(bot, 'oak_planks', x, y, z); \n }\n }\n }\n}\n```"}
{"role": "assistant", "content": "```js\nconst position = world.getPosition(bot);\nconst startX = position.x;\nconst startY = position.y;\nconst startZ = position.z;\nconst width = 7;\nconst depth = 7;\nconst height = 4;\n\n// Build front wall (z = startZ)\nfor (let x = startX; x < startX + width; x++) {\n for (let y = startY; y < startY + height; y++) {\n await skills.placeBlock(bot, 'oak_planks', x, y, startZ);\n }\n}\n\n// Build back wall (z = startZ + depth - 1)\nfor (let x = startX; x < startX + width; x++) {\n for (let y = startY; y < startY + height; y++) {\n await skills.placeBlock(bot, 'oak_planks', x, y, startZ + depth - 1);\n }\n}\n\n// Build left wall (x = startX)\nfor (let z = startZ; z < startZ + depth; z++) {\n for (let y = startY; y < startY + height; y++) {\n await skills.placeBlock(bot, 'oak_planks', startX, y, z);\n }\n}\n\n// Build right wall (x = startX + width - 1)\nfor (let z = startZ; z < startZ + depth; z++) {\n for (let y = startY; y < startY + height; y++) {\n await skills.placeBlock(bot, 'oak_planks', startX + width - 1, y, z);\n }\n}\n\n// Build floor (y = startY)\nfor (let x = startX; x < startX + width; x++) {\n for (let z = startZ; z < startZ + depth; z++) {\n await skills.placeBlock(bot, 'oak_planks', x, startY, z);\n }\n}\n\n// Build ceiling (y = startY + height - 1)\nfor (let x = startX; x < startX + width; x++) {\n for (let z = startZ; z < startZ + depth; z++) {\n await skills.placeBlock(bot, 'oak_planks', x, startY + height - 1, z);\n }\n}\n```"}
]
]
}

@@ -1,7 +1,7 @@
{
"name": "gemini",
"model": "gemini-2.0-flash-lite-preview-02-05",
"model": "gemini-2.0-flash",
"cooldown": 10000
}

@@ -19,7 +19,6 @@ export default
// "./profiles/gemini.json",
// "./profiles/llama.json",
// "./profiles/qwen.json",
// "./profiles/mistral.json",
// "./profiles/grok.json",
// "./profiles/mistral.json",
// "./profiles/deepseek.json",
@@ -30,13 +29,14 @@ export default
"load_memory": false, // load memory from previous session
"init_message": "Respond with hello world and your name", // sends to all on spawn
"only_chat_with": [], // users that the bots listen to and send general messages to. if empty it will chat publicly
"speak": false, // allows all bots to speak through system text-to-speech. works on windows, mac, on linux you need to `apt install espeak`
"language": "en", // translate to/from this language. Supports these language names: https://cloud.google.com/translate/docs/languages
"show_bot_views": false, // show bot's view in browser at localhost:3000, 3001...
"allow_insecure_coding": false, // allows newAction command and model can write/run code on your computer. enable at own risk
"blocked_actions" : [], // commands to disable and remove from docs. Ex: ["!setMode"]
"code_timeout_mins": -1, // minutes code is allowed to run. -1 for no timeout
"relevant_docs_count": 5, // Parameter: -1 = all, 0 = no references, 5 = five references. If exceeding the maximum, all reference documents are returned.
"relevant_docs_count": 5, // number of relevant code function docs to select for prompting. -1 for all
"max_messages": 15, // max number of messages to keep in context
"num_examples": 2, // number of examples to give to the model

@@ -14,6 +14,7 @@ import { addViewer } from './viewer.js';
import settings from '../../settings.js';
import { serverProxy } from './agent_proxy.js';
import { Task } from './tasks.js';
import { say } from './speak.js';
export class Agent {
async start(profile_fp, load_mem=false, init_message=null, count_id=0, task_path=null, task_id=null) {
@@ -28,102 +29,89 @@ export class Agent {
}
this.count_id = count_id;
if (!profile_fp) {
    throw new Error('No profile filepath provided');
}
console.log('Starting agent initialization with profile:', profile_fp);
// Initialize components with more detailed error handling
console.log('Initializing action manager...');
this.actions = new ActionManager(this);
console.log('Initializing prompter...');
this.prompter = new Prompter(this, profile_fp);
this.name = this.prompter.getName();
console.log('Initializing history...');
this.history = new History(this);
console.log('Initializing coder...');
this.coder = new Coder(this);
console.log('Initializing npc controller...');
this.npc = new NPCContoller(this);
console.log('Initializing memory bank...');
this.memory_bank = new MemoryBank();
console.log('Initializing self prompter...');
this.self_prompter = new SelfPrompter(this);
convoManager.initAgent(this);
console.log('Initializing examples...');
await this.prompter.initExamples();
console.log('Initializing task...');
this.task = new Task(this, task_path, task_id);
const blocked_actions = settings.blocked_actions.concat(this.task.blocked_actions || []);
blacklistCommands(blocked_actions);
serverProxy.connect(this);
console.log(this.name, 'logging into minecraft...');
this.bot = initBot(this.name);
initModes(this);
let save_data = null;
if (load_mem) {
save_data = this.history.load();
}
this.bot.on('login', () => {
console.log(this.name, 'logged in!');
serverProxy.login();
// Set skin for profile, requires Fabric Tailor. (https://modrinth.com/mod/fabrictailor)
if (this.prompter.profile.skin)
this.bot.chat(`/skin set URL ${this.prompter.profile.skin.model} ${this.prompter.profile.skin.path}`);
else
this.bot.chat(`/skin clear`);
});
const spawnTimeout = setTimeout(() => {
process.exit(0);
}, 30000);
this.bot.once('spawn', async () => {
try {
clearTimeout(spawnTimeout);
addViewer(this.bot, count_id);
// wait for a bit so stats are not undefined
await new Promise((resolve) => setTimeout(resolve, 1000));
console.log(`${this.name} spawned.`);
this.clearBotLogs();
this._setupEventHandlers(save_data, init_message);
this.startEvents();
if (!load_mem) {
this.task.initBotTask();
}
} catch (error) {
console.error('Error in spawn event:', error);
process.exit(0);
}
});
}
async _setupEventHandlers(save_data, init_message) {
const ignore_messages = [
@@ -375,6 +363,9 @@ export class Agent {
}
}
else {
if (settings.speak) {
say(message);
}
this.bot.chat(message);
}
}

@@ -39,7 +39,7 @@ export class Coder {
// check function exists
const missingSkills = skills.filter(skill => !allDocs[skill]);
if (missingSkills.length > 0) {
result += 'These functions do not exist.\n';
result += '### FUNCTIONS NOT FOUND ###\n';
result += missingSkills.join('\n');
console.log(result)
@@ -177,12 +177,14 @@
}
if (failures >= 3) {
console.warn("Action failed, agent would not write code.");
return { success: false, message: 'Action failed, agent would not write code.', interrupted: false, timedout: false };
}
messages.push({
role: 'system',
content: 'Error: no code provided. Write code in codeblock in your response. ``` // example ```'}
);
console.warn("No code block generated.");
failures++;
continue;
}
@@ -192,12 +194,14 @@
let src_lint_copy = result.src_lint_copy;
const analysisResult = await this.lintCode(src_lint_copy);
if (analysisResult) {
const message = 'Error: Code lint error:'+'\n'+analysisResult+'\nPlease try again.';
console.warn("Linting error:"+'\n'+analysisResult+'\n');
messages.push({ role: 'system', content: message });
continue;
}
if (!executionModuleExports) {
agent_history.add('system', 'Failed to stage code, something is wrong.');
console.warn("Failed to stage code, something is wrong.");
return {success: false, message: null, interrupted: false, timedout: false};
}

@@ -407,6 +407,14 @@ export const actionsList = [
return `Conversation with ${player_name} ended.`;
}
},
{
name: '!digDown',
description: 'Digs down a specified distance. Will stop if it reaches lava, water, or a fall of >=4 blocks below the bot.',
params: {'distance': { type: 'int', description: 'Distance to dig down', domain: [1, Number.MAX_SAFE_INTEGER] }},
perform: runAsAction(async (agent, distance) => {
await skills.digDown(agent.bot, distance)
})
},
// { // commented for now, causes confusion with goal command
// name: '!npcGoal',
// description: 'Set a simple goal for an item or building to automatically work towards. Do not use for complex goals.',

@@ -2,6 +2,7 @@ import * as world from '../library/world.js';
import * as mc from '../../utils/mcdata.js';
import { getCommandDocs } from './index.js';
import convoManager from '../conversation.js';
import { load } from 'cheerio';
const pad = (str) => {
return '\n' + str + '\n';
@@ -214,6 +215,35 @@ export const queryList = [
return pad(craftingPlan);
},
},
{
name: '!searchWiki',
description: 'Search the Minecraft Wiki for the given query.',
params: {
'query': { type: 'string', description: 'The query to search for.' }
},
perform: async function (agent, query) {
const url = `https://minecraft.wiki/w/${query}`
try {
const response = await fetch(url);
if (response.status === 404) {
return `${query} was not found on the Minecraft Wiki. Try adjusting your search term.`;
}
const html = await response.text();
const $ = load(html);
const parserOutput = $("div.mw-parser-output");
parserOutput.find("table.navbox").remove();
const divContent = parserOutput.text();
return divContent.trim();
} catch (error) {
console.error("Error fetching or parsing HTML:", error);
return `The following error occurred: ${error}`;
}
}
},
{
name: '!help',
description: 'Lists all available commands and their descriptions.',

@@ -111,16 +111,28 @@ export async function craftRecipe(bot, itemName, num=1) {
return true;
}
export async function wait(bot, milliseconds) {
/**
* Waits for the given number of milliseconds.
* @param {MinecraftBot} bot, reference to the minecraft bot.
* @param {number} milliseconds, the number of milliseconds to wait.
* @returns {Promise<boolean>} true if the wait was successful, false otherwise.
* @example
* await skills.wait(bot, 1000);
**/
// setTimeout is disabled to prevent unawaited code, so this is a safe alternative that enables interrupts
let timeLeft = milliseconds;
let startTime = Date.now();
while (timeLeft > 0) {
if (bot.interrupt_code) return false;
let waitTime = Math.min(2000, timeLeft);
await new Promise(resolve => setTimeout(resolve, waitTime));
let elapsed = Date.now() - startTime;
timeLeft = milliseconds - elapsed;
}
return true;
}
@@ -1363,3 +1375,61 @@ export async function activateNearestBlock(bot, type) {
log(bot, `Activated ${type} at x:${block.position.x.toFixed(1)}, y:${block.position.y.toFixed(1)}, z:${block.position.z.toFixed(1)}.`);
return true;
}
export async function digDown(bot, distance = 10) {
/**
* Digs down a specified distance. Will stop if it reaches lava, water, or a fall of >=4 blocks below the bot.
* @param {MinecraftBot} bot, reference to the minecraft bot.
* @param {int} distance, distance to dig down.
* @returns {Promise<boolean>} true if successfully dug all the way down.
* @example
* await skills.digDown(bot, 10);
**/
let start_block_pos = bot.blockAt(bot.entity.position).position;
for (let i = 1; i <= distance; i++) {
const targetBlock = bot.blockAt(start_block_pos.offset(0, -i, 0));
let belowBlock = bot.blockAt(start_block_pos.offset(0, -i-1, 0));
if (!targetBlock || !belowBlock) {
log(bot, `Dug down ${i-1} blocks, but reached the end of the world.`);
return true;
}
// Check for lava, water
if (targetBlock.name === 'lava' || targetBlock.name === 'water' ||
belowBlock.name === 'lava' || belowBlock.name === 'water') {
log(bot, `Dug down ${i-1} blocks, but reached ${belowBlock ? belowBlock.name : '(lava/water)'}`)
return false;
}
const MAX_FALL_BLOCKS = 2;
let num_fall_blocks = 0;
for (let j = 0; j <= MAX_FALL_BLOCKS; j++) {
if (!belowBlock || (belowBlock.name !== 'air' && belowBlock.name !== 'cave_air')) {
break;
}
num_fall_blocks++;
belowBlock = bot.blockAt(belowBlock.position.offset(0, -1, 0));
}
if (num_fall_blocks > MAX_FALL_BLOCKS) {
log(bot, `Dug down ${i-1} blocks, but reached a drop below the next block.`);
return false;
}
if (targetBlock.name === 'air' || targetBlock.name === 'cave_air') {
log(bot, 'Skipping air block');
console.log(targetBlock.position);
continue;
}
let dug = await breakBlockAt(bot, targetBlock.position.x, targetBlock.position.y, targetBlock.position.z);
if (!dug) {
log(bot, 'Failed to dig block at position:' + targetBlock.position);
return false;
}
}
log(bot, `Dug down ${distance} blocks.`);
return true;
}
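Combined with the reworked `wait` skill above, a generated codeblock can chain these safely (illustrative usage only, assuming the `skills` and `bot` bindings the coding prompt provides):

```js
// Dig a 10-block shaft, pause briefly, then continue if nothing went wrong.
const reachedBottom = await skills.digDown(bot, 10);
if (reachedBottom) {
    await skills.wait(bot, 2000); // interruptible, unlike a raw setTimeout
    await skills.digDown(bot, 5);
}
```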

src/agent/speak.js (new file)

@@ -0,0 +1,43 @@
import { exec } from 'child_process';
let speakingQueue = [];
let isSpeaking = false;
export function say(textToSpeak) {
speakingQueue.push(textToSpeak);
if (!isSpeaking) {
processQueue();
}
}
function processQueue() {
if (speakingQueue.length === 0) {
isSpeaking = false;
return;
}
isSpeaking = true;
const textToSpeak = speakingQueue.shift();
const isWin = process.platform === "win32";
const isMac = process.platform === "darwin";
let command;
if (isWin) {
command = `powershell -Command "Add-Type -AssemblyName System.Speech; (New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak(\\"${textToSpeak}\\")"`;
} else if (isMac) {
command = `say "${textToSpeak}"`;
} else {
command = `espeak "${textToSpeak}"`;
}
exec(command, (error, stdout, stderr) => {
if (error) {
console.error(`Error: ${error.message}`);
console.error(`${error.stack}`);
} else if (stderr) {
console.error(`Error: ${stderr}`);
}
processQueue(); // Continue with the next message in the queue
});
}
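Usage is a single fire-and-forget call; messages queue so overlapping chats don't talk over each other. Note that the text is interpolated into a shell command, so quotes or backticks in a message can break (or, in the worst case, inject into) the command; worth sanitizing if bot chat is untrusted:

```js
import { say } from './speak.js';

say('Hello world, I am Andy.'); // spoken immediately
say('On my way!');              // queued, spoken after the first finishes
```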

@@ -22,7 +22,12 @@ export class Claude {
try {
console.log('Awaiting anthropic api response...')
if (!this.params.max_tokens) {
if (this.params.thinking?.budget_tokens) {
this.params.max_tokens = this.params.thinking.budget_tokens + 1000;
// max_tokens must be greater than thinking.budget_tokens
} else {
this.params.max_tokens = 4096;
}
}
const resp = await this.anthropic.messages.create({
model: this.model_name || "claude-3-sonnet-20240229",
@@ -32,7 +37,14 @@ export class Claude {
});
console.log('Received.')
// get first content of type text
const textContent = resp.content.find(content => content.type === 'text');
if (textContent) {
res = textContent.text;
} else {
console.warn('No text content found in the response.');
res = 'No response from Claude.';
}
}
catch (err) {
console.log(err);

@@ -39,7 +39,6 @@ export class Gemini {
model: this.model_name || "gemini-1.5-flash",
// systemInstruction does not work bc google is trash
};
if (this.url) {
model = this.genAI.getGenerativeModel(
modelConfig,
@@ -72,7 +71,26 @@
}
});
const response = await result.response;
let text;
// Handle "thinking" models since they smart
if (this.model_name && this.model_name.includes("thinking")) {
if (
response.candidates &&
response.candidates.length > 0 &&
response.candidates[0].content &&
response.candidates[0].content.parts &&
response.candidates[0].content.parts.length > 1
) {
text = response.candidates[0].content.parts[1].text;
} else {
console.warn("Unexpected response structure for thinking model:", response);
text = response.text();
}
} else {
text = response.text();
}
console.log('Received.');
return text;
@@ -94,4 +112,4 @@ export class Gemini {
const result = await model.embedContent(text);
return result.embedding.values;
}
}
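The `parts[1]` lookup above assumes a two-part candidate where the first part holds the model's thoughts and the second the final answer, roughly like this (a hypothetical shape for illustration; field contents vary by model):

```js
const exampleCandidateContent = {
    role: 'model',
    parts: [
        { text: 'Reasoning about the request...' },    // chain of thought
        { text: 'On my way! !goToPlayer("steve", 3)' } // the reply actually returned
    ]
};
```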

src/models/glhf.js (new file)

@@ -0,0 +1,70 @@
import OpenAIApi from 'openai';
import { getKey } from '../utils/keys.js';
export class GLHF {
constructor(model_name, url) {
this.model_name = model_name;
const apiKey = getKey('GHLF_API_KEY');
if (!apiKey) {
throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
}
this.openai = new OpenAIApi({
apiKey,
baseURL: url || "https://glhf.chat/api/openai/v1"
});
}
async sendRequest(turns, systemMessage, stop_seq = '***') {
// Construct the message array for the API request.
let messages = [{ role: 'system', content: systemMessage }].concat(turns);
const pack = {
model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
messages,
stop: [stop_seq]
};
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting glhf.chat API response... (attempt: ${attempt})`);
try {
let completion = await this.openai.chat.completions.create(pack);
if (completion.choices[0].finish_reason === 'length') {
throw new Error('Context length exceeded');
}
let res = completion.choices[0].message.content;
// If there's an open <think> tag without a corresponding </think>, retry.
if (res.includes("<think>") && !res.includes("</think>")) {
console.warn("Partial <think> block detected. Re-generating...");
continue;
}
// If there's a closing </think> tag but no opening <think>, prepend one.
if (res.includes("</think>") && !res.includes("<think>")) {
res = "<think>" + res;
}
finalRes = res.replace(/<\|separator\|>/g, '*no response*');
break; // Valid response obtained.
} catch (err) {
if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
console.log('Context length exceeded, trying again with shorter context.');
return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
} else {
console.error(err);
finalRes = 'My brain disconnected, try again.';
break;
}
}
}
if (finalRes === null) {
finalRes = "I thought too hard, sorry, try again";
}
return finalRes;
}
async embed(text) {
throw new Error('Embeddings are not supported by glhf.');
}
}
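This partial-`<think>` retry-and-strip pattern now recurs across the glhf, groq, huggingface, hyperbolic, and local providers; a shared helper could be extracted later (a sketch only, not part of this commit):

```js
// Returns { retry: true } when a partial <think> block means the caller should re-generate;
// otherwise returns the text with any complete <think>...</think> block removed.
export function stripThinkBlocks(res) {
    const hasOpen = res.includes('<think>');
    const hasClose = res.includes('</think>');
    if (hasOpen && !hasClose) return { retry: true, text: null };
    if (hasClose && !hasOpen) res = '<think>' + res; // reattach a lost opening tag
    return { retry: false, text: res.replace(/<think>[\s\S]*?<\/think>/g, '').trim() };
}
```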

@@ -2,48 +2,107 @@ import Groq from 'groq-sdk';
import fs from "fs";
import { getKey } from '../utils/keys.js';
// THIS API IS NOT TO BE CONFUSED WITH GROK!
// Go to grok.js for that. :)

// Umbrella class for everything under the sun... That GroqCloud provides, that is.

export class GroqCloudAPI {

    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.url = url;
        this.params = params || {};

        // Remove any mention of "tools" from params:
        if (this.params.tools)
            delete this.params.tools;
        // This is just a bit of future-proofing in case we drag Mindcraft in that direction.

        // I'm going to do a sneaky ReplicateAPI theft for a lot of this, aren't I?
        if (this.url)
            console.warn("Groq Cloud has no implementation for custom URLs. Ignoring provided URL.");

        this.groq = new Groq({ apiKey: getKey('GROQCLOUD_API_KEY') });
    }

    async sendRequest(turns, systemMessage, stop_seq = null) {
        // Variables for DeepSeek-R1 models
        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;
        let res = null;

        // Construct messages array
        let messages = [{"role": "system", "content": systemMessage}].concat(turns);

        while (attempt < maxAttempts) {
            attempt++;

            // These variables look odd, but they're for the future.
            let raw_res = null;
            let tool_calls = null;

            try {
                console.log("Awaiting Groq response...");

                // Handle deprecated max_tokens parameter
                if (this.params.max_tokens) {
                    console.warn("GROQCLOUD WARNING: A profile is using `max_tokens`. This is deprecated. Please move to `max_completion_tokens`.");
                    this.params.max_completion_tokens = this.params.max_tokens;
                    delete this.params.max_tokens;
                }

                if (!this.params.max_completion_tokens) {
                    this.params.max_completion_tokens = 8000; // Set it lower.
                }

                let completion = await this.groq.chat.completions.create({
                    "messages": messages,
                    "model": this.model_name || "llama-3.3-70b-versatile",
                    "stream": false,
                    "stop": stop_seq,
                    ...(this.params || {})
                });

                raw_res = completion.choices[0].message;
                res = raw_res.content;
            } catch (err) {
                console.log(err);
                res = "My brain just kinda stopped working. Try again.";
            }

            // Check for <think> tag issues
            const hasOpenTag = res.includes("<think>");
            const hasCloseTag = res.includes("</think>");

            // If a partial <think> block is detected, log a warning and retry
            if (hasOpenTag && !hasCloseTag) {
                console.warn("Partial <think> block detected. Re-generating Groq request...");
                continue; // This will skip the rest of the loop and try again
            }

            // If only the closing tag is present, prepend an opening tag
            if (hasCloseTag && !hasOpenTag) {
                res = '<think>' + res;
            }

            // Remove the complete <think> block (and any content inside) from the response
            res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

            finalRes = res;
            break; // Exit the loop once a valid response is obtained
        }

        if (finalRes == null) {
            console.warn("Could not obtain a valid <think> block or normal response after max attempts.");
            finalRes = "I thought too hard, sorry, try again.";
        }
        finalRes = finalRes.replace(/<\|separator\|>/g, '*no response*');
        return finalRes;
    }

    async embed(text) {
        throw new Error('Embeddings are not supported by Groq.');
    }
}

@@ -1,46 +1,85 @@
import { toSinglePrompt } from '../utils/text.js';
import { getKey } from '../utils/keys.js';
import { HfInference } from "@huggingface/inference";

export class HuggingFace {
    constructor(model_name, url, params) {
        // Remove 'huggingface/' prefix if present
        this.model_name = model_name.replace('huggingface/', '');
        this.url = url;
        this.params = params;

        if (this.url) {
            console.warn("Hugging Face doesn't support custom urls!");
        }

        this.huggingface = new HfInference(getKey('HUGGINGFACE_API_KEY'));
    }

    async sendRequest(turns, systemMessage) {
        const stop_seq = '***';
        // Build a single prompt from the conversation turns
        const prompt = toSinglePrompt(turns, null, stop_seq);
        // Fallback model if none was provided
        const model_name = this.model_name || 'meta-llama/Meta-Llama-3-8B';
        // Combine system message with the prompt
        const input = systemMessage + "\n" + prompt;

        // We'll try up to 5 times in case of partial <think> blocks for DeepSeek-R1 models.
        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;

        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting Hugging Face API response... (model: ${model_name}, attempt: ${attempt})`);
            let res = '';
            try {
                // Consume the streaming response chunk by chunk
                for await (const chunk of this.huggingface.chatCompletionStream({
                    model: model_name,
                    messages: [{ role: "user", content: input }],
                    ...(this.params || {})
                })) {
                    res += (chunk.choices[0]?.delta?.content || "");
                }
            } catch (err) {
                console.log(err);
                res = 'My brain disconnected, try again.';
                // Break out immediately; we only retry when handling partial <think> tags.
                break;
            }

            // If the model is DeepSeek-R1, check for mismatched <think> blocks.
            const hasOpenTag = res.includes("<think>");
            const hasCloseTag = res.includes("</think>");

            // If there's a partial mismatch, warn and retry the entire request.
            if (hasOpenTag && !hasCloseTag) {
                console.warn("Partial <think> block detected. Re-generating...");
                continue;
            }

            // If both tags are present, remove the <think> block entirely.
            if (hasOpenTag && hasCloseTag) {
                res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }

            finalRes = res;
            break; // Exit loop if we got a valid response.
        }

        // If no valid response was obtained after max attempts, assign a fallback.
        if (finalRes == null) {
            console.warn("Could not get a valid <think> block or normal response after max attempts.");
            finalRes = 'I thought too hard, sorry, try again.';
        }
        console.log('Received.');
        console.log(finalRes);
        return finalRes;
    }

    async embed(text) {
        throw new Error('Embeddings are not supported by HuggingFace.');
    }
}

src/models/hyperbolic.js (new file)

@@ -0,0 +1,113 @@
import { getKey } from '../utils/keys.js';
export class Hyperbolic {
constructor(modelName, apiUrl) {
this.modelName = modelName || "deepseek-ai/DeepSeek-V3";
this.apiUrl = apiUrl || "https://api.hyperbolic.xyz/v1/chat/completions";
// Retrieve the Hyperbolic API key from keys.js
this.apiKey = getKey('HYPERBOLIC_API_KEY');
if (!this.apiKey) {
throw new Error('HYPERBOLIC_API_KEY not found. Check your keys.js file.');
}
}
/**
* Sends a chat completion request to the Hyperbolic endpoint.
*
* @param {Array} turns - An array of message objects, e.g. [{role: 'user', content: 'Hi'}].
* @param {string} systemMessage - The system prompt or instruction.
* @param {string} stopSeq - A stopping sequence, default '***'.
* @returns {Promise<string>} - The model's reply.
*/
async sendRequest(turns, systemMessage, stopSeq = '***') {
// Prepare the messages with a system prompt at the beginning
const messages = [{ role: 'system', content: systemMessage }, ...turns];
// Build the request payload
const payload = {
model: this.modelName,
messages: messages,
max_tokens: 8192,
temperature: 0.7,
top_p: 0.9,
stream: false
};
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting Hyperbolic API response... (attempt: ${attempt})`);
console.log('Messages:', messages);
let completionContent = null;
try {
const response = await fetch(this.apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`
},
body: JSON.stringify(payload)
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
if (data?.choices?.[0]?.finish_reason === 'length') {
throw new Error('Context length exceeded');
}
completionContent = data?.choices?.[0]?.message?.content || '';
console.log('Received response from Hyperbolic.');
} catch (err) {
if (
(err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') &&
turns.length > 1
) {
console.log('Context length exceeded, trying again with a shorter context...');
return await this.sendRequest(turns.slice(1), systemMessage, stopSeq);
} else {
console.error(err);
completionContent = 'My brain disconnected, try again.';
}
}
// Check for <think> blocks
const hasOpenTag = completionContent.includes("<think>");
const hasCloseTag = completionContent.includes("</think>");
if ((hasOpenTag && !hasCloseTag)) {
console.warn("Partial <think> block detected. Re-generating...");
continue; // Retry the request
}
if (hasCloseTag && !hasOpenTag) {
completionContent = '<think>' + completionContent;
}
if (hasOpenTag && hasCloseTag) {
completionContent = completionContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
finalRes = completionContent.replace(/<\|separator\|>/g, '*no response*');
break; // Valid response obtained—exit loop
}
if (finalRes == null) {
console.warn("Could not get a valid <think> block or normal response after max attempts.");
finalRes = 'I thought too hard, sorry, try again.';
}
return finalRes;
}
async embed(text) {
throw new Error('Embeddings are not supported by Hyperbolic.');
}
}
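A minimal usage sketch (assumes `HYPERBOLIC_API_KEY` is set in `keys.json`; the `hyperbolic/` prefix is already stripped by the prompter before construction):

```js
import { Hyperbolic } from './src/models/hyperbolic.js';

const model = new Hyperbolic('deepseek-ai/DeepSeek-V3');
const reply = await model.sendRequest(
    [{ role: 'user', content: 'hello' }],
    'You are a playful Minecraft bot.'
);
console.log(reply);
```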

@@ -10,45 +10,86 @@ export class Local {
}
async sendRequest(turns, systemMessage) {
let model = this.model_name || 'llama3.1'; // Updated to llama3.1, as it is more performant than llama3
let messages = strictFormat(turns);
messages.unshift({ role: 'system', content: systemMessage });
// We'll attempt up to 5 times for models with deepseek-r1-style reasoning if the <think> tags are mismatched.
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting local response... (model: ${model}, attempt: ${attempt})`);
let res = null;
try {
res = await this.send(this.chat_endpoint, {
model: model,
messages: messages,
stream: false,
...(this.params || {})
});
if (res) {
res = res['message']['content'];
} else {
res = 'No response data.';
}
} catch (err) {
if (err.message.toLowerCase().includes('context length') && turns.length > 1) {
console.log('Context length exceeded, trying again with shorter context.');
return await this.sendRequest(turns.slice(1), systemMessage);
} else {
console.log(err);
res = 'My brain disconnected, try again.';
}
}
// If the model name includes "deepseek-r1" or "Andy-3.5-reasoning", then handle the <think> block.
const hasOpenTag = res.includes("<think>");
const hasCloseTag = res.includes("</think>");
// If there's a partial mismatch, retry to get a complete response.
if ((hasOpenTag && !hasCloseTag)) {
console.warn("Partial <think> block detected. Re-generating...");
continue;
}
// If </think> is present but <think> is not, prepend <think>
if (hasCloseTag && !hasOpenTag) {
res = '<think>' + res;
}
// Changed this so that if the model reasons with <think> and </think> but doesn't start the message with <think>, it gets prepended so no errors occur.
// If both tags appear, remove them (and everything inside).
if (hasOpenTag && hasCloseTag) {
res = res.replace(/<think>[\s\S]*?<\/think>/g, '');
}
finalRes = res;
break; // Exit the loop if we got a valid response.
}
if (finalRes == null) {
console.warn("Could not get a valid <think> block or normal response after max attempts.");
finalRes = 'I thought too hard, sorry, try again.';
}
return finalRes;
}
async embed(text) {
let model = this.model_name || 'nomic-embed-text';
let body = { model: model, input: text };
let res = await this.send(this.embedding_endpoint, body);
return res['embedding'];
}
async send(endpoint, body) {
const url = new URL(endpoint, this.url);
let method = 'POST';
let headers = new Headers();
const request = new Request(url, { method, headers, body: JSON.stringify(body) });
let data = null;
try {
const res = await fetch(request);
@@ -63,4 +104,4 @@ export class Local {
}
return data;
}
}

@@ -1,7 +1,6 @@
import { readFileSync, mkdirSync, writeFileSync} from 'fs';
import { Examples } from '../utils/examples.js';
import { getCommandDocs } from '../agent/commands/index.js';
import { SkillLibrary } from "../agent/library/skill_library.js";
import { stringifyTurns } from '../utils/text.js';
import { getCommand } from '../agent/commands/index.js';
@@ -19,6 +18,8 @@ import { HuggingFace } from './huggingface.js';
import { Qwen } from "./qwen.js";
import { Grok } from "./grok.js";
import { DeepSeek } from './deepseek.js';
import { Hyperbolic } from './hyperbolic.js';
import { GLHF } from './glhf.js';
import { OpenRouter } from './openrouter.js';
export class Prompter {
@@ -41,7 +42,6 @@ export class Prompter {
}
// base overrides default, individual overrides base
this.convo_examples = null;
this.coding_examples = null;
@@ -121,10 +121,12 @@ export class Prompter {
profile = {model: profile};
}
if (!profile.api) {
if (profile.model.includes('openrouter/'))
profile.api = 'openrouter'; // must do first because shares names with other models
else if (profile.model.includes('ollama/'))
profile.api = 'ollama'; // also must do early because shares names with other models
else if (profile.model.includes('gemini'))
profile.api = 'google';
else if (profile.model.includes('gpt') || profile.model.includes('o1')|| profile.model.includes('o3'))
profile.api = 'openai';
else if (profile.model.includes('claude'))
@@ -137,6 +139,10 @@
model_profile.api = 'mistral';
else if (profile.model.includes("groq/") || profile.model.includes("groqcloud/"))
profile.api = 'groq';
else if (profile.model.includes("glhf/"))
profile.api = 'glhf';
else if (profile.model.includes("hyperbolic/"))
profile.api = 'hyperbolic';
else if (profile.model.includes('novita/'))
profile.api = 'novita';
else if (profile.model.includes('qwen'))
@@ -145,12 +151,13 @@
profile.api = 'xai';
else if (profile.model.includes('deepseek'))
profile.api = 'deepseek';
else if (profile.model.includes('mistral'))
profile.api = 'mistral';
else
throw new Error('Unknown model:', profile.model);
}
return profile;
}
_createModel(profile) {
let model = null;
if (profile.api === 'google')
@@ -162,13 +169,17 @@
else if (profile.api === 'replicate')
model = new ReplicateAPI(profile.model.replace('replicate/', ''), profile.url, profile.params);
else if (profile.api === 'ollama')
model = new Local(profile.model.replace('ollama/', ''), profile.url, profile.params);
else if (profile.api === 'mistral')
model = new Mistral(profile.model, profile.url, profile.params);
else if (profile.api === 'groq')
model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, profile.params);
else if (profile.api === 'huggingface')
model = new HuggingFace(profile.model, profile.url, profile.params);
else if (profile.api === 'glhf')
model = new GLHF(profile.model.replace('glhf/', ''), profile.url, profile.params);
else if (profile.api === 'hyperbolic')
model = new Hyperbolic(profile.model.replace('hyperbolic/', ''), profile.url, profile.params);
else if (profile.api === 'novita')
model = new Novita(profile.model.replace('novita/', ''), profile.url, profile.params);
else if (profile.api === 'qwen')
@@ -183,7 +194,6 @@ export class Prompter {
throw new Error('Unknown API:', profile.api);
return model;
}
getName() {
return this.profile.name;
}
@@ -243,9 +253,6 @@ export class Prompter {
await this.skill_libary.getRelevantSkillDocs(code_task_content, settings.relevant_docs_count)
);
}
prompt = prompt.replaceAll('$COMMAND_DOCS', getCommandDocs());
if (prompt.includes('$EXAMPLES') && examples !== null)
prompt = prompt.replaceAll('$EXAMPLES', await examples.createExampleMessage(messages));
if (prompt.includes('$MEMORY'))
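With the new `openrouter/` and `ollama/` prefix routes above, a profile can omit `api` entirely and let the substring checks infer it; `_createModel` then strips the prefix before constructing the client. An illustrative profile (the `embedding` field is optional and shown here as an assumption):

```json
{
    "name": "LocalAndy",
    "model": "ollama/llama3.1",
    "embedding": "ollama"
}
```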