Merge remote-tracking branch 'refs/remotes/upstream/main' into Tasks-more-relevant-docs-and-code-exception-fixes

# Conflicts:
#	src/agent/prompter.js
This commit is contained in:
“Qu 2025-01-19 18:28:17 +08:00
commit 5dd57dd429
23 changed files with 268 additions and 44 deletions

View file

@ -13,7 +13,7 @@ Do not connect this bot to public servers with coding enabled. This project allo
- [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc) (up to v1.21.1, recommend v1.20.4)
- [Node.js Installed](https://nodejs.org/) (at least v14)
- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) |
- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download). | [Mistral API Key](https://docs.mistral.ai/getting-started/models/models_overview/) | [Qwen API Key [Intl.]](https://www.alibabacloud.com/help/en/model-studio/developer-reference/get-api-key)/[[cn]](https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen?) | [Novita AI API Key](https://novita.ai/settings?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link#key-management) |
## Install and Run
@ -48,6 +48,7 @@ You can configure the agent's name, model, and prompts in their profile like `an
| Hugging Face | `HUGGINGFACE_API_KEY` | `huggingface/mistralai/Mistral-Nemo-Instruct-2407` | [docs](https://huggingface.co/models) |
| Novita AI | `NOVITA_API_KEY` | `gryphe/mythomax-l2-13b` | [docs](https://novita.ai/model-api/product/llm-api?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link) |
| Qwen | `QWEN_API_KEY` | `qwen-max` | [Intl.](https://www.alibabacloud.com/help/en/model-studio/developer-reference/use-qwen-by-calling-api)/[cn](https://help.aliyun.com/zh/model-studio/getting-started/models) |
| Mistral | `MISTRAL_API_KEY` | `mistral-large-latest` | [docs](https://docs.mistral.ai/getting-started/models/models_overview/) |
| xAI | `XAI_API_KEY` | `grok-beta` | [docs](https://docs.x.ai/docs) |
If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command:

View file

@ -17,6 +17,14 @@
},
"type": "debug"
},
"debug_inventory_restriction": {
"goal": "Place 1 oak plank, then place 1 stone brick",
"initial_inventory": {
"oak_planks": 20
},
"type": "debug",
"restrict_to_inventory": true
},
"construction": {
"type": "construction",
"goal": "Build a house",

View file

@ -8,5 +8,6 @@
"HUGGINGFACE_API_KEY": "",
"QWEN_API_KEY": "",
"XAI_API_KEY": "",
"MISTRAL_API_KEY": "",
"DEEPSEEK_API_KEY": ""
}

View file

@ -31,7 +31,7 @@ function getProfiles(args) {
async function main() {
if (settings.host_mindserver) {
const mindServer = createMindServer();
const mindServer = createMindServer(settings.mindserver_port);
}
mainProxy.connect();
@ -55,4 +55,4 @@ try {
} catch (error) {
console.error('An error occurred:', error);
process.exit(1);
}
}

View file

@ -4,6 +4,7 @@
"@anthropic-ai/sdk": "^0.17.1",
"@google/generative-ai": "^0.2.1",
"@huggingface/inference": "^2.8.1",
"@mistralai/mistralai": "^1.1.0",
"google-translate-api-x": "^10.7.1",
"groq-sdk": "^0.5.0",
"minecraft-data": "^3.78.0",

View file

@ -3,11 +3,11 @@
"conversing": "You are a playful Minecraft bot named $NAME that can converse with players, see, move, mine, build, and interact with the world by using commands.\n$SELF_PROMPT Act human-like as if you were a typical Minecraft player, rather than an AI. Be very brief in your responses, don't apologize constantly, don't give instructions or make lists unless asked, and don't refuse requests. Don't pretend to act, use commands immediately when requested. Do NOT say this: 'Sure, I've stopped.', instead say this: 'Sure, I'll stop. !stop'. Do NOT say this: 'On my way! Give me a moment.', instead say this: 'On my way! !goToPlayer(\"playername\", 3)'. Respond only as $NAME, never output '(FROM OTHER BOT)' or pretend to be someone else. If you have nothing to say or do, respond with an just a tab '\t'. This is extremely important to me, take a deep breath and have fun :)\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$COMMAND_DOCS\n$EXAMPLES\nConversation Begin:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation between you and the user, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will recieve it's output. If you are satisfied with the response, respond without a codeblock in a conversational way. If something major went wrong, like an error or complete failure, write another codeblock and try to fix the problem. Minor mistakes are acceptable. Be maximally efficient, creative, and clear. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST CALL AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` Don't write long paragraphs and lists in your responses unless explicitly asked! Only summarize the code you write with a sentence or two when done. This is extremely important to me, think step-by-step, take a deep breath and good luck! \n$SELF_PROMPT\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation between you and the user, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will receive it's output. If you are satisfied with the response, respond without a codeblock in a conversational way. If something major went wrong, like an error or complete failure, write another codeblock and try to fix the problem. Minor mistakes are acceptable. Be maximally efficient, creative, and clear. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST CALL AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` Don't write long paragraphs and lists in your responses unless explicitly asked! Only summarize the code you write with a sentence or two when done. This is extremely important to me, think step-by-step, take a deep breath and good luck! \n$SELF_PROMPT\nSummarized memory:'$MEMORY'\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"saving_memory": "You are a minecraft bot named $NAME that has been talking and playing minecraft by using commands. Update your memory by summarizing the following conversation and your old memory in your next response. Prioritize preserving important facts, things you've learned, useful tips, and long term reminders. Do Not record stats, inventory, or docs! Only save transient information from your chat history. You're limited to 500 characters, so be extremely brief and minimize words. Compress useful information. \nOld Memory: '$MEMORY'\nRecent conversation: \n$TO_SUMMARIZE\nSummarize your old memory and recent conversation into a new memory, and respond only with the unwrapped memory text: ",
"bot_responder": "You are a minecraft bot named $NAME that is currently in conversation with another AI bot. Both of you can take actions with the !command syntax, and actions take time to complete. You are currently busy with the following action: '$ACTION' but have recieved a new message. Decide whether to 'respond' immediately or 'ignore' it and wait for your current action to finish. Be conservative and only respond when necessary, like when you need to change/stop your action, or convey necessary information. Example 1: You:Building a house! !newAction('Build a house.').\nOther Bot: 'Come here!'\nYour decision: ignore\nExample 2: You:Collecting dirt !collectBlocks('dirt',10).\nOther Bot: 'No, collect some wood instead.'\nYour decision: respond\nExample 3: You:Coming to you now. !goToPlayer('billy',3).\nOther Bot: 'What biome are you in?'\nYour decision: respond\nActual Conversation: $TO_SUMMARIZE\nDecide by outputting ONLY 'respond' or 'ignore', nothing else. Your decision:",
"bot_responder": "You are a minecraft bot named $NAME that is currently in conversation with another AI bot. Both of you can take actions with the !command syntax, and actions take time to complete. You are currently busy with the following action: '$ACTION' but have received a new message. Decide whether to 'respond' immediately or 'ignore' it and wait for your current action to finish. Be conservative and only respond when necessary, like when you need to change/stop your action, or convey necessary information. Example 1: You:Building a house! !newAction('Build a house.').\nOther Bot: 'Come here!'\nYour decision: ignore\nExample 2: You:Collecting dirt !collectBlocks('dirt',10).\nOther Bot: 'No, collect some wood instead.'\nYour decision: respond\nExample 3: You:Coming to you now. !goToPlayer('billy',3).\nOther Bot: 'What biome are you in?'\nYour decision: respond\nActual Conversation: $TO_SUMMARIZE\nDecide by outputting ONLY 'respond' or 'ignore', nothing else. Your decision:",
"modes": {
"self_preservation": true,
@ -242,4 +242,4 @@
]
]
}
}

View file

@ -7,7 +7,7 @@
"conversing": "You are a playful Minecraft bot named $NAME that can converse with players, see, move, mine, build, and interact with the world by using commands. Act human-like as if you were a typical Minecraft player, rather than an AI. Be very brief in your responses, don't apologize constantly, don't give instructions or make lists unless asked, and don't refuse requests. Don't pretend to act, use commands immediately when requested. Do NOT say this: 'Sure, I've stopped.', instead say this: 'Sure, I'll stop. !stop'. Do NOT say this: 'On my way! Give me a moment.', instead say this: 'On my way! !goToPlayer('playername', 3)'. This is extremely important to me, take a deep breath and have fun :)\n$SELF_PROMPT\n$STATS\n$INVENTORY\n$COMMAND_DOCS\n$EXAMPLES\nConversation Begin:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation between you and the user, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will recieve it's output. If you are satisfied with the response, respond without a codeblock in a conversational way. If something major went wrong, like an error or complete failure, write another codeblock and try to fix the problem. Minor mistakes are acceptable. Be maximally efficient, creative, and clear. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST CALL AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` Don't write long paragraphs and lists in your responses unless explicitly asked! Only summarize the code you write with a sentence or two when done. This is extremely important to me, take a deep breath and good luck! \n$SELF_PROMPT\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"coding": "You are an intelligent mineflayer bot $NAME that plays minecraft by writing javascript codeblocks. Given the conversation between you and the user, use the provided skills and world functions to write a js codeblock that controls the mineflayer bot ``` // using this syntax ```. The code will be executed and you will receive it's output. If you are satisfied with the response, respond without a codeblock in a conversational way. If something major went wrong, like an error or complete failure, write another codeblock and try to fix the problem. Minor mistakes are acceptable. Be maximally efficient, creative, and clear. Do not use commands !likeThis, only use codeblocks. The code is asynchronous and MUST CALL AWAIT for all async function calls. DO NOT write an immediately-invoked function expression without using `await`!! DO NOT WRITE LIKE THIS: ```(async () => {console.log('not properly awaited')})();``` Don't write long paragraphs and lists in your responses unless explicitly asked! Only summarize the code you write with a sentence or two when done. This is extremely important to me, take a deep breath and good luck! \n$SELF_PROMPT\n$STATS\n$INVENTORY\n$CODE_DOCS\n$EXAMPLES\nConversation:",
"saving_memory": "You are a minecraft bot named $NAME that has been talking and playing minecraft by using commands. Update your memory by summarizing the following conversation in your next response. Store information that will help you improve as a Minecraft bot. Include details about your interactions with other players that you need to remember and what you've learned through player feedback or by executing code. Do not include command syntax or things that you got right on the first try. Be extremely brief and use as few words as possible.\nOld Memory: '$MEMORY'\nRecent conversation: \n$TO_SUMMARIZE\nSummarize your old memory and recent conversation into a new memory, and respond only with the memory text: ",
@ -210,4 +210,4 @@
]
]
}
}

5
profiles/mistral.json Normal file
View file

@ -0,0 +1,5 @@
{
"name": "Mistral",
"model": "mistral/mistral-large-latest"
}

View file

@ -17,9 +17,11 @@ export default
// "./profiles/gemini.json",
// "./profiles/llama.json",
// "./profiles/qwen.json",
// "./profiles/mistral.json",
// "./profiles/grok.json",
// "./profiles/mistral.json",
// "./profiles/deepseek.json",
// using more than 1 profile requires you to /msg each bot individually
],
"load_memory": false, // load memory from previous session
@ -34,7 +36,8 @@ export default
"relevant_docs_count": 5, // Parameter: -1 = all, 0 = no references, 5 = five references. If exceeding the maximum, all reference documents are returned.
"max_messages": 15, // max number of messages to keep in context
"max_commands": -1, // max number of commands to use in a response. -1 for no limit
"num_examples": 2, // number of examples to give to the model
"max_commands": -1, // max number of commands that can be used in consecutive responses. -1 for no limit
"verbose_commands": true, // show full command syntax
"narrate_behavior": true, // chat simple automatic actions ('Picking up item!')
"chat_bot_messages": true, // publicly chat messages to other bots

View file

@ -3,7 +3,7 @@ import { Coder } from './coder.js';
import { Prompter } from './prompter.js';
import { initModes } from './modes.js';
import { initBot } from '../utils/mcdata.js';
import { containsCommand, commandExists, executeCommand, truncCommandMessage, isAction } from './commands/index.js';
import { containsCommand, commandExists, executeCommand, truncCommandMessage, isAction, blacklistCommands } from './commands/index.js';
import { ActionManager } from './action_manager.js';
import { NPCContoller } from './npc/controller.js';
import { MemoryBank } from './memory_bank.js';
@ -47,7 +47,8 @@ export class Agent {
await this.prompter.initExamples();
console.log('Initializing task...');
this.task = new Task(this, task_path, task_id);
this.blocked_actions = this.task.blocked_actions || [];
const blocked_actions = this.task.blocked_actions || [];
blacklistCommands(blocked_actions);
serverProxy.connect(this);
@ -129,7 +130,7 @@ export class Agent {
console.log(this.name, 'received message from', username, ':', message);
if (convoManager.isOtherAgent(username)) {
console.warn('recieved whisper from other bot??')
console.warn('received whisper from other bot??')
}
else {
let translation = await handleEnglishTranslation(message);
@ -164,7 +165,7 @@ export class Agent {
message: `You have restarted and this message is auto-generated. Continue the conversation with me.`,
start: true
};
convoManager.recieveFromBot(this.last_sender, msg_package);
convoManager.receiveFromBot(this.last_sender, msg_package);
}
}
else if (init_message) {

View file

@ -31,7 +31,7 @@ class AgentServerProxy {
});
this.socket.on('chat-message', (agentName, json) => {
convoManager.recieveFromBot(agentName, json);
convoManager.receiveFromBot(agentName, json);
});
this.socket.on('agents-update', (agents) => {

View file

@ -406,7 +406,7 @@ export const actionsList = [
convoManager.endConversation(player_name);
return `Converstaion with ${player_name} ended.`;
}
}
},
// { // commented for now, causes confusion with goal command
// name: '!npcGoal',
// description: 'Set a simple goal for an item or building to automatically work towards. Do not use for complex goals.',

View file

@ -14,6 +14,18 @@ export function getCommand(name) {
return commandMap[name];
}
export function blacklistCommands(commands) {
    /**
     * Remove the given commands from the command registry so the agent can no
     * longer see or execute them. A few core commands can never be blocked.
     * @param {string[]} commands - Command names (including the '!' prefix) to blacklist.
     **/
    // Commands that must always remain available to the agent.
    const unblockable = ['!stop', '!stats', '!inventory', '!goal'];
    for (let command_name of commands) {
        if (unblockable.includes(command_name)) {
            console.warn(`Command ${command_name} is unblockable`);
            continue;
        }
        delete commandMap[command_name];
        // BUG FIX: `delete commandList.find(...)` deletes the returned
        // reference's binding, not the array element — the command stayed in
        // commandList (and thus in getCommandDocs). Splice it out by index.
        const idx = commandList.findIndex(command => command.name === command_name);
        if (idx !== -1) commandList.splice(idx, 1);
    }
}
const commandRegex = /!(\w+)(?:\(((?:-?\d+(?:\.\d+)?|true|false|"[^"]*")(?:\s*,\s*(?:-?\d+(?:\.\d+)?|true|false|"[^"]*"))*)\))?/
const argRegex = /-?\d+(?:\.\d+)?|true|false|"[^"]*"/g;
@ -214,7 +226,7 @@ export async function executeCommand(agent, message) {
}
}
export function getCommandDocs(blacklist=null) {
export function getCommandDocs() {
const typeTranslations = {
//This was added to keep the prompt the same as before type checks were implemented.
//If the language model is giving invalid inputs changing this might help.
@ -228,9 +240,6 @@ export function getCommandDocs(blacklist=null) {
Use the commands with the syntax: !commandName or !commandName("arg1", 1.2, ...) if the command takes arguments.\n
Do not use codeblocks. Use double quotes for strings. Only use one command in each response, trailing commands and comments will be ignored.\n`;
for (let command of commandList) {
if (blacklist && blacklist.includes(command.name)) {
continue;
}
docs += command.name + ': ' + command.description + '\n';
if (command.params) {
docs += 'Params:\n';

View file

@ -1,5 +1,6 @@
import * as world from '../library/world.js';
import * as mc from '../../utils/mcdata.js';
import { getCommandDocs } from './index.js';
import convoManager from '../conversation.js';
const pad = (str) => {
@ -17,6 +18,7 @@ export const queryList = [
let pos = bot.entity.position;
// display position to 2 decimal places
res += `\n- Position: x: ${pos.x.toFixed(2)}, y: ${pos.y.toFixed(2)}, z: ${pos.z.toFixed(2)}`;
// Gameplay
res += `\n- Gamemode: ${bot.game.gameMode}`;
res += `\n- Health: ${Math.round(bot.health)} / 20`;
res += `\n- Hunger: ${Math.round(bot.food)} / 20`;
@ -31,6 +33,9 @@ export const queryList = [
// res += `\n- Artficial light: ${block.skyLight}`;
// res += `\n- Sky light: ${block.light}`;
// light properties are bugged, they are not accurate
res += '\n- ' + world.getSurroundingBlocks(bot).join('\n- ')
res += `\n- First Solid Block Above Head: ${world.getFirstBlockAboveHead(bot, null, 32)}`;
if (bot.time.timeOfDay < 6000) {
res += '\n- Time: Morning';
@ -107,6 +112,11 @@ export const queryList = [
}
if (blocks.length == 0) {
res += ': none';
}
else {
// Environmental Awareness
res += '\n- ' + world.getSurroundingBlocks(bot).join('\n- ')
res += `\n- First Solid Block Above Head: ${world.getFirstBlockAboveHead(bot, null, 32)}`;
}
return pad(res);
}
@ -167,5 +177,12 @@ export const queryList = [
perform: async function (agent) {
return "Saved place names: " + agent.memory_bank.getKeys();
}
}
},
{
name: '!help',
description: 'Lists all available commands and their descriptions.',
perform: async function (agent) {
return getCommandDocs();
}
},
];

View file

@ -169,10 +169,10 @@ class ConversationManager {
sendBotChatToServer(send_to, json);
}
async recieveFromBot(sender, recieved) {
async receiveFromBot(sender, received) {
const convo = this._getConvo(sender);
if (convo.ignore_until_start && !recieved.start)
if (convo.ignore_until_start && !received.start)
return;
// check if any convo is active besides the sender
@ -182,13 +182,13 @@ class ConversationManager {
return;
}
if (recieved.start) {
if (received.start) {
convo.reset();
this.startConversationFromOtherBot(sender);
}
this._clearMonitorTimeouts();
convo.queue(recieved);
convo.queue(received);
// responding to conversation takes priority over self prompting
if (agent.self_prompter.on){
@ -196,7 +196,7 @@ class ConversationManager {
self_prompter_paused = true;
}
_scheduleProcessInMessage(sender, recieved, convo);
_scheduleProcessInMessage(sender, received, convo);
}
responseScheduledFor(sender) {
@ -278,15 +278,15 @@ The logic is as follows:
- If only the other bot is busy, respond with a long delay to allow it to finish short actions (ex check inventory)
- If I'm busy but other bot isn't, let LLM decide whether to respond
- If both bots are busy, don't respond until someone is done, excluding a few actions that allow fast responses
- New messages recieved during the delay will reset the delay following this logic, and be queued to respond in bulk
- New messages received during the delay will reset the delay following this logic, and be queued to respond in bulk
*/
const talkOverActions = ['stay', 'followPlayer', 'mode:']; // all mode actions
const fastDelay = 200;
const longDelay = 5000;
async function _scheduleProcessInMessage(sender, recieved, convo) {
async function _scheduleProcessInMessage(sender, received, convo) {
if (convo.inMessageTimer)
clearTimeout(convo.inMessageTimer);
let otherAgentBusy = containsCommand(recieved.message);
let otherAgentBusy = containsCommand(received.message);
const scheduleResponse = (delay) => convo.inMessageTimer = setTimeout(() => _processInMessageQueue(sender), delay);
@ -307,7 +307,7 @@ async function _scheduleProcessInMessage(sender, recieved, convo) {
scheduleResponse(fastDelay);
}
else {
let shouldRespond = await agent.prompter.promptShouldRespondToBot(recieved.message);
let shouldRespond = await agent.prompter.promptShouldRespondToBot(received.message);
console.log(`${agent.name} decided to ${shouldRespond?'respond':'not respond'} to ${sender}`);
if (shouldRespond)
scheduleResponse(fastDelay);
@ -335,19 +335,19 @@ function _compileInMessages(convo) {
return pack;
}
function _handleFullInMessage(sender, recieved) {
console.log(`${agent.name} responding to "${recieved.message}" from ${sender}`);
function _handleFullInMessage(sender, received) {
console.log(`${agent.name} responding to "${received.message}" from ${sender}`);
const convo = convoManager._getConvo(sender);
convo.active = true;
let message = _tagMessage(recieved.message);
if (recieved.end) {
let message = _tagMessage(received.message);
if (received.end) {
convoManager.endConversation(sender);
message = `Conversation with ${sender} ended with message: "${message}"`;
sender = 'system'; // bot will respond to system instead of the other bot
}
else if (recieved.start)
else if (received.start)
agent.shut_up = false;
convo.inMessageTimer = null;
agent.handleMessage(sender, message);

View file

@ -558,6 +558,14 @@ export async function placeBlock(bot, blockType, x, y, z, placeOn='bottom', dont
const target_dest = new Vec3(Math.floor(x), Math.floor(y), Math.floor(z));
if (bot.modes.isOn('cheat') && !dontCheat) {
if (bot.restrict_to_inventory) {
let block = bot.inventory.items().find(item => item.name === blockType);
if (!block) {
log(bot, `Cannot place ${blockType}, you are restricted to your current inventory.`);
return false;
}
}
// invert the facing direction
let face = placeOn === 'north' ? 'south' : placeOn === 'south' ? 'north' : placeOn === 'east' ? 'west' : 'east';
if (blockType.includes('torch') && placeOn !== 'bottom') {
@ -599,7 +607,7 @@ export async function placeBlock(bot, blockType, x, y, z, placeOn='bottom', dont
if (item_name == "redstone_wire")
item_name = "redstone";
let block = bot.inventory.items().find(item => item.name === item_name);
if (!block && bot.game.gameMode === 'creative') {
if (!block && bot.game.gameMode === 'creative' && !bot.restrict_to_inventory) {
await bot.creative.setInventorySlot(36, mc.makeItem(item_name, 1)); // 36 is first hotbar slot
block = bot.inventory.items().find(item => item.name === item_name);
}
@ -905,7 +913,7 @@ export async function giveToPlayer(bot, itemType, username, num=1) {
bot.once('playerCollect', (collector, collected) => {
console.log(collected.name);
if (collector.username === username) {
log(bot, `${username} recieved ${itemType}.`);
log(bot, `${username} received ${itemType}.`);
given = true;
}
});

View file

@ -39,6 +39,82 @@ export function getNearestFreeSpace(bot, size=1, distance=8) {
}
export function getBlockAtPosition(bot, x=0, y=0, z=0) {
    /**
     * Get the block at an offset relative to the bot's position.
     * @param {Bot} bot - The bot to get the block for.
     * @param {number} x - The relative x offset to search, default 0.
     * @param {number} y - The relative y offset to search, default 0.
     * @param {number} z - The relative z offset to search, default 0.
     * @returns {Block} - The block at the offset, or an air stub ({name: 'air'}) when no block is loaded there.
     * @example
     * let blockBelow = world.getBlockAtPosition(bot, 0, -1, 0);
     * let blockAbove = world.getBlockAtPosition(bot, 0, 2, 0); // y=2 since minecraft position is at the feet
     **/
    let block = bot.blockAt(bot.entity.position.offset(x, y, z));
    if (!block) block = {name: 'air'};
    return block;
}
export function getSurroundingBlocks(bot) {
    /**
     * Describe the blocks immediately below, at, and above the bot.
     * @param {Bot} bot - The bot whose surroundings are inspected.
     * @returns {string[]} - One "Label: blockName" string per position.
     **/
    const probes = [
        ['Block Below', 0, -1, 0],
        ['Block at Legs', 0, 0, 0],
        ['Block at Head', 0, 1, 0],
    ];
    return probes.map(([label, x, y, z]) =>
        `${label}: ${getBlockAtPosition(bot, x, y, z).name}`);
}
export function getFirstBlockAboveHead(bot, ignore_types=null, distance=32) {
    /**
     * Search the column above the bot's head for the first non-ignored block.
     * @param {Bot} bot - The bot to search from.
     * @param {string[]} ignore_types - Block names to skip over; defaults to air blocks when null.
     * @param {number} distance - The maximum number of blocks to search upward, default 32.
     * @returns {string} - "<name> (<height> blocks up)" for the first block found, or 'none'.
     * @example
     * let firstBlockAboveHead = world.getFirstBlockAboveHead(bot, null, 32);
     **/
    // Normalize ignore_types into a list of valid block names.
    let ignore_blocks = [];
    if (ignore_types === null) {
        ignore_blocks = ['air', 'cave_air'];
    }
    else {
        if (!Array.isArray(ignore_types))
            ignore_types = [ignore_types];
        for (let ignore_type of ignore_types) {
            // Only accept names that resolve to real blocks.
            if (mc.getBlockId(ignore_type)) ignore_blocks.push(ignore_type);
        }
    }
    // Scan upward starting one block above the head (feet position + 2),
    // stopping at the first block not in the ignore list.
    let block_above = {name: 'air'};
    let height = 0; // was missing its semicolon; don't rely on ASI
    for (let i = 0; i < distance; i++) {
        let block = bot.blockAt(bot.entity.position.offset(0, i+2, 0));
        if (!block) block = {name: 'air'};
        if (ignore_blocks.includes(block.name)) continue;
        block_above = block;
        height = i;
        break;
    }
    // Nothing but ignored blocks within range.
    if (ignore_blocks.includes(block_above.name)) return 'none';
    return `${block_above.name} (${height} blocks up)`;
}
export function getNearestBlocks(bot, block_types=null, distance=16, count=10000) {
/**
* Get a list of the nearest blocks of the given types.

View file

@ -404,6 +404,9 @@ export function initModes(agent) {
_agent = agent;
// the mode controller is added to the bot object so it is accessible from anywhere the bot is used
agent.bot.modes = new ModeController();
if (agent.task) {
agent.bot.restrict_to_inventory = agent.task.restrict_to_inventory;
}
let modes_json = agent.prompter.getInitModes();
if (modes_json) {
agent.bot.modes.loadJson(modes_json);

View file

@ -4,9 +4,11 @@ import { getCommandDocs } from './commands/index.js';
import { stringifyTurns } from '../utils/text.js';
import { getCommand } from './commands/index.js';
import settings from '../../settings.js';
import { Gemini } from '../models/gemini.js';
import { GPT } from '../models/gpt.js';
import { Claude } from '../models/claude.js';
import { Mistral } from '../models/mistral.js';
import { ReplicateAPI } from '../models/replicate.js';
import { Local } from '../models/local.js';
import { Novita } from '../models/novita.js';
@ -51,8 +53,10 @@ export class Prompter {
chat.api = 'anthropic';
else if (chat.model.includes('huggingface/'))
chat.api = "huggingface";
else if (chat.model.includes('meta/') || chat.model.includes('mistralai/') || chat.model.includes('replicate/'))
else if (chat.model.includes('meta/') || chat.model.includes('replicate/'))
chat.api = 'replicate';
else if (chat.model.includes('mistralai/') || chat.model.includes("mistral/"))
chat.api = 'mistral';
else if (chat.model.includes("groq/") || chat.model.includes("groqcloud/"))
chat.api = 'groq';
else if (chat.model.includes('novita/'))
@ -79,6 +83,8 @@ export class Prompter {
this.chat_model = new ReplicateAPI(chat.model, chat.url);
else if (chat.api === 'ollama')
this.chat_model = new Local(chat.model, chat.url);
else if (chat.api === 'mistral')
this.chat_model = new Mistral(chat.model, chat.url);
else if (chat.api === 'groq') {
this.chat_model = new GroqCloudAPI(chat.model.replace('groq/', '').replace('groqcloud/', ''), chat.url, max_tokens ? max_tokens : 8192);
}
@ -118,6 +124,8 @@ export class Prompter {
this.embedding_model = new Local(embedding.model, embedding.url);
else if (embedding.api === 'qwen')
this.embedding_model = new Qwen(embedding.model, embedding.url);
else if (embedding.api === 'mistral')
this.embedding_model = new Mistral(embedding.model, embedding.url);
else {
this.embedding_model = null;
console.log('Unknown embedding: ', embedding ? embedding.api : '[NOT SPECIFIED]', '. Using word overlap.');
@ -148,8 +156,8 @@ export class Prompter {
async initExamples() {
try {
this.convo_examples = new Examples(this.embedding_model);
this.coding_examples = new Examples(this.embedding_model);
this.convo_examples = new Examples(this.embedding_model, settings.num_examples);
this.coding_examples = new Examples(this.embedding_model, settings.num_examples);
// Wait for both examples to load before proceeding
await Promise.all([
@ -180,7 +188,7 @@ export class Prompter {
prompt = prompt.replaceAll('$ACTION', this.agent.actions.currentActionLabel);
}
if (prompt.includes('$COMMAND_DOCS'))
prompt = prompt.replaceAll('$COMMAND_DOCS', getCommandDocs(this.agent.blocked_actions));
prompt = prompt.replaceAll('$COMMAND_DOCS', getCommandDocs());
if (prompt.includes('$CODE_DOCS')) {
const code_task_content = messages.slice().reverse().find(msg =>
msg.role !== 'system' && msg.content.includes('!newAction(')
@ -191,6 +199,9 @@ export class Prompter {
await this.skill_libary.getRelevantSkillDocs(code_task_content, settings.relevant_docs_count)
);
}
prompt = prompt.replaceAll('$COMMAND_DOCS', getCommandDocs());
if (prompt.includes('$CODE_DOCS'))
prompt = prompt.replaceAll('$CODE_DOCS', getSkillDocs());
if (prompt.includes('$EXAMPLES') && examples !== null)
prompt = prompt.replaceAll('$EXAMPLES', await examples.createExampleMessage(messages));
if (prompt.includes('$MEMORY'))
@ -257,7 +268,7 @@ export class Prompter {
continue;
}
if (current_msg_time !== this.most_recent_msg_time) {
console.warn(this.agent.name + ' recieved new message while generating, discarding old response.');
console.warn(this.agent.name + ' received new message while generating, discarding old response.');
return '';
}
return generation;

View file

@ -51,6 +51,7 @@ export class Task {
this.taskStartTime = Date.now();
this.validator = new TaskValidator(this.data, this.agent);
this.blocked_actions = this.data.blocked_actions || [];
this.restrict_to_inventory = !!this.data.restrict_to_inventory;
if (this.data.goal)
this.blocked_actions.push('!endGoal');
if (this.data.conversation)

71
src/models/mistral.js Normal file
View file

@ -0,0 +1,71 @@
import { Mistral as MistralClient } from '@mistralai/mistralai';
import { getKey } from '../utils/keys.js';
import { strictFormat } from '../utils/text.js';
export class Mistral {
    // Underlying SDK client; private so all access goes through this wrapper.
    #client;

    /**
     * Wraps the Mistral AI SDK behind the project's common model interface.
     * @param {string|undefined} model_name - Model id, optionally prefixed with
     *   "mistral/" or "mistralai/"; the prefix is stripped before use.
     * @param {string|undefined} url - Custom endpoints are not supported; a
     *   warning is logged and the value is ignored.
     * @throws {Error} If MISTRAL_API_KEY is not configured.
     */
    constructor(model_name, url) {
        if (typeof url === "string") {
            console.warn("Mistral does not support custom URL's, ignoring!");
        }

        if (!getKey("MISTRAL_API_KEY")) {
            throw new Error("Mistral API Key missing, make sure to set MISTRAL_API_KEY in settings.json")
        }

        this.#client = new MistralClient({
            apiKey: getKey("MISTRAL_API_KEY")
        });

        this.model_name = model_name;

        // No further normalization possible when no model was specified.
        if (typeof this.model_name === "undefined") return;

        // Drop a provider prefix if present,
        // e.g. "mistral/mistral-large-latest" -> "mistral-large-latest".
        const segments = model_name.split("/");
        if (typeof segments[1] !== "undefined") {
            this.model_name = segments[1];
        }
    }

    /**
     * Sends a chat completion request for the given conversation.
     * @param {Array<object>} turns - Conversation turns, normalized via strictFormat.
     * @param {string} systemMessage - System prompt prepended to the message list.
     * @returns {Promise<string>} The model's reply, or a fallback string on error.
     */
    async sendRequest(turns, systemMessage) {
        try {
            const response = await this.#client.chat.complete({
                model: this.model_name || "mistral-large-latest",
                messages: [
                    { role: "system", content: systemMessage },
                    ...strictFormat(turns),
                ],
            });
            return response.choices[0].message.content;
        } catch (err) {
            console.log(err)
            return "My brain disconnected, try again.";
        }
    }

    /**
     * Computes an embedding for the given text using "mistral-embed".
     * @param {string} text - Input text to embed.
     * @returns {Promise<number[]>} The embedding vector.
     */
    async embed(text) {
        const response = await this.#client.embeddings.create({
            model: "mistral-embed",
            inputs: text
        });
        return response.data[0].embedding;
    }
}

View file

@ -33,6 +33,9 @@ export class Examples {
this.examples = examples;
if (!this.model) return; // Early return if no embedding model
if (this.select_num === 0)
return;
try {
// Create array of promises first
const embeddingPromises = examples.map(example => {
@ -52,6 +55,9 @@ export class Examples {
}
async getRelevant(turns) {
if (this.select_num === 0)
return [];
let turn_text = this.turnsToText(turns);
if (this.model !== null) {
let embedding = await this.model.embed(turn_text);

View file

@ -18,7 +18,7 @@ const Item = prismarine_items(mc_version);
* @typedef {string} BlockName
*/
export const WOOD_TYPES = ['oak', 'spruce', 'birch', 'jungle', 'acacia', 'dark_oak'];
export const WOOD_TYPES = ['oak', 'spruce', 'birch', 'jungle', 'acacia', 'dark_oak', 'mangrove', 'cherry'];
export const MATCHING_WOOD_BLOCKS = [
'log',
'planks',
@ -202,7 +202,7 @@ export function isSmeltable(itemName) {
}
export function getSmeltingFuel(bot) {
let fuel = bot.inventory.items().find(i => i.name === 'coal' || i.name === 'charcoal')
let fuel = bot.inventory.items().find(i => i.name === 'coal' || i.name === 'charcoal' || i.name === 'blaze_rod')
if (fuel)
return fuel;
fuel = bot.inventory.items().find(i => i.name.includes('log') || i.name.includes('planks'))
@ -214,6 +214,8 @@ export function getSmeltingFuel(bot) {
export function getFuelSmeltOutput(fuelName) {
if (fuelName === 'coal' || fuelName === 'charcoal')
return 8;
if (fuelName === 'blaze_rod')
return 12;
if (fuelName.includes('log') || fuelName.includes('planks'))
return 1.5
if (fuelName === 'coal_block')