Merge branch 'develop' into vision

commit d9a0b0802c by MaxRobinsonTheGreat, 2025-03-15 17:24:52 -05:00
19 changed files with 584 additions and 117 deletions

README.md

@@ -39,11 +39,11 @@ You can configure the agent's name, model, and prompts in their profile like `an
| API | Config Variable | Example Model name | Docs |
|------|------|------|------|
| `openai` | `OPENAI_API_KEY` | `gpt-4o-mini` | [docs](https://platform.openai.com/docs/models) |
| `google` | `GEMINI_API_KEY` | `gemini-pro` | [docs](https://ai.google.dev/gemini-api/docs/models/gemini) |
| `google` | `GEMINI_API_KEY` | `gemini-2.0-flash` | [docs](https://ai.google.dev/gemini-api/docs/models/gemini) |
| `anthropic` | `ANTHROPIC_API_KEY` | `claude-3-haiku-20240307` | [docs](https://docs.anthropic.com/claude/docs/models-overview) |
| `xai` | `XAI_API_KEY` | `grok-2-1212` | [docs](https://docs.x.ai/docs) |
| `deepseek` | `DEEPSEEK_API_KEY` | `deepseek-chat` | [docs](https://api-docs.deepseek.com/) |
| `ollama` (local) | n/a | `llama3` | [docs](https://ollama.com/library) |
| `ollama` (local) | n/a | `llama3.1` | [docs](https://ollama.com/library) |
| `qwen` | `QWEN_API_KEY` | `qwen-max` | [Intl.](https://www.alibabacloud.com/help/en/model-studio/developer-reference/use-qwen-by-calling-api)/[cn](https://help.aliyun.com/zh/model-studio/getting-started/models) |
| `mistral` | `MISTRAL_API_KEY` | `mistral-large-latest` | [docs](https://docs.mistral.ai/getting-started/models/models_overview/) |
| `replicate` | `REPLICATE_API_KEY` | `replicate/meta/meta-llama-3-70b-instruct` | [docs](https://replicate.com/collections/language-models) |
@@ -51,9 +51,11 @@ You can configure the agent's name, model, and prompts in their profile like `an
| `huggingface` | `HUGGINGFACE_API_KEY` | `huggingface/mistralai/Mistral-Nemo-Instruct-2407` | [docs](https://huggingface.co/models) |
| `novita` | `NOVITA_API_KEY` | `novita/deepseek/deepseek-r1` | [docs](https://novita.ai/model-api/product/llm-api?utm_source=github_mindcraft&utm_medium=github_readme&utm_campaign=link) |
| `openrouter` | `OPENROUTER_API_KEY` | `openrouter/anthropic/claude-3.5-sonnet` | [docs](https://openrouter.ai/models) |
| `glhf.chat` | `GHLF_API_KEY` | `glhf/hf:meta-llama/Llama-3.1-405B-Instruct` | [docs](https://glhf.chat/user-settings/api) |
| `hyperbolic` | `HYPERBOLIC_API_KEY` | `hyperbolic/deepseek-ai/DeepSeek-V3` | [docs](https://docs.hyperbolic.xyz/docs/getting-started) |
If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command:
`ollama pull llama3 && ollama pull nomic-embed-text`
`ollama pull llama3.1 && ollama pull nomic-embed-text`
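For example, a minimal profile that runs on a local Ollama model might look like the following sketch (the `name` value and file name are arbitrary; the fields mirror `profiles/gemini.json` shown later in this commit):

```json
{
    "name": "LocalAndy",
    "model": "llama3.1",
    "cooldown": 3000
}
```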
### Online Servers
To connect to online servers your bot will need an official Microsoft/Minecraft account. You can use your own personal account, but you will need a second account if you want to join at the same time and play alongside the bot. To connect, change these lines in `settings.js`:
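For reference, the relevant lines look something like this (a sketch with placeholder values; `"auth": "microsoft"` is what makes the bot log in with an official account):

```js
// settings.js (placeholder values)
"host": "my.server.address", // IP or hostname of the online server
"port": 25565,               // the server's port; 25565 is the Minecraft default
"auth": "microsoft",         // authenticate with an official Microsoft/Minecraft account
```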

keys.example.json

@@ -10,6 +10,8 @@
"XAI_API_KEY": "",
"MISTRAL_API_KEY": "",
"DEEPSEEK_API_KEY": "",
"GHLF_API_KEY": "",
"HYPERBOLIC_API_KEY": "",
"NOVITA_API_KEY": "",
"OPENROUTER_API_KEY": ""
}

package.json

@@ -6,6 +6,7 @@
"@huggingface/inference": "^2.8.1",
"@mistralai/mistralai": "^1.1.0",
"canvas": "^3.1.0",
"cheerio": "^1.0.0",
"express": "^4.18.2",
"google-translate-api-x": "^10.7.1",
"groq-sdk": "^0.15.0",


@@ -0,0 +1,13 @@
diff --git a/node_modules/@google/generative-ai/dist/index.mjs b/node_modules/@google/generative-ai/dist/index.mjs
index 23a175b..aab7e19 100644
--- a/node_modules/@google/generative-ai/dist/index.mjs
+++ b/node_modules/@google/generative-ai/dist/index.mjs
@@ -151,7 +151,7 @@ class GoogleGenerativeAIResponseError extends GoogleGenerativeAIError {
* limitations under the License.
*/
const BASE_URL = "https://generativelanguage.googleapis.com";
-const API_VERSION = "v1";
+const API_VERSION = "v1beta";
/**
* We can't `require` package.json if this runs on web. We will use rollup to
* swap in the version number here at build time.
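Presumably the API version is bumped because newer Gemini models and features, such as the `gemini-2.0-flash` profile updated below, are only served on the `v1beta` endpoint; this appears to be a `patch-package` override applied at install time.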

profiles/gemini.json

@@ -1,7 +1,7 @@
{
"name": "gemini",
"model": "gemini-1.5-flash",
"model": "gemini-2.0-flash",
"cooldown": 10000
}
"cooldown": 5000
}

settings.js

@@ -19,7 +19,6 @@ export default
// "./profiles/gemini.json",
// "./profiles/llama.json",
// "./profiles/qwen.json",
// "./profiles/mistral.json",
// "./profiles/grok.json",
// "./profiles/mistral.json",
// "./profiles/deepseek.json",
@@ -30,7 +29,7 @@ export default
"load_memory": false, // load memory from previous session
"init_message": "Respond with hello world and your name", // sends to all on spawn
"only_chat_with": [], // users that the bots listen to and send general messages to. if empty it will chat publicly
"speak": false, // allows all bots to speak through system text-to-speech. works on windows, mac, on linux you need to `apt install espeak`
"language": "en", // translate to/from this language. Supports these language names: https://cloud.google.com/translate/docs/languages
"show_bot_views": false, // show bot's view in browser at localhost:3000, 3001...
@@ -38,7 +37,7 @@ export default
"allow_vision": false, // allows vision model to interpret screenshots as inputs
"blocked_actions" : [], // commands to disable and remove from docs. Ex: ["!setMode"]
"code_timeout_mins": -1, // minutes code is allowed to run. -1 for no timeout
"relevant_docs_count": 5, // Parameter: -1 = all, 0 = no references, 5 = five references. If exceeding the maximum, all reference documents are returned.
"relevant_docs_count": 5, // number of relevant code function docs to select for prompting. -1 for all
"max_messages": 15, // max number of messages to keep in context
"num_examples": 2, // number of examples to give to the model

src/agent/agent.js

@@ -15,6 +15,7 @@ import { addBrowserViewer } from './vision/browser_viewer.js';
import settings from '../../settings.js';
import { serverProxy } from './agent_proxy.js';
import { Task } from './tasks.js';
import { say } from './speak.js';
export class Agent {
async start(profile_fp, load_mem=false, init_message=null, count_id=0, task_path=null, task_id=null) {
@@ -355,6 +356,9 @@ export class Agent {
}
}
else {
if (settings.speak) {
say(message);
}
this.bot.chat(message);
}
}

src/agent/commands/actions.js

@@ -445,5 +445,13 @@ export const actionsList = [
await agent.actions.runAction('action:lookAtPosition', actionFn);
return result;
}
}
},
{
name: '!digDown',
description: 'Digs down a specified distance. Will stop if it reaches lava, water, or a fall of >=4 blocks below the bot.',
params: {'distance': { type: 'int', description: 'Distance to dig down', domain: [1, Number.MAX_SAFE_INTEGER] }},
perform: runAsAction(async (agent, distance) => {
await skills.digDown(agent.bot, distance)
})
},
];
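Assuming the same chat-command syntax as the other actions in this list, the new action would be invoked as something like `!digDown(10)`.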

src/agent/commands/queries.js

@@ -2,6 +2,7 @@ import * as world from '../library/world.js';
import * as mc from '../../utils/mcdata.js';
import { getCommandDocs } from './index.js';
import convoManager from '../conversation.js';
import { load } from 'cheerio';
const pad = (str) => {
return '\n' + str + '\n';
@@ -214,6 +215,35 @@ export const queryList = [
return pad(craftingPlan);
},
},
{
name: '!searchWiki',
description: 'Search the Minecraft Wiki for the given query.',
params: {
'query': { type: 'string', description: 'The query to search for.' }
},
perform: async function (agent, query) {
const url = `https://minecraft.wiki/w/${query}`
try {
const response = await fetch(url);
if (response.status === 404) {
return `${query} was not found on the Minecraft Wiki. Try adjusting your search term.`;
}
const html = await response.text();
const $ = load(html);
const parserOutput = $("div.mw-parser-output");
parserOutput.find("table.navbox").remove();
const divContent = parserOutput.text();
return divContent.trim();
} catch (error) {
console.error("Error fetching or parsing HTML:", error);
return `The following error occurred: ${error}`;
}
}
},
{
name: '!help',
description: 'Lists all available commands and their descriptions.',

src/agent/library/skills.js

@@ -460,7 +460,14 @@ export async function collectBlock(bot, blockType, num=1, exclude=null) {
return false;
}
try {
await bot.collectBlock.collect(block);
if (mc.mustCollectManually(blockType)) {
await goToPosition(bot, block.position.x, block.position.y, block.position.z, 2);
await bot.dig(block);
await pickupNearbyItems(bot);
}
else {
await bot.collectBlock.collect(block);
}
collected++;
await autoLight(bot);
}
@@ -1374,4 +1381,61 @@ export async function activateNearestBlock(bot, type) {
await bot.activateBlock(block);
log(bot, `Activated ${type} at x:${block.position.x.toFixed(1)}, y:${block.position.y.toFixed(1)}, z:${block.position.z.toFixed(1)}.`);
return true;
}
}
export async function digDown(bot, distance = 10) {
/**
* Digs down a specified distance. Will stop if it reaches lava, water, or a fall of >=4 blocks below the bot.
* @param {MinecraftBot} bot, reference to the minecraft bot.
* @param {int} distance, distance to dig down.
* @returns {Promise<boolean>} true if successfully dug all the way down.
* @example
* await skills.digDown(bot, 10);
**/
let start_block_pos = bot.blockAt(bot.entity.position).position;
for (let i = 1; i <= distance; i++) {
const targetBlock = bot.blockAt(start_block_pos.offset(0, -i, 0));
let belowBlock = bot.blockAt(start_block_pos.offset(0, -i-1, 0));
if (!targetBlock || !belowBlock) {
log(bot, `Dug down ${i-1} blocks, but reached the end of the world.`);
return true;
}
// Check for lava, water
if (targetBlock.name === 'lava' || targetBlock.name === 'water' ||
belowBlock.name === 'lava' || belowBlock.name === 'water') {
log(bot, `Dug down ${i-1} blocks, but reached ${belowBlock ? belowBlock.name : '(lava/water)'}`)
return false;
}
const MAX_FALL_BLOCKS = 2;
let num_fall_blocks = 0;
for (let j = 0; j <= MAX_FALL_BLOCKS; j++) {
if (!belowBlock || (belowBlock.name !== 'air' && belowBlock.name !== 'cave_air')) {
break;
}
num_fall_blocks++;
belowBlock = bot.blockAt(belowBlock.position.offset(0, -1, 0));
}
if (num_fall_blocks > MAX_FALL_BLOCKS) {
log(bot, `Dug down ${i-1} blocks, but reached a drop below the next block.`);
return false;
}
if (targetBlock.name === 'air' || targetBlock.name === 'cave_air') {
log(bot, 'Skipping air block');
console.log(targetBlock.position);
continue;
}
let dug = await breakBlockAt(bot, targetBlock.position.x, targetBlock.position.y, targetBlock.position.z);
if (!dug) {
log(bot, 'Failed to dig block at position:' + targetBlock.position);
return false;
}
}
log(bot, `Dug down ${distance} blocks.`);
return true;
}

src/agent/speak.js (new file, 43 lines)

@@ -0,0 +1,43 @@
import { exec } from 'child_process';
let speakingQueue = [];
let isSpeaking = false;
export function say(textToSpeak) {
speakingQueue.push(textToSpeak);
if (!isSpeaking) {
processQueue();
}
}
function processQueue() {
if (speakingQueue.length === 0) {
isSpeaking = false;
return;
}
isSpeaking = true;
const textToSpeak = speakingQueue.shift();
const isWin = process.platform === "win32";
const isMac = process.platform === "darwin";
let command;
if (isWin) {
command = `powershell -Command "Add-Type -AssemblyName System.Speech; (New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak(\\"${textToSpeak}\\")"`;
} else if (isMac) {
command = `say "${textToSpeak}"`;
} else {
command = `espeak "${textToSpeak}"`;
}
exec(command, (error, stdout, stderr) => {
if (error) {
console.error(`Error: ${error.message}`);
console.error(`${error.stack}`);
} else if (stderr) {
console.error(`Error: ${stderr}`);
}
processQueue(); // Continue with the next message in the queue
});
}

src/models/gemini.js

@@ -39,7 +39,6 @@ export class Gemini {
model: this.model_name || "gemini-1.5-flash",
// systemInstruction does not work bc google is trash
};
if (this.url) {
model = this.genAI.getGenerativeModel(
modelConfig,
@@ -72,7 +71,26 @@ export class Gemini {
}
});
const response = await result.response;
const text = response.text();
let text;
// Handle "thinking" models since they smart
if (this.model_name && this.model_name.includes("thinking")) {
if (
response.candidates &&
response.candidates.length > 0 &&
response.candidates[0].content &&
response.candidates[0].content.parts &&
response.candidates[0].content.parts.length > 1
) {
text = response.candidates[0].content.parts[1].text;
} else {
console.warn("Unexpected response structure for thinking model:", response);
text = response.text();
}
} else {
text = response.text();
}
console.log('Received.');
return text;
@@ -139,4 +157,4 @@ export class Gemini {
const result = await model.embedContent(text);
return result.embedding.values;
}
}
}

src/models/glhf.js (new file, 70 lines)

@@ -0,0 +1,70 @@
import OpenAIApi from 'openai';
import { getKey } from '../utils/keys.js';
export class GLHF {
constructor(model_name, url) {
this.model_name = model_name;
const apiKey = getKey('GHLF_API_KEY');
if (!apiKey) {
throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
}
this.openai = new OpenAIApi({
apiKey,
baseURL: url || "https://glhf.chat/api/openai/v1"
});
}
async sendRequest(turns, systemMessage, stop_seq = '***') {
// Construct the message array for the API request.
let messages = [{ role: 'system', content: systemMessage }].concat(turns);
const pack = {
model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
messages,
stop: [stop_seq]
};
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting glhf.chat API response... (attempt: ${attempt})`);
try {
let completion = await this.openai.chat.completions.create(pack);
if (completion.choices[0].finish_reason === 'length') {
throw new Error('Context length exceeded');
}
let res = completion.choices[0].message.content;
// If there's an open <think> tag without a corresponding </think>, retry.
if (res.includes("<think>") && !res.includes("</think>")) {
console.warn("Partial <think> block detected. Re-generating...");
continue;
}
// If there's a closing </think> tag but no opening <think>, prepend one.
if (res.includes("</think>") && !res.includes("<think>")) {
res = "<think>" + res;
}
finalRes = res.replace(/<\|separator\|>/g, '*no response*');
break; // Valid response obtained.
} catch (err) {
if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
console.log('Context length exceeded, trying again with shorter context.');
return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
} else {
console.error(err);
finalRes = 'My brain disconnected, try again.';
break;
}
}
}
if (finalRes === null) {
finalRes = "I thought too hard, sorry, try again";
}
return finalRes;
}
async embed(text) {
throw new Error('Embeddings are not supported by glhf.');
}
}
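Per the README table updated in this commit, a profile would select this backend with a model string like `glhf/hf:meta-llama/Llama-3.1-405B-Instruct`; the `glhf/` prefix is stripped in `prompter.js` below before the model name is passed to the API.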

src/models/groq.js

@@ -24,44 +24,47 @@ export class GroqCloudAPI {
this.groq = new Groq({ apiKey: getKey('GROQCLOUD_API_KEY') });
}
async sendRequest(turns, systemMessage, stop_seq=null) {
async sendRequest(turns, systemMessage, stop_seq = null) {
// Variables for DeepSeek-R1 models
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
let res = null;
let messages = [{"role": "system", "content": systemMessage}].concat(turns); // The standard for GroqCloud is just appending to a messages array starting with the system prompt, but
// this is perfectly acceptable too, and I recommend it.
// I still feel as though I should note it for any future revisions of MindCraft, though.
// Construct messages array
let messages = [{"role": "system", "content": systemMessage}].concat(turns);
// These variables look odd, but they're for the future. Please keep them intact.
let raw_res = null;
let res = null;
let tool_calls = null;
while (attempt < maxAttempts) {
attempt++;
try {
// These variables look odd, but they're for the future.
let raw_res = null;
let tool_calls = null;
console.log("Awaiting Groq response...");
try {
console.log("Awaiting Groq response...");
if (this.params.max_tokens) {
// Handle deprecated max_tokens parameter
if (this.params.max_tokens) {
console.warn("GROQCLOUD WARNING: A profile is using `max_tokens`. This is deprecated. Please move to `max_completion_tokens`.");
this.params.max_completion_tokens = this.params.max_tokens;
delete this.params.max_tokens;
}
console.warn("GROQCLOUD WARNING: A profile is using `max_tokens`. This is deprecated. Please move to `max_completion_tokens`.");
this.params.max_completion_tokens = this.params.max_tokens;
delete this.params.max_tokens;
if (!this.params.max_completion_tokens) {
this.params.max_completion_tokens = 8000; // Set it lower.
}
}
if (!this.params.max_completion_tokens) {
this.params.max_completion_tokens = 8000; // Set it lower. This is a common theme.
}
let completion = await this.groq.chat.completions.create({
"messages": messages,
"model": this.model_name || "llama-3.3-70b-versatile",
"stream": false,
"stop": stop_seq,
...(this.params || {})
});
let completion = await this.groq.chat.completions.create({
"messages": messages,
"model": this.model_name || "llama-3.3-70b-versatile",
"stream": false,
"stop": stop_seq,
...(this.params || {})
});
raw_res = completion.choices[0].message;
res = raw_res.content;

src/models/huggingface.js

@@ -1,46 +1,85 @@
import {toSinglePrompt} from '../utils/text.js';
import {getKey} from '../utils/keys.js';
import {HfInference} from "@huggingface/inference";
import { toSinglePrompt } from '../utils/text.js';
import { getKey } from '../utils/keys.js';
import { HfInference } from "@huggingface/inference";
export class HuggingFace {
constructor(model_name, url, params) {
this.model_name = model_name.replace('huggingface/','');
this.url = url;
this.params = params;
constructor(model_name, url, params) {
// Remove 'huggingface/' prefix if present
this.model_name = model_name.replace('huggingface/', '');
this.url = url;
this.params = params;
if (this.url) {
console.warn("Hugging Face doesn't support custom urls!");
if (this.url) {
console.warn("Hugging Face doesn't support custom urls!");
}
this.huggingface = new HfInference(getKey('HUGGINGFACE_API_KEY'));
}
async sendRequest(turns, systemMessage) {
const stop_seq = '***';
// Build a single prompt from the conversation turns
const prompt = toSinglePrompt(turns, null, stop_seq);
// Fallback model if none was provided
const model_name = this.model_name || 'meta-llama/Meta-Llama-3-8B';
// Combine system message with the prompt
const input = systemMessage + "\n" + prompt;
// We'll try up to 5 times in case of partial <think> blocks for DeepSeek-R1 models.
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting Hugging Face API response... (model: ${model_name}, attempt: ${attempt})`);
let res = '';
try {
// Consume the streaming response chunk by chunk
for await (const chunk of this.huggingface.chatCompletionStream({
model: model_name,
messages: [{ role: "user", content: input }],
...(this.params || {})
})) {
res += (chunk.choices[0]?.delta?.content || "");
}
} catch (err) {
console.log(err);
res = 'My brain disconnected, try again.';
// Break out immediately; we only retry when handling partial <think> tags.
break;
}
// If the model is DeepSeek-R1, check for mismatched <think> blocks.
const hasOpenTag = res.includes("<think>");
const hasCloseTag = res.includes("</think>");
// If there's a partial mismatch, warn and retry the entire request.
if ((hasOpenTag && !hasCloseTag)) {
console.warn("Partial <think> block detected. Re-generating...");
continue;
}
this.huggingface = new HfInference(getKey('HUGGINGFACE_API_KEY'));
}
async sendRequest(turns, systemMessage) {
const stop_seq = '***';
const prompt = toSinglePrompt(turns, null, stop_seq);
let model_name = this.model_name || 'meta-llama/Meta-Llama-3-8B';
const input = systemMessage + "\n" + prompt;
let res = '';
try {
console.log('Awaiting Hugging Face API response...');
for await (const chunk of this.huggingface.chatCompletionStream({
model: model_name,
messages: [{ role: "user", content: input }],
...(this.params || {})
})) {
res += (chunk.choices[0]?.delta?.content || "");
}
} catch (err) {
console.log(err);
res = 'My brain disconnected, try again.';
// If both tags are present, remove the <think> block entirely.
if (hasOpenTag && hasCloseTag) {
res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
console.log('Received.');
console.log(res);
return res;
finalRes = res;
break; // Exit loop if we got a valid response.
}
async embed(text) {
throw new Error('Embeddings are not supported by HuggingFace.');
// If no valid response was obtained after max attempts, assign a fallback.
if (finalRes == null) {
console.warn("Could not get a valid <think> block or normal response after max attempts.");
finalRes = 'I thought too hard, sorry, try again.';
}
}
console.log('Received.');
console.log(finalRes);
return finalRes;
}
async embed(text) {
throw new Error('Embeddings are not supported by HuggingFace.');
}
}

src/models/hyperbolic.js (new file, 113 lines)

@@ -0,0 +1,113 @@
import { getKey } from '../utils/keys.js';
export class Hyperbolic {
constructor(modelName, apiUrl) {
this.modelName = modelName || "deepseek-ai/DeepSeek-V3";
this.apiUrl = apiUrl || "https://api.hyperbolic.xyz/v1/chat/completions";
// Retrieve the Hyperbolic API key from keys.js
this.apiKey = getKey('HYPERBOLIC_API_KEY');
if (!this.apiKey) {
throw new Error('HYPERBOLIC_API_KEY not found. Check your keys.js file.');
}
}
/**
* Sends a chat completion request to the Hyperbolic endpoint.
*
* @param {Array} turns - An array of message objects, e.g. [{role: 'user', content: 'Hi'}].
* @param {string} systemMessage - The system prompt or instruction.
* @param {string} stopSeq - A stopping sequence, default '***'.
* @returns {Promise<string>} - The model's reply.
*/
async sendRequest(turns, systemMessage, stopSeq = '***') {
// Prepare the messages with a system prompt at the beginning
const messages = [{ role: 'system', content: systemMessage }, ...turns];
// Build the request payload
const payload = {
model: this.modelName,
messages: messages,
max_tokens: 8192,
temperature: 0.7,
top_p: 0.9,
stream: false
};
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting Hyperbolic API response... (attempt: ${attempt})`);
console.log('Messages:', messages);
let completionContent = null;
try {
const response = await fetch(this.apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`
},
body: JSON.stringify(payload)
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
if (data?.choices?.[0]?.finish_reason === 'length') {
throw new Error('Context length exceeded');
}
completionContent = data?.choices?.[0]?.message?.content || '';
console.log('Received response from Hyperbolic.');
} catch (err) {
if (
(err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') &&
turns.length > 1
) {
console.log('Context length exceeded, trying again with a shorter context...');
return await this.sendRequest(turns.slice(1), systemMessage, stopSeq);
} else {
console.error(err);
completionContent = 'My brain disconnected, try again.';
}
}
// Check for <think> blocks
const hasOpenTag = completionContent.includes("<think>");
const hasCloseTag = completionContent.includes("</think>");
if ((hasOpenTag && !hasCloseTag)) {
console.warn("Partial <think> block detected. Re-generating...");
continue; // Retry the request
}
if (hasCloseTag && !hasOpenTag) {
completionContent = '<think>' + completionContent;
}
if (hasOpenTag && hasCloseTag) {
completionContent = completionContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
finalRes = completionContent.replace(/<\|separator\|>/g, '*no response*');
break; // Valid response obtained—exit loop
}
if (finalRes == null) {
console.warn("Could not get a valid <think> block or normal response after max attempts.");
finalRes = 'I thought too hard, sorry, try again.';
}
return finalRes;
}
async embed(text) {
throw new Error('Embeddings are not supported by Hyperbolic.');
}
}
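Likewise, the README pairs this backend with model strings like `hyperbolic/deepseek-ai/DeepSeek-V3`; `prompter.js` strips the `hyperbolic/` prefix before constructing this class.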

src/models/local.js

@@ -10,45 +10,86 @@ export class Local {
}
async sendRequest(turns, systemMessage) {
let model = this.model_name || 'llama3';
let model = this.model_name || 'llama3.1'; // Updated to llama3.1, as it is more performant than llama3
let messages = strictFormat(turns);
messages.unshift({role: 'system', content: systemMessage});
let res = null;
try {
console.log(`Awaiting local response... (model: ${model})`)
res = await this.send(this.chat_endpoint, {
model: model,
messages: messages,
stream: false,
...(this.params || {})
});
if (res)
res = res['message']['content'];
}
catch (err) {
if (err.message.toLowerCase().includes('context length') && turns.length > 1) {
console.log('Context length exceeded, trying again with shorter context.');
return await sendRequest(turns.slice(1), systemMessage, stop_seq);
} else {
console.log(err);
res = 'My brain disconnected, try again.';
messages.unshift({ role: 'system', content: systemMessage });
// We'll attempt up to 5 times for models with deepseek-r1-esque reasoning if the <think> tags are mismatched.
const maxAttempts = 5;
let attempt = 0;
let finalRes = null;
while (attempt < maxAttempts) {
attempt++;
console.log(`Awaiting local response... (model: ${model}, attempt: ${attempt})`);
let res = null;
try {
res = await this.send(this.chat_endpoint, {
model: model,
messages: messages,
stream: false,
...(this.params || {})
});
if (res) {
res = res['message']['content'];
} else {
res = 'No response data.';
}
} catch (err) {
if (err.message.toLowerCase().includes('context length') && turns.length > 1) {
console.log('Context length exceeded, trying again with shorter context.');
return await this.sendRequest(turns.slice(1), systemMessage);
} else {
console.log(err);
res = 'My brain disconnected, try again.';
}
}
// If the model name includes "deepseek-r1" or "Andy-3.5-reasoning", then handle the <think> block.
const hasOpenTag = res.includes("<think>");
const hasCloseTag = res.includes("</think>");
// If there's a partial mismatch, retry to get a complete response.
if ((hasOpenTag && !hasCloseTag)) {
console.warn("Partial <think> block detected. Re-generating...");
continue;
}
// If </think> is present but <think> is not, prepend <think>
if (hasCloseTag && !hasOpenTag) {
res = '<think>' + res;
}
// Changed this so that if the model reasons using <think> and </think> but doesn't start the message with <think>, <think> gets prepended so no errors occur.
// If both tags appear, remove them (and everything inside).
if (hasOpenTag && hasCloseTag) {
res = res.replace(/<think>[\s\S]*?<\/think>/g, '');
}
finalRes = res;
break; // Exit the loop if we got a valid response.
}
return res;
if (finalRes == null) {
console.warn("Could not get a valid <think> block or normal response after max attempts.");
finalRes = 'I thought too hard, sorry, try again.';
}
return finalRes;
}
async embed(text) {
let model = this.model_name || 'nomic-embed-text';
let body = {model: model, prompt: text};
let body = { model: model, input: text };
let res = await this.send(this.embedding_endpoint, body);
return res['embedding']
return res['embedding'];
}
async send(endpoint, body) {
const url = new URL(endpoint, this.url);
let method = 'POST';
let headers = new Headers();
const request = new Request(url, {method, headers, body: JSON.stringify(body)});
const request = new Request(url, { method, headers, body: JSON.stringify(body) });
let data = null;
try {
const res = await fetch(request);
@@ -63,4 +104,4 @@ export class Local {
}
return data;
}
}
}

src/models/prompter.js

@@ -18,6 +18,8 @@ import { HuggingFace } from './huggingface.js';
import { Qwen } from "./qwen.js";
import { Grok } from "./grok.js";
import { DeepSeek } from './deepseek.js';
import { Hyperbolic } from './hyperbolic.js';
import { GLHF } from './glhf.js';
import { OpenRouter } from './openrouter.js';
export class Prompter {
@@ -40,7 +42,6 @@ export class Prompter {
}
// base overrides default, individual overrides base
this.convo_examples = null;
this.coding_examples = null;
@@ -128,10 +129,12 @@ export class Prompter {
profile = {model: profile};
}
if (!profile.api) {
if (profile.model.includes('gemini'))
if (profile.model.includes('openrouter/'))
profile.api = 'openrouter'; // must do first because shares names with other models
else if (profile.model.includes('ollama/'))
profile.api = 'ollama'; // also must do early because shares names with other models
else if (profile.model.includes('gemini'))
profile.api = 'google';
else if (profile.model.includes('openrouter/'))
profile.api = 'openrouter'; // must do before others bc shares model names
else if (profile.model.includes('gpt') || profile.model.includes('o1')|| profile.model.includes('o3'))
profile.api = 'openai';
else if (profile.model.includes('claude'))
@@ -144,6 +147,10 @@
model_profile.api = 'mistral';
else if (profile.model.includes("groq/") || profile.model.includes("groqcloud/"))
profile.api = 'groq';
else if (profile.model.includes("glhf/"))
profile.api = 'glhf';
else if (profile.model.includes("hyperbolic/"))
profile.api = 'hyperbolic';
else if (profile.model.includes('novita/'))
profile.api = 'novita';
else if (profile.model.includes('qwen'))
@@ -152,16 +159,13 @@
profile.api = 'xai';
else if (profile.model.includes('deepseek'))
profile.api = 'deepseek';
else if (profile.model.includes('mistral'))
else if (profile.model.includes('mistral'))
profile.api = 'mistral';
else if (profile.model.includes('llama3'))
profile.api = 'ollama';
else
throw new Error('Unknown model:', profile.model);
}
return profile;
}
_createModel(profile) {
let model = null;
if (profile.api === 'google')
@@ -173,13 +177,17 @@
else if (profile.api === 'replicate')
model = new ReplicateAPI(profile.model.replace('replicate/', ''), profile.url, profile.params);
else if (profile.api === 'ollama')
model = new Local(profile.model, profile.url, profile.params);
model = new Local(profile.model.replace('ollama/', ''), profile.url, profile.params);
else if (profile.api === 'mistral')
model = new Mistral(profile.model, profile.url, profile.params);
else if (profile.api === 'groq')
model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, profile.params);
else if (profile.api === 'huggingface')
model = new HuggingFace(profile.model, profile.url, profile.params);
else if (profile.api === 'glhf')
model = new GLHF(profile.model.replace('glhf/', ''), profile.url, profile.params);
else if (profile.api === 'hyperbolic')
model = new Hyperbolic(profile.model.replace('hyperbolic/', ''), profile.url, profile.params);
else if (profile.api === 'novita')
model = new Novita(profile.model.replace('novita/', ''), profile.url, profile.params);
else if (profile.api === 'qwen')
@@ -194,7 +202,6 @@
throw new Error('Unknown API:', profile.api);
return model;
}
getName() {
return this.profile.name;
}

src/utils/mcdata.js

@@ -86,6 +86,16 @@ export function isHostile(mob) {
return (mob.type === 'mob' || mob.type === 'hostile') && mob.name !== 'iron_golem' && mob.name !== 'snow_golem';
}
// blocks that don't work with collectBlock, need to be manually collected
export function mustCollectManually(blockName) {
// all crops (that aren't normal blocks), torches, buttons, levers, redstone,
const full_names = ['wheat', 'carrots', 'potatoes', 'beetroots', 'nether_wart', 'cocoa', 'sugar_cane', 'kelp', 'short_grass', 'fern', 'tall_grass', 'bamboo',
'poppy', 'dandelion', 'blue_orchid', 'allium', 'azure_bluet', 'oxeye_daisy', 'cornflower', 'lilac', 'wither_rose', 'lily_of_the_valley',
'lever', 'redstone_wire', 'lantern']
const partial_names = ['sapling', 'torch', 'button', 'carpet', 'pressure_plate', 'mushroom', 'tulip', 'bush', 'vines', 'fern']
return full_names.includes(blockName.toLowerCase()) || partial_names.some(partial => blockName.toLowerCase().includes(partial));
}
export function getItemId(itemName) {
let item = mcdata.itemsByName[itemName];
if (item) {