mirror of https://github.com/kolbytn/mindcraft.git
synced 2025-03-28 14:56:24 +01:00

commit 60187e2317: "added model parameters obj to profile"
parent 8f545089e1
17 changed files with 134 additions and 74 deletions

README.md
@@ -42,7 +42,7 @@ You can configure the agent's name, model, and prompts in their profile like `an
 | OpenAI | `OPENAI_API_KEY` | `gpt-4o-mini` | [docs](https://platform.openai.com/docs/models) |
 | Google | `GEMINI_API_KEY` | `gemini-pro` | [docs](https://ai.google.dev/gemini-api/docs/models/gemini) |
 | Anthropic | `ANTHROPIC_API_KEY` | `claude-3-haiku-20240307` | [docs](https://docs.anthropic.com/claude/docs/models-overview) |
-| Replicate | `REPLICATE_API_KEY` | `meta/meta-llama-3-70b-instruct` | [docs](https://replicate.com/collections/language-models) |
+| Replicate | `REPLICATE_API_KEY` | `replicate/meta/meta-llama-3-70b-instruct` | [docs](https://replicate.com/collections/language-models) |
 | Ollama (local) | n/a | `llama3` | [docs](https://ollama.com/library) |
 | Groq | `GROQCLOUD_API_KEY` | `groq/mixtral-8x7b-32768` | [docs](https://console.groq.com/docs/models) |
 | Hugging Face | `HUGGINGFACE_API_KEY` | `huggingface/mistralai/Mistral-Nemo-Instruct-2407` | [docs](https://huggingface.co/models) |

profiles/gpt.json
@@ -1,5 +1,11 @@
 {
     "name": "gpt",
-    "model": "gpt-4o"
+    "model": {
+        "model": "gpt-4o-mini",
+        "params": {
+            "temperature": 1,
+            "not_real": true
+        }
+    }
 }
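
The nested "model" object is the heart of this commit: anything under "params" is forwarded into the provider's API call. Every wrapper below adopts the same pattern, spreading params last so profile values override the hardcoded defaults; unrecognized keys such as "not_real" are sent along as-is, so providers that validate strictly may reject them. A minimal sketch of the shared pattern, with hypothetical class and model names:

    // Sketch of the pattern each provider wrapper below adopts; the class
    // name and default model are hypothetical stand-ins.
    class ExampleWrapper {
        constructor(model_name, url, params) {
            this.model_name = model_name;
            this.params = params; // undefined when the profile has no "params"
        }

        buildRequest(messages, stop_seq) {
            return {
                model: this.model_name || 'default-model',
                messages,
                stop: stop_seq,
                // Spread last: profile params (temperature, max_tokens, ...)
                // override the defaults above.
                ...(this.params || {}),
            };
        }
    }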

src/agent/agent.js
@@ -1,6 +1,6 @@
 import { History } from './history.js';
 import { Coder } from './coder.js';
-import { Prompter } from './prompter.js';
+import { Prompter } from '../models/prompter.js';
 import { initModes } from './modes.js';
 import { initBot } from '../utils/mcdata.js';
 import { containsCommand, commandExists, executeCommand, truncCommandMessage, isAction, blacklistCommands } from './commands/index.js';
@@ -100,11 +100,9 @@ export class Agent {
             });
         } catch (error) {
-            // Ensure we're not losing error details
-            console.error('Agent start failed with error:', {
-                message: error.message || 'No error message',
-                stack: error.stack || 'No stack trace',
-                error: error
-            });
+            console.error('Agent start failed with error')
+            console.error(error)

             throw error; // Re-throw with preserved details
         }
     }

src/models/claude.js
@@ -3,8 +3,9 @@ import { strictFormat } from '../utils/text.js';
 import { getKey } from '../utils/keys.js';

 export class Claude {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
+        this.params = params;

         let config = {};
         if (url)
@@ -20,13 +21,16 @@ export class Claude {
         let res = null;
         try {
             console.log('Awaiting anthropic api response...')
             // console.log('Messages:', messages);
+            if (!this.params.max_tokens) {
+                this.params.max_tokens = 4096;
+            }
             const resp = await this.anthropic.messages.create({
                 model: this.model_name || "claude-3-sonnet-20240229",
                 system: systemMessage,
-                max_tokens: 2048,
                 messages: messages,
+                ...(this.params || {})
             });

             console.log('Received.')
             res = resp.content[0].text;
         }
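
claude.js is the one wrapper that also mutates params to supply a required default: Anthropic's messages API demands max_tokens on every request, so the old hardcoded 2048 becomes a 4096 fallback. Note that the guard reads this.params.max_tokens before any `|| {}` fallback runs, so a profile that omits "params" entirely would pass undefined and throw a TypeError here. A null-safe variant of the same guard (a sketch, not what the commit ships):

    // Null-safe version of the committed guard (assumption: params may be
    // undefined when the profile has no "params" object).
    this.params = this.params || {};
    if (!this.params.max_tokens) {
        this.params.max_tokens = 4096; // Anthropic requires max_tokens on every request
    }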

src/models/deepseek.js
@@ -3,8 +3,9 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';

 export class DeepSeek {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
+        this.params = params;

         let config = {};
@@ -23,6 +24,7 @@ export class DeepSeek {
             model: this.model_name || "deepseek-chat",
             messages,
             stop: stop_seq,
+            ...(this.params || {})
         };

         let res = null;

src/models/gemini.js
@@ -3,8 +3,9 @@ import { toSinglePrompt } from '../utils/text.js';
 import { getKey } from '../utils/keys.js';

 export class Gemini {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
+        this.params = params;
         this.url = url;
         this.safetySettings = [
             {
@@ -34,15 +35,20 @@ export class Gemini {

     async sendRequest(turns, systemMessage) {
         let model;
+        const modelConfig = {
+            model: this.model_name || "gemini-1.5-flash",
+            ...(this.params || {})
+        };
+
         if (this.url) {
             model = this.genAI.getGenerativeModel(
-                { model: this.model_name || "gemini-1.5-flash" },
+                modelConfig,
                 { baseUrl: this.url },
                 { safetySettings: this.safetySettings }
             );
         } else {
             model = this.genAI.getGenerativeModel(
-                { model: this.model_name || "gemini-1.5-flash" },
+                modelConfig,
                 { safetySettings: this.safetySettings }
             );
         }
@@ -50,12 +56,27 @@ export class Gemini {
         const stop_seq = '***';
         const prompt = toSinglePrompt(turns, systemMessage, stop_seq, 'model');
         console.log('Awaiting Google API response...');
-        const result = await model.generateContent(prompt);
+        const result = await model.generateContent({
+            contents: [
+                {
+                    role: 'user',
+                    parts: [
+                        {
+                            text: "Explain how AI works",
+                        }
+                    ],
+                }
+            ],
+            generateConfig: {
+                ...(this.params || {})
+            }
+        });
+
         const response = await result.response;
         const text = response.text();
         console.log('Received.');
         if (!text.includes(stop_seq)) return text;
         const idx = text.indexOf(stop_seq);

         return text.slice(0, idx);
     }
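
Two details of the new generateContent call are worth flagging. As committed it sends the literal text "Explain how AI works" (the example from Google's SDK docs) rather than the prompt assembled by toSinglePrompt, and the @google/generative-ai request key is generationConfig, not generateConfig, so the spread params are unlikely to take effect. A sketch of the presumably intended call:

    // Sketch wiring the assembled prompt into the request (assumptions: the
    // intent is to send `prompt`, and generation settings belong under the
    // SDK's `generationConfig` key).
    const result = await model.generateContent({
        contents: [{ role: 'user', parts: [{ text: prompt }] }],
        generationConfig: { ...(this.params || {}) },
    });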

src/models/gpt.js
@@ -3,8 +3,9 @@ import { getKey, hasKey } from '../utils/keys.js';
 import { strictFormat } from '../utils/text.js';

 export class GPT {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
+        this.params = params;

         let config = {};
         if (url)
@@ -25,6 +26,7 @@ export class GPT {
             model: this.model_name || "gpt-3.5-turbo",
             messages,
             stop: stop_seq,
+            ...(this.params || {})
         };
         if (this.model_name.includes('o1')) {
             pack.messages = strictFormat(messages);
@@ -32,6 +34,7 @@ export class GPT {
         }

         let res = null;
+
         try {
             console.log('Awaiting openai api response from model', this.model_name)
             // console.log('Messages:', messages);

src/models/grok.js
@@ -3,8 +3,10 @@ import { getKey } from '../utils/keys.js';

 // xAI doesn't supply a SDK for their models, but fully supports OpenAI and Anthropic SDKs
 export class Grok {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
         this.url = url;
+        this.params = params;
+
         let config = {};
         if (url)
@@ -23,7 +25,8 @@ export class Grok {
         const pack = {
             model: this.model_name || "grok-beta",
             messages,
-            stop: [stop_seq]
+            stop: [stop_seq],
+            ...(this.params || {})
         };

         let res = null;

src/models/groq.js
@@ -4,12 +4,13 @@ import { getKey } from '../utils/keys.js';

 // Umbrella class for Mixtral, LLama, Gemma...
 export class GroqCloudAPI {
-    constructor(model_name, url, max_tokens=16384) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
         this.url = url;
-        this.max_tokens = max_tokens;
+        this.params = params;
         // ReplicateAPI theft :3
         if (this.url) {
+
             console.warn("Groq Cloud has no implementation for custom URLs. Ignoring provided URL.");
         }
         this.groq = new Groq({ apiKey: getKey('GROQCLOUD_API_KEY') });
@@ -20,14 +21,15 @@ export class GroqCloudAPI {
         let res = null;
         try {
             console.log("Awaiting Groq response...");
+            if (!this.params.max_tokens) {
+                this.params.max_tokens = 16384;
+            }
             let completion = await this.groq.chat.completions.create({
                 "messages": messages,
                 "model": this.model_name || "mixtral-8x7b-32768",
-                "temperature": 0.2,
-                "max_tokens": this.max_tokens, // maximum token limit, differs from model to model
-                "top_p": 1,
                 "stream": true,
-                "stop": stop_seq // "***"
+                "stop": stop_seq,
+                ...(this.params || {})
             });

             let temp_res = "";
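
With the constructor's max_tokens argument gone, the old fixed sampler settings (temperature 0.2, top_p 1) disappear entirely unless a profile restores them, and like claude.js the new max_tokens guard assumes this.params is defined. A hypothetical profile fragment reproducing the previous behaviour:

    "model": {
        "api": "groq",
        "model": "groq/mixtral-8x7b-32768",
        "params": {
            "temperature": 0.2,
            "top_p": 1,
            "max_tokens": 8192
        }
    }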

src/models/huggingface.js
@@ -3,9 +3,10 @@ import {getKey} from '../utils/keys.js';
 import {HfInference} from "@huggingface/inference";

 export class HuggingFace {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name.replace('huggingface/','');
         this.url = url;
+        this.params = params;

         if (this.url) {
             console.warn("Hugging Face doesn't support custom urls!");
@@ -25,7 +26,8 @@ export class HuggingFace {
         console.log('Awaiting Hugging Face API response...');
         for await (const chunk of this.huggingface.chatCompletionStream({
             model: model_name,
-            messages: [{ role: "user", content: input }]
+            messages: [{ role: "user", content: input }],
+            ...(this.params || {})
         })) {
             res += (chunk.choices[0]?.delta?.content || "");
         }

src/models/local.js
@@ -1,8 +1,9 @@
 import { strictFormat } from '../utils/text.js';

 export class Local {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
+        this.params = params;
         this.url = url || 'http://127.0.0.1:11434';
         this.chat_endpoint = '/api/chat';
         this.embedding_endpoint = '/api/embeddings';
@@ -15,7 +16,12 @@ export class Local {
         let res = null;
         try {
             console.log(`Awaiting local response... (model: ${model})`)
-            res = await this.send(this.chat_endpoint, {model: model, messages: messages, stream: false});
+            res = await this.send(this.chat_endpoint, {
+                model: model,
+                messages: messages,
+                stream: false,
+                ...(this.params || {})
+            });
             if (res)
                 res = res['message']['content'];
         }
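
For the Ollama wrapper the spread lands at the top level of the /api/chat request body. Ollama reads sampler settings from a nested "options" object, and top-level keys it doesn't recognize won't affect sampling, so to change e.g. temperature a profile would nest them (hypothetical fragment; num_predict is Ollama's output-length option):

    "params": {
        "options": {
            "temperature": 0.7,
            "num_predict": 512
        }
    }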

src/models/mistral.js
@@ -5,10 +5,13 @@ import { strictFormat } from '../utils/text.js';
 export class Mistral {
     #client;

-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
+
         if (typeof url === "string") {
             console.warn("Mistral does not support custom URL's, ignoring!");

         }

         if (!getKey("MISTRAL_API_KEY")) {
@@ -22,8 +25,6 @@ export class Mistral {
         );

-
-        this.model_name = model_name;

         // Prevents the following code from running when model not specified
         if (typeof this.model_name === "undefined") return;
@@ -49,6 +50,7 @@ export class Mistral {
         const response = await this.#client.chat.complete({
             model,
             messages,
+            ...(this.params || {})
         });

         result = response.choices[0].message.content;

src/models/novita.js
@@ -4,9 +4,11 @@ import { strictFormat } from '../utils/text.js';

 // llama, mistral
 export class Novita {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name.replace('novita/', '');
         this.url = url || 'https://api.novita.ai/v3/openai';
+        this.params = params;
+

         let config = {
             baseURL: this.url
@@ -26,6 +28,7 @@ export class Novita {
             model: this.model_name || "meta-llama/llama-3.1-70b-instruct",
             messages,
             stop: [stop_seq],
+            ...(this.params || {})
         };

         let res = null;

src/models/prompter.js (moved from src/agent/prompter.js)
@@ -1,23 +1,23 @@
 import { readFileSync, mkdirSync, writeFileSync} from 'fs';
 import { Examples } from '../utils/examples.js';
-import { getCommandDocs } from './commands/index.js';
-import { getSkillDocs } from './library/index.js';
+import { getCommandDocs } from '../agent/commands/index.js';
+import { getSkillDocs } from '../agent/library/index.js';
 import { stringifyTurns } from '../utils/text.js';
-import { getCommand } from './commands/index.js';
+import { getCommand } from '../agent/commands/index.js';
 import settings from '../../settings.js';

-import { Gemini } from '../models/gemini.js';
-import { GPT } from '../models/gpt.js';
-import { Claude } from '../models/claude.js';
-import { Mistral } from '../models/mistral.js';
-import { ReplicateAPI } from '../models/replicate.js';
-import { Local } from '../models/local.js';
-import { Novita } from '../models/novita.js';
-import { GroqCloudAPI } from '../models/groq.js';
-import { HuggingFace } from '../models/huggingface.js';
-import { Qwen } from "../models/qwen.js";
-import { Grok } from "../models/grok.js";
-import { DeepSeek } from '../models/deepseek.js';
+import { Gemini } from './gemini.js';
+import { GPT } from './gpt.js';
+import { Claude } from './claude.js';
+import { Mistral } from './mistral.js';
+import { ReplicateAPI } from './replicate.js';
+import { Local } from './local.js';
+import { Novita } from './novita.js';
+import { GroqCloudAPI } from './groq.js';
+import { HuggingFace } from './huggingface.js';
+import { Qwen } from "./qwen.js";
+import { Grok } from "./grok.js";
+import { DeepSeek } from './deepseek.js';

 export class Prompter {
     constructor(agent, fp) {
@@ -102,6 +102,8 @@ export class Prompter {
     _selectAPI(profile) {
+        if (typeof profile === 'string' || profile instanceof String) {
+            profile = {model: profile};
+        }
         if (!profile.api) {
             if (profile.model.includes('gemini'))
                 profile.api = 'google';
             else if (profile.model.includes('gpt') || profile.model.includes('o1')|| profile.model.includes('o3'))
@@ -110,7 +112,7 @@ export class Prompter {
                 profile.api = 'anthropic';
             else if (profile.model.includes('huggingface/'))
                 profile.api = "huggingface";
-            else if (profile.model.includes('meta/') || profile.model.includes('replicate/'))
+            else if (profile.model.includes('replicate/'))
                 profile.api = 'replicate';
             else if (profile.model.includes('mistralai/') || profile.model.includes("mistral/"))
                 model_profile.api = 'mistral';
@@ -133,32 +135,31 @@ export class Prompter {
     _createModel(profile) {
         let model = null;
         if (profile.api === 'google')
-            model = new Gemini(profile.model, profile.url);
+            model = new Gemini(profile.model, profile.url, profile.params);
         else if (profile.api === 'openai')
-            model = new GPT(profile.model, profile.url);
+            model = new GPT(profile.model, profile.url, profile.params);
         else if (profile.api === 'anthropic')
-            model = new Claude(profile.model, profile.url);
+            model = new Claude(profile.model, profile.url, profile.params);
         else if (profile.api === 'replicate')
-            model = new ReplicateAPI(profile.model, profile.url);
+            model = new ReplicateAPI(profile.model, profile.url, profile.params);
         else if (profile.api === 'ollama')
-            model = new Local(profile.model, profile.url);
+            model = new Local(profile.model, profile.url, profile.params);
         else if (profile.api === 'mistral')
-            model = new Mistral(profile.model, profile.url);
-        else if (profile.api === 'groq') {
-            model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, max_tokens ? max_tokens : 8192);
-        }
+            model = new Mistral(profile.model, profile.url, profile.params);
+        else if (profile.api === 'groq')
+            model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, profile.params);
         else if (profile.api === 'huggingface')
-            model = new HuggingFace(profile.model, profile.url);
+            model = new HuggingFace(profile.model, profile.url, profile.params);
         else if (profile.api === 'novita')
-            model = new Novita(profile.model.replace('novita/', ''), profile.url);
+            model = new Novita(profile.model.replace('novita/', ''), profile.url, profile.params);
         else if (profile.api === 'qwen')
-            model = new Qwen(profile.model, profile.url);
+            model = new Qwen(profile.model, profile.url, profile.params);
         else if (profile.api === 'xai')
-            model = new Grok(profile.model, profile.url);
+            model = new Grok(profile.model, profile.url, profile.params);
        else if (profile.api === 'deepseek')
-            model = new DeepSeek(profile.model, profile.url);
+            model = new DeepSeek(profile.model, profile.url, profile.params);
         else
-            throw new Error('Unknown API:', api);
+            throw new Error('Unknown API:', profile.api);
         return model;
     }
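
One small carry-over in the rewritten _createModel: JavaScript's Error constructor takes a single message string and silently ignores extra arguments, so the API name still never appears in the thrown message. A template literal would keep it (sketch):

    throw new Error(`Unknown API: ${profile.api}`);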

src/models/qwen.js
@@ -4,8 +4,9 @@
 import { getKey } from '../utils/keys.js';

 export class Qwen {
-    constructor(modelName, url) {
-        this.modelName = modelName;
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
         this.url = url || 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation';
         this.apiKey = getKey('QWEN_API_KEY');
     }
@@ -19,7 +20,11 @@ export class Qwen {
         const data = {
             model: this.modelName || 'qwen-plus',
             input: { messages: [{ role: 'system', content: systemMessage }, ...turns] },
-            parameters: { result_format: 'message', stop: stopSeq },
+            parameters: {
+                result_format: 'message',
+                stop: stopSeq,
+                ...(this.params || {})
+            },
         };

         // Add default user message if all messages are 'system' role
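
The Qwen constructor renames modelName to snake_case model_name, but as shown here the request builder still reads this.modelName, which is now always undefined, so every request would silently fall back to 'qwen-plus'. The one-line fix would read the renamed property (sketch):

    model: this.model_name || 'qwen-plus',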

src/models/replicate.js
@@ -4,9 +4,10 @@ import { getKey } from '../utils/keys.js';

 // llama, mistral
 export class ReplicateAPI {
-    constructor(model_name, url) {
+    constructor(model_name, url, params) {
         this.model_name = model_name;
         this.url = url;
+        this.params = params;

         if (this.url) {
             console.warn('Replicate API does not support custom URLs. Ignoring provided URL.');
@@ -22,7 +23,11 @@ export class ReplicateAPI {
         const prompt = toSinglePrompt(turns, null, stop_seq);
         let model_name = this.model_name || 'meta/meta-llama-3-70b-instruct';

-        const input = { prompt, system_prompt: systemMessage };
+        const input = {
+            prompt,
+            system_prompt: systemMessage,
+            ...(this.params || {})
+        };
         let res = null;
         try {
             console.log('Awaiting Replicate API response...');

src/process/init_agent.js
@@ -57,11 +57,8 @@ const argv = yargs(args)
         const agent = new Agent();
         await agent.start(argv.profile, argv.load_memory, argv.init_message, argv.count_id, argv.task_path, argv.task_id);
     } catch (error) {
-        console.error('Failed to start agent process:', {
-            message: error.message || 'No error message',
-            stack: error.stack || 'No stack trace',
-            error: error
-        });
+        console.error('Failed to start agent process:');
+        console.error(error);
         process.exit(1);
     }
 })();