
O1 patches
Max Robinson, 2025-02-03 18:45:16 -06:00, committed by GitHub
commit 8f545089e1
7 changed files with 104 additions and 64 deletions

View file

@@ -120,7 +120,7 @@ LLM backends can be specified as simply as `"model": "gpt-3.5-turbo"`. However,
}
```
-The model parameter accepts either a string or object. If a string, it should specify the model to be used. The api and url will be assumed. If an object, the api field must be specified. Each api has a default model and url, so those fields are optional.
+The model or code_model parameter accepts either a string or object. If a string, it should specify the model to be used. The api and url will be assumed. If an object, the api field must be specified. Each api has a default model and url, so those fields are optional.
If the embedding field is not specified, then it will use the default embedding method for the chat model's api (Note that anthropic has no embedding model). The embedding parameter can also be a string or object. If a string, it should specify the embedding api and the default model and url will be used. If a valid embedding is not specified and cannot be assumed, then word overlap will be used to retrieve examples instead.
@@ -137,6 +137,7 @@ Thus, all the below specifications are equivalent to the above example:
```json
"model": "gpt-3.5-turbo",
"embedding": "openai"
"code_model": "gpt-3.5-turbo"
```
## Patches
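For context on the README change above: the new `code_model` field slots into an agent profile alongside the existing options. A minimal sketch, written as a JavaScript object for annotation (the model names are illustrative placeholders, not repo defaults):

```javascript
// Hypothetical profile fragment using the new code_model field.
const profile = {
    "model": {                                  // chat model in object form: api is required
        "api": "openai",
        "model": "gpt-3.5-turbo"
    },
    "code_model": "claude-3-5-sonnet-latest",   // string form: api is inferred from the name
    "embedding": "openai"                       // falls back to the chat api if omitted
};
```

If `code_model` is omitted entirely, the constructor change further down reuses the chat model for coding requests.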

View file

@@ -160,7 +160,7 @@ export function parseCommandMessage(message) {
            suppressNoDomainWarning = true; //Don't spam console. Only give the warning once.
        }
    } else if(param.type === 'BlockName') { //Check that there is a block with this name
-        if(getBlockId(arg) == null) return `Invalid block type: ${arg}.`
+        if(getBlockId(arg) == null && arg !== 'air') return `Invalid block type: ${arg}.`
    } else if(param.type === 'ItemName') { //Check that there is an item with this name
        if(getItemId(arg) == null) return `Invalid item type: ${arg}.`
    }
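A quick sketch of what the added `&& arg !== 'air'` guard buys: presumably `air` resolves to no id in the lookup used here, so commands that legitimately target air (e.g. clearing a block) previously failed validation. The stub below is hypothetical; the real `getBlockId` reads from minecraft-data:

```javascript
// Hypothetical registry stub: known blocks map to ids, everything else to null.
const ids = { stone: 1, dirt: 3 };
const getBlockId = (name) => ids[name] ?? null;

function validateBlockName(arg) {
    if (getBlockId(arg) == null && arg !== 'air')
        return `Invalid block type: ${arg}.`;
    return null; // valid
}

console.log(validateBlockName('stone')); // null (valid)
console.log(validateBlockName('air'));   // null (accepted after this patch)
console.log(validateBlockName('blorp')); // 'Invalid block type: blorp.'
```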

View file

@@ -1275,8 +1275,14 @@ export async function tillAndSow(bot, x, y, z, seedType=null) {
    let block = bot.blockAt(new Vec3(x, y, z));
    if (bot.modes.isOn('cheat')) {
-        placeBlock(bot, x, y, z, 'farmland');
-        placeBlock(bot, x, y+1, z, seedType);
+        let to_remove = ['_seed', '_seeds'];
+        for (let remove of to_remove) {
+            if (seedType.endsWith(remove)) {
+                seedType = seedType.replace(remove, '');
+            }
+        }
+        placeBlock(bot, 'farmland', x, y, z);
+        placeBlock(bot, seedType, x, y+1, z);
        return true;
    }
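The cheat-mode path now normalizes seed item names to crop block names before placing (for example `wheat_seeds` becomes `wheat`), and the calls switch to placeBlock's blockType-first parameter order. One caveat: `String.prototype.replace` with a string argument removes the first occurrence anywhere, not strictly the suffix, though the `endsWith` guard keeps the two equivalent for vanilla seed names. A standalone sketch of the same normalization:

```javascript
// Same suffix-stripping logic as the diff above, as a reusable helper.
function cropNameFromSeed(seedType) {
    for (const suffix of ['_seed', '_seeds']) {
        if (seedType.endsWith(suffix))
            seedType = seedType.replace(suffix, '');
    }
    return seedType;
}

console.log(cropNameFromSeed('wheat_seeds'));    // 'wheat'
console.log(cropNameFromSeed('beetroot_seeds')); // 'beetroot'
console.log(cropNameFromSeed('carrot'));         // 'carrot' (no suffix, unchanged)
```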

View file

@@ -34,7 +34,6 @@ export class Prompter {
        this.coding_examples = null;
        let name = this.profile.name;
-        let chat = this.profile.model;
        this.cooldown = this.profile.cooldown ? this.profile.cooldown : 0;
        this.last_prompt_time = 0;
        this.awaiting_coding = false;
@@ -43,68 +42,22 @@ export class Prompter {
        let max_tokens = null;
        if (this.profile.max_tokens)
            max_tokens = this.profile.max_tokens;
-        if (typeof chat === 'string' || chat instanceof String) {
-            chat = {model: chat};
-            if (chat.model.includes('gemini'))
-                chat.api = 'google';
-            else if (chat.model.includes('gpt') || chat.model.includes('o1'))
-                chat.api = 'openai';
-            else if (chat.model.includes('claude'))
-                chat.api = 'anthropic';
-            else if (chat.model.includes('huggingface/'))
-                chat.api = "huggingface";
-            else if (chat.model.includes('meta/') || chat.model.includes('replicate/'))
-                chat.api = 'replicate';
-            else if (chat.model.includes('mistralai/') || chat.model.includes("mistral/"))
-                chat.api = 'mistral';
-            else if (chat.model.includes("groq/") || chat.model.includes("groqcloud/"))
-                chat.api = 'groq';
-            else if (chat.model.includes('novita/'))
-                chat.api = 'novita';
-            else if (chat.model.includes('qwen'))
-                chat.api = 'qwen';
-            else if (chat.model.includes('grok'))
-                chat.api = 'xai';
-            else if (chat.model.includes('deepseek'))
-                chat.api = 'deepseek';
-            else
-                chat.api = 'ollama';
-        }
-        console.log('Using chat settings:', chat);
+        let chat_model_profile = this._selectAPI(this.profile.model);
+        this.chat_model = this._createModel(chat_model_profile);
-        if (chat.api === 'google')
-            this.chat_model = new Gemini(chat.model, chat.url);
-        else if (chat.api === 'openai')
-            this.chat_model = new GPT(chat.model, chat.url);
-        else if (chat.api === 'anthropic')
-            this.chat_model = new Claude(chat.model, chat.url);
-        else if (chat.api === 'replicate')
-            this.chat_model = new ReplicateAPI(chat.model, chat.url);
-        else if (chat.api === 'ollama')
-            this.chat_model = new Local(chat.model, chat.url);
-        else if (chat.api === 'mistral')
-            this.chat_model = new Mistral(chat.model, chat.url);
-        else if (chat.api === 'groq') {
-            this.chat_model = new GroqCloudAPI(chat.model.replace('groq/', '').replace('groqcloud/', ''), chat.url, max_tokens ? max_tokens : 8192);
-        }
+        if (this.profile.code_model) {
+            let code_model_profile = this._selectAPI(this.profile.code_model);
+            this.code_model = this._createModel(code_model_profile);
+        }
+        else {
+            this.code_model = this.chat_model;
+        }
-        else if (chat.api === 'huggingface')
-            this.chat_model = new HuggingFace(chat.model, chat.url);
-        else if (chat.api === 'novita')
-            this.chat_model = new Novita(chat.model.replace('novita/', ''), chat.url);
-        else if (chat.api === 'qwen')
-            this.chat_model = new Qwen(chat.model, chat.url);
-        else if (chat.api === 'xai')
-            this.chat_model = new Grok(chat.model, chat.url);
-        else if (chat.api === 'deepseek')
-            this.chat_model = new DeepSeek(chat.model, chat.url);
-        else
-            throw new Error('Unknown API:', api);
        let embedding = this.profile.embedding;
        if (embedding === undefined) {
-            if (chat.api !== 'ollama')
-                embedding = {api: chat.api};
+            if (chat_model_profile.api !== 'ollama')
+                embedding = {api: chat_model_profile.api};
            else
                embedding = {api: 'none'};
        }
@@ -146,6 +99,69 @@ export class Prompter {
        });
    }
+    _selectAPI(profile) {
+        if (typeof profile === 'string' || profile instanceof String) {
+            profile = {model: profile};
+            if (profile.model.includes('gemini'))
+                profile.api = 'google';
+            else if (profile.model.includes('gpt') || profile.model.includes('o1') || profile.model.includes('o3'))
+                profile.api = 'openai';
+            else if (profile.model.includes('claude'))
+                profile.api = 'anthropic';
+            else if (profile.model.includes('huggingface/'))
+                profile.api = "huggingface";
+            else if (profile.model.includes('meta/') || profile.model.includes('replicate/'))
+                profile.api = 'replicate';
+            else if (profile.model.includes('mistralai/') || profile.model.includes("mistral/"))
+                profile.api = 'mistral';
+            else if (profile.model.includes("groq/") || profile.model.includes("groqcloud/"))
+                profile.api = 'groq';
+            else if (profile.model.includes('novita/'))
+                profile.api = 'novita';
+            else if (profile.model.includes('qwen'))
+                profile.api = 'qwen';
+            else if (profile.model.includes('grok'))
+                profile.api = 'xai';
+            else if (profile.model.includes('deepseek'))
+                profile.api = 'deepseek';
+            else
+                profile.api = 'ollama';
+        }
+        return profile;
+    }
+    _createModel(profile) {
+        let model = null;
+        if (profile.api === 'google')
+            model = new Gemini(profile.model, profile.url);
+        else if (profile.api === 'openai')
+            model = new GPT(profile.model, profile.url);
+        else if (profile.api === 'anthropic')
+            model = new Claude(profile.model, profile.url);
+        else if (profile.api === 'replicate')
+            model = new ReplicateAPI(profile.model, profile.url);
+        else if (profile.api === 'ollama')
+            model = new Local(profile.model, profile.url);
+        else if (profile.api === 'mistral')
+            model = new Mistral(profile.model, profile.url);
+        else if (profile.api === 'groq') {
+            model = new GroqCloudAPI(profile.model.replace('groq/', '').replace('groqcloud/', ''), profile.url, profile.max_tokens ? profile.max_tokens : 8192);
+        }
+        else if (profile.api === 'huggingface')
+            model = new HuggingFace(profile.model, profile.url);
+        else if (profile.api === 'novita')
+            model = new Novita(profile.model.replace('novita/', ''), profile.url);
+        else if (profile.api === 'qwen')
+            model = new Qwen(profile.model, profile.url);
+        else if (profile.api === 'xai')
+            model = new Grok(profile.model, profile.url);
+        else if (profile.api === 'deepseek')
+            model = new DeepSeek(profile.model, profile.url);
+        else
+            throw new Error('Unknown API: ' + profile.api);
+        return model;
+    }
    getName() {
        return this.profile.name;
    }
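Net effect of the refactor: model selection is now a two-step pipeline, with `_selectAPI` normalizing a string or object into a profile carrying an explicit `api` field, and `_createModel` mapping that profile to a client instance. A hedged usage sketch (assuming a `Prompter` instance `prompter`; the model strings are illustrative):

```javascript
// String input: the api is inferred from substrings of the model name.
let p = prompter._selectAPI('claude-3-5-sonnet-latest');
// p -> { model: 'claude-3-5-sonnet-latest', api: 'anthropic' }

// Object input: passed through as-is, so the caller must set api.
p = prompter._selectAPI({ api: 'openai', model: 'gpt-4o' });

const client = prompter._createModel(p); // e.g. a GPT wrapper around gpt-4o
```

Because `_selectAPI` checks substrings in order, a name like `mistralai/mixtral-8x7b` routes to the mistral client before the `ollama` fallback is reached.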
@@ -273,7 +289,7 @@ export class Prompter {
        await this.checkCooldown();
        let prompt = this.profile.coding;
        prompt = await this.replaceStrings(prompt, messages, this.coding_examples);
-        let resp = await this.chat_model.sendRequest(messages, prompt);
+        let resp = await this.code_model.sendRequest(messages, prompt);
        this.awaiting_coding = false;
        return resp;
    }

View file

@@ -38,7 +38,7 @@ export class SelfPrompter {
        let no_command_count = 0;
        const MAX_NO_COMMAND = 3;
        while (!this.interrupt) {
-            const msg = `You are self-prompting with the goal: '${this.prompt}'. Your next response MUST contain a command !withThisSyntax. Respond:`;
+            const msg = `You are self-prompting with the goal: '${this.prompt}'. Your next response MUST contain a command with this syntax: !commandName. Respond:`;
            let used_command = await this.agent.handleMessage('system', msg, -1);
            if (!used_command) {

View file

@@ -33,7 +33,7 @@ export class GPT {
        let res = null;
        try {
-            console.log('Awaiting openai api response...')
+            console.log('Awaiting openai api response from model', this.model_name)
            // console.log('Messages:', messages);
            let completion = await this.openai.chat.completions.create(pack);
            if (completion.choices[0].finish_reason == 'length')

View file

@@ -1,5 +1,6 @@
import OpenAIApi from 'openai';
import { getKey } from '../utils/keys.js';
+import { strictFormat } from '../utils/text.js';

// llama, mistral
export class Novita {
@@ -17,6 +18,10 @@ export class Novita {
    async sendRequest(turns, systemMessage, stop_seq='***') {
        let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
+        messages = strictFormat(messages);
        const pack = {
            model: this.model_name || "meta-llama/llama-3.1-70b-instruct",
            messages,
@@ -41,6 +46,18 @@ Novita {
                res = 'My brain disconnected, try again.';
            }
        }
+        if (res.includes('<think>')) {
+            let start = res.indexOf('<think>');
+            let end = res.indexOf('</think>');
+            // Remove a closed <think>...</think> block; if the closing tag
+            // is missing, drop everything from <think> onward.
+            if (end !== -1)
+                res = res.substring(0, start) + res.substring(end + 8);
+            else
+                res = res.substring(0, start);
+            res = res.trim();
+        }
        return res;
    }
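Extracted as a helper with sample inputs, the `<think>` stripping above behaves like this (a sketch; the helper name is not part of the codebase):

```javascript
// Remove a <think>...</think> reasoning block; if the closing tag is
// missing, truncate from <think> onward.
function stripThink(res) {
    const start = res.indexOf('<think>');
    if (start === -1) return res;
    const end = res.indexOf('</think>');
    res = end !== -1
        ? res.substring(0, start) + res.substring(end + '</think>'.length)
        : res.substring(0, start);
    return res.trim();
}

console.log(stripThink('<think>reasoning...</think>Placing sapling now.')); // 'Placing sapling now.'
console.log(stripThink('<think>model never closed the tag'));               // ''
console.log(stripThink('No tags at all.'));                                 // 'No tags at all.'
```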