made it compatible with other programs that use the OpenAI API format (such as oobabooga's Text Generation Web UI)

This commit is contained in:
Radnos 2024-04-10 15:54:40 +02:00
parent 5951fd02e5
commit fe8324b034
6 changed files with 9 additions and 121 deletions

View file

@ -8,7 +8,7 @@ This project allows an AI model to write/execute code on your computer that may
## Requirements
- [OpenAI API Subscription](https://openai.com/blog/openai-api), [Gemini API Subscription](https://aistudio.google.com/app/apikey), [Anthropic API Subscription](https://docs.anthropic.com/claude/docs/getting-access-to-claude), or [Ollama](https://ollama.com/download)
- [OpenAI API Subscription](https://openai.com/blog/openai-api), [Gemini API Subscription](https://aistudio.google.com/app/apikey), [Anthropic API Subscription](https://docs.anthropic.com/claude/docs/getting-access-to-claude), or any service compatible with the OpenAI API format (such as [Ollama](https://ollama.com/download))
- [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc)
- [Node.js](https://nodejs.org/) (at least v14)
@ -21,9 +21,8 @@ Add one of these environment variables:
⭐[How do I add the API key as an environment variable?](https://phoenixnap.com/kb/windows-set-environment-variable)⭐
Alternatively, you can use Ollama.
To install the models used by default (generation and embedding), execute the following script:
`ollama pull mistral:instruct && ollama pull nomic-embed-text`
If you use Ollama, to install the models used by default (generation and embedding), execute the following script:
`ollama pull mistral:instruct && ollama pull nomic-embed-text`
Then, clone/download this repository
@ -39,7 +38,7 @@ Run `node main.js`
You can configure the agent's name, model, and prompts in their profile like `andy.json`.
You can configure ollama in `ollama-config.json`.
You can configure local service in `local-config.json`.
You can configure project details in `settings.json`.

View file

@ -1,6 +1,6 @@
import { AgentProcess } from './src/process/agent-process.js';
let profile = './andy.json';
let profile = './radley.json';
let load_memory = false;
let init_message = 'Say hello world and your name. Do NOT use any command yet, nor make any comment about that fact.';

View file

@ -1,4 +0,0 @@
{
"url": "http://localhost:11434",
"embedding_model": "nomic-embed-text"
}

View file

@ -1,7 +1,7 @@
{
"name": "radley",
"model": "ollama[mistral:instruct]",
"model": "local[mistral:instruct]",
"conversing": "You are a playful Minecraft bot named $NAME that can converse with players, see, move, mine, build, and interact with the world by using commands. Act human-like as if you were a typical Minecraft player, rather than an AI. Be very brief in your responses, don't apologize constantly, don't give instructions or make lists unless asked, and don't refuse requests. Don't pretend to act, use commands immediately when requested. Do NOT say this: 'Sure, I've stopped.', instead say this: 'Sure, I'll stop. !stop'. Do NOT say this: 'On my way! Give me a moment.', instead say this: 'On my way! !goToPlayer('playername', 3)'. This is extremely important to me, take a deep breath and have fun :)\n$STATS\n$COMMAND_DOCS\n$EXAMPLES\nConversation Begin:",

View file

@ -8,7 +8,7 @@ import { getCommand } from './commands/index.js';
import { Gemini } from '../models/gemini.js';
import { GPT } from '../models/gpt.js';
import { Claude } from '../models/claude.js';
import { Ollama } from '../models/ollama.js';
import { Local } from '../models/local.js';
export class Prompter {
@ -31,8 +31,8 @@ export class Prompter {
this.model = new GPT(model_name);
else if (model_name.includes('claude'))
this.model = new Claude(model_name);
else if (model_name.includes('ollama'))
this.model = new Ollama(model_name);
else if (model_name.includes('local'))
this.model = new Local(model_name);
else
throw new Error('Unknown model ' + model_name);
}

View file

@ -1,107 +0,0 @@
import OpenAIApi from 'openai';
import axios from 'axios';
import { readFileSync } from 'fs';
let ollamaSettings = JSON.parse(readFileSync('./ollama-config.json', 'utf8'));
/**
 * Extract the text between the first "[" and the first "]" in a string.
 * Used to pull the model name out of identifiers like "ollama[mistral]".
 * @param {string} str - The string to search.
 * @returns {string} The bracketed content, or "" if no well-ordered pair exists.
 */
function getContentInBrackets(str) {
    const open = str.indexOf('[');
    const close = str.indexOf(']');
    // A valid pair requires "[" to exist and the first "]" to come after it.
    const hasPair = open !== -1 && close > open;
    return hasPair ? str.substring(open + 1, close) : '';
}
/**
 * Chat + embedding client for an Ollama server via its OpenAI-compatible API.
 * Reads the endpoint URL and embedding model from ollama-config.json
 * (module-level `ollamaSettings`).
 */
export class Ollama {
    /**
     * @param {string} model_name - Expected format "ollama[model]", e.g. "ollama[mistral]".
     * @throws {Error} If no model name is found inside the brackets.
     */
    constructor(model_name) {
        this.model_name = getContentInBrackets(model_name);
        if (this.model_name === "") {
            throw new Error('Model is not specified! Please ensure you input the model in the following format: ollama[model]. For example, for Mistral, use: ollama[mistral]');
        }

        // BUG FIX: the original created this.openai inside an un-awaited
        // axios.get() callback, so early sendRequest() calls raced against an
        // undefined client, and a non-200 response threw inside an unhandled
        // promise. Construct the client synchronously instead — it needs no
        // network access — and make the reachability probe best-effort logging.
        this.openai = new OpenAIApi({
            baseURL: `${ollamaSettings["url"]}/v1`,
            apiKey: 'ollama', // required by the client library but unused by Ollama
        });

        // Best-effort reachability check; log rather than throw because a
        // constructor cannot await and an async throw here would be unhandled.
        axios.get(ollamaSettings["url"])
            .then((response) => {
                if (response.status !== 200) {
                    console.error(`Unexpected status from Ollama endpoint: ${response.status}.`);
                }
            })
            .catch((err) => {
                console.error(`Could not reach Ollama endpoint at ${ollamaSettings["url"]}:`, err.message);
            });
    }

    /**
     * Send a chat completion request.
     * On context overflow, retries with the oldest turn dropped; on any other
     * failure, returns a fixed apology string rather than throwing.
     * @param {Array<{role: string, content: string}>} turns - Conversation history.
     * @param {string} systemMessage - System prompt prepended to the history.
     * @param {string} [stop_seq='***'] - Stop sequence passed to the model.
     * @returns {Promise<string>} The model's reply, or an error placeholder.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        const messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
        let res = null;
        try {
            console.log(`Awaiting ollama response... (model: ${this.model_name})`);
            console.log('Messages:', messages);
            const completion = await this.openai.chat.completions.create({
                model: this.model_name,
                messages: messages,
                stop: stop_seq,
            });
            if (completion.choices[0].finish_reason == 'length')
                throw new Error('Context length exceeded');
            console.log('Received.');
            res = completion.choices[0].message.content;
        }
        catch (err) {
            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                // BUG FIX: the original called the bare `sendRequest(...)`,
                // which is undefined at module scope — the retry always
                // crashed with a ReferenceError instead of recursing.
                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
            } else {
                console.log(err);
                res = 'My brain disconnected, try again.';
            }
        }
        return res;
    }

    /**
     * Embed text using Ollama's native /api/embeddings endpoint.
     * Uses a raw HTTP call because Ollama did not yet expose embeddings through
     * the OpenAI-compatible API; switch to this.openai.embeddings.create once
     * it does.
     * @param {string} text - Text to embed.
     * @returns {Promise<number[]>} The embedding vector, or a 1-element random
     *   fallback on failure (deliberate best-effort: keeps callers running,
     *   at the cost of meaningless similarity scores for that item).
     */
    async embed(text) {
        try {
            const response = await axios.post(`${ollamaSettings["url"]}/api/embeddings`, {
                model: ollamaSettings["embedding_model"],
                prompt: text,
            });
            return response.data.embedding;
        } catch (error) {
            console.error('Error embedding text:', error.response ? error.response.data : error.message);
            return Array(1).fill().map(() => Math.random());
        }
    }
}