mirror of
https://github.com/kolbytn/mindcraft.git
synced 2025-04-21 21:52:07 +02:00
commit
5c96e13b81
5 changed files with 65 additions and 13 deletions
|
@ -10,9 +10,9 @@ This project allows an AI model to write/execute code on your computer that may
|
|||
|
||||
## Requirements
|
||||
|
||||
- [OpenAI API Subscription](https://openai.com/blog/openai-api), [Gemini API Subscription](https://aistudio.google.com/app/apikey), [Anthropic API Subscription](https://docs.anthropic.com/claude/docs/getting-access-to-claude), [Replicate API Subscription](https://replicate.com/), [Ollama Installed](https://ollama.com/download), or, a [Groq Account & API Key](https://console.groq.com/keys)
|
||||
- [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc)
|
||||
- [Minecraft Java Edition](https://www.minecraft.net/en-us/store/minecraft-java-bedrock-edition-pc) (up to v1.20.4)
|
||||
- [Node.js](https://nodejs.org/) (at least v14)
|
||||
- One of these: [OpenAI API Key](https://openai.com/blog/openai-api) | [Gemini API Key](https://aistudio.google.com/app/apikey) | [Anthropic API Key](https://docs.anthropic.com/claude/docs/getting-access-to-claude) | [Replicate API Key](https://replicate.com/) | [Hugging Face API Key](https://huggingface.co/) | [Groq API Key](https://console.groq.com/keys) | [Ollama Installed](https://ollama.com/download)
|
||||
|
||||
## Installation
|
||||
|
||||
|
@ -25,6 +25,7 @@ Rename `keys.example.json` to `keys.json` and fill in your API keys, and you can
|
|||
| Replicate | `REPLICATE_API_KEY` | `meta/meta-llama-3-70b-instruct` | [docs](https://replicate.com/collections/language-models) |
|
||||
| Ollama (local) | n/a | `llama3` | [docs](https://ollama.com/library) |
|
||||
| Groq | `GROQCLOUD_API_KEY` | `groq/mixtral-8x7b-32768` | [docs](https://console.groq.com/docs/models) |
|
||||
| Hugging Face | `HUGGINGFACE_API_KEY` | `huggingface/mistralai/Mistral-Nemo-Instruct-2407` | [docs](https://huggingface.co/models) |
|
||||
|
||||
If you use Ollama, to install the models used by default (generation and embedding), execute the following terminal command:
|
||||
`ollama pull llama3 && ollama pull nomic-embed-text`
|
||||
|
|
|
@ -4,5 +4,6 @@
|
|||
"GEMINI_API_KEY": "",
|
||||
"ANTHROPIC_API_KEY": "",
|
||||
"REPLICATE_API_KEY": "",
|
||||
"GROQCLOUD_API_KEY": ""
|
||||
"GROQCLOUD_API_KEY": "",
|
||||
"HUGGINGFACE_API_KEY": ""
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.17.1",
|
||||
"@google/generative-ai": "^0.2.1",
|
||||
"@huggingface/inference": "^2.8.1",
|
||||
"google-translate-api-x": "^10.7.1",
|
||||
"groq-sdk": "^0.5.0",
|
||||
"minecraft-data": "^3.46.2",
|
||||
|
|
|
@ -11,6 +11,7 @@ import { Claude } from '../models/claude.js';
|
|||
import { ReplicateAPI } from '../models/replicate.js';
|
||||
import { Local } from '../models/local.js';
|
||||
import { GroqCloudAPI } from '../models/groq.js';
|
||||
import { HuggingFace } from '../models/huggingface.js';
|
||||
|
||||
export class Prompter {
|
||||
constructor(agent, fp) {
|
||||
|
@ -36,6 +37,8 @@ export class Prompter {
|
|||
chat.api = 'openai';
|
||||
else if (chat.model.includes('claude'))
|
||||
chat.api = 'anthropic';
|
||||
else if (chat.model.includes('huggingface/'))
|
||||
chat.api = "huggingface";
|
||||
else if (chat.model.includes('meta/') || chat.model.includes('mistralai/') || chat.model.includes('replicate/'))
|
||||
chat.api = 'replicate';
|
||||
else if (chat.model.includes("groq/") || chat.model.includes("groqcloud/"))
|
||||
|
@ -46,19 +49,21 @@ export class Prompter {
|
|||
|
||||
console.log('Using chat settings:', chat);
|
||||
|
||||
if (chat.api == 'google')
|
||||
if (chat.api === 'google')
|
||||
this.chat_model = new Gemini(chat.model, chat.url);
|
||||
else if (chat.api == 'openai')
|
||||
else if (chat.api === 'openai')
|
||||
this.chat_model = new GPT(chat.model, chat.url);
|
||||
else if (chat.api == 'anthropic')
|
||||
else if (chat.api === 'anthropic')
|
||||
this.chat_model = new Claude(chat.model, chat.url);
|
||||
else if (chat.api == 'replicate')
|
||||
else if (chat.api === 'replicate')
|
||||
this.chat_model = new ReplicateAPI(chat.model, chat.url);
|
||||
else if (chat.api == 'ollama')
|
||||
else if (chat.api === 'ollama')
|
||||
this.chat_model = new Local(chat.model, chat.url);
|
||||
else if (chat.api == 'groq') {
|
||||
else if (chat.api === 'groq') {
|
||||
this.chat_model = new GroqCloudAPI(chat.model.replace('groq/', '').replace('groqcloud/', ''), chat.url, max_tokens ? max_tokens : 8192);
|
||||
}
|
||||
else if (chat.api === 'huggingface')
|
||||
this.chat_model = new HuggingFace(chat.model, chat.url);
|
||||
else
|
||||
throw new Error('Unknown API:', api);
|
||||
|
||||
|
@ -74,13 +79,13 @@ export class Prompter {
|
|||
|
||||
console.log('Using embedding settings:', embedding);
|
||||
|
||||
if (embedding.api == 'google')
|
||||
if (embedding.api === 'google')
|
||||
this.embedding_model = new Gemini(embedding.model, embedding.url);
|
||||
else if (embedding.api == 'openai')
|
||||
else if (embedding.api === 'openai')
|
||||
this.embedding_model = new GPT(embedding.model, embedding.url);
|
||||
else if (embedding.api == 'replicate')
|
||||
else if (embedding.api === 'replicate')
|
||||
this.embedding_model = new ReplicateAPI(embedding.model, embedding.url);
|
||||
else if (embedding.api == 'ollama')
|
||||
else if (embedding.api === 'ollama')
|
||||
this.embedding_model = new Local(embedding.model, embedding.url);
|
||||
else {
|
||||
this.embedding_model = null;
|
||||
|
|
44
src/models/huggingface.js
Normal file
44
src/models/huggingface.js
Normal file
|
@ -0,0 +1,44 @@
|
|||
import {toSinglePrompt} from '../utils/text.js';
|
||||
import {getKey} from '../utils/keys.js';
|
||||
import {HfInference} from "@huggingface/inference";
|
||||
|
||||
export class HuggingFace {
    /**
     * Model wrapper around the Hugging Face Inference API (chat-completion).
     *
     * @param {string} model_name - Model id, optionally prefixed with
     *     'huggingface/' (the prefix is stripped).
     * @param {string} [url] - Ignored; Hugging Face does not support custom urls.
     */
    constructor(model_name, url) {
        this.model_name = model_name.replace('huggingface/', '');
        this.url = url;

        if (this.url) {
            console.warn("Hugging Face doesn't support custom urls!");
        }

        this.huggingface = new HfInference(getKey('HUGGINGFACE_API_KEY'));
    }

    /**
     * Send the conversation to the model and return its reply as a string.
     *
     * @param {Array} turns - Conversation turns, flattened into a single
     *     prompt via toSinglePrompt.
     * @param {string} systemMessage - System prompt prepended to the input.
     * @returns {Promise<string>} The model's reply, or a fallback message
     *     if the API call fails (errors are caught, never thrown).
     */
    async sendRequest(turns, systemMessage) {
        const stop_seq = '***';
        const prompt = toSinglePrompt(turns, null, stop_seq);
        // Fall back to a default model when no model name was configured.
        const model_name = this.model_name || 'meta-llama/Meta-Llama-3-8B';

        const input = systemMessage + "\n" + prompt;
        let res = '';
        try {
            console.log('Awaiting Hugging Face API response...');
            for await (const chunk of this.huggingface.chatCompletionStream({
                model: model_name,
                messages: [{ role: "user", content: input }]
            })) {
                res += (chunk.choices[0]?.delta?.content || "");
                // Fix: the prompt is built around stop_seq, but the original
                // code never stopped at it — truncate and end the stream as
                // soon as the stop sequence appears.
                if (res.includes(stop_seq)) {
                    res = res.slice(0, res.indexOf(stop_seq));
                    break;
                }
            }
        } catch (err) {
            console.error(err); // was console.log — errors belong on stderr
            res = 'My brain disconnected, try again.';
        }
        console.log('Received.');
        console.log(res);
        return res;
    }

    /**
     * Embeddings are intentionally unsupported for this backend.
     * @throws {Error} Always.
     */
    async embed(text) {
        throw new Error('Embeddings are not supported by HuggingFace.');
    }
}
|
Loading…
Add table
Reference in a new issue