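// Ollama model wrapper from https://github.com/kolbytn/mindcraft: chat completions,
// embeddings, and vision requests against a locally running Ollama server.
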
import { strictFormat } from '../utils/text.js';

export class Ollama {
    static prefix = 'ollama';

    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;
        this.url = url || 'http://127.0.0.1:11434';
        this.chat_endpoint = '/api/chat';
        this.embedding_endpoint = '/api/embeddings';
    }
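
    // Sends a chat request to the local /api/chat endpoint. Retries up to maxAttempts
    // times and strips <think>...</think> reasoning blocks from the final answer.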
    async sendRequest(turns, systemMessage) {
        let model = this.model_name || 'sweaterdog/andy-4:micro-q8_0';
        let messages = strictFormat(turns);
        messages.unshift({ role: 'system', content: systemMessage });

        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;

        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting local response... (model: ${model}, attempt: ${attempt})`);
            let res = null;
            try {
                let apiResponse = await this.send(this.chat_endpoint, {
                    model: model,
                    messages: messages,
                    stream: false,
                    ...(this.params || {})
                });
                if (apiResponse) {
                    res = apiResponse['message']['content'];
                } else {
                    res = 'No response data.';
                }
            } catch (err) {
                if (err.message.toLowerCase().includes('context length') && turns.length > 1) {
                    console.log('Context length exceeded, trying again with shorter context.');
                    return await this.sendRequest(turns.slice(1), systemMessage);
                } else {
                    console.log(err);
                    res = 'My brain disconnected, try again.';
                }
            }

            const hasOpenTag = res.includes("<think>");
            const hasCloseTag = res.includes("</think>");

            // An opening tag without a closing tag means the reasoning block was cut off; retry.
            if (hasOpenTag && !hasCloseTag) {
                console.warn("Partial <think> block detected. Re-generating...");
                if (attempt < maxAttempts) continue;
            }

            // If only the closing tag survived, restore the opening tag so the block can be stripped.
            if (hasCloseTag && !hasOpenTag) {
                res = '<think>' + res;
            }

            // Remove complete <think>...</think> reasoning blocks from the returned answer.
            if (hasCloseTag) {
                res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }

            finalRes = res;
            break;
        }

        if (finalRes == null) {
            console.warn("Could not get a valid response after max attempts.");
            finalRes = 'I thought too hard, sorry, try again.';
        }
        return finalRes;
    }
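
    // Returns an embedding vector for `text` via the legacy /api/embeddings endpoint.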
    async embed(text) {
        let model = this.model_name || 'nomic-embed-text';
        // The legacy /api/embeddings endpoint expects `prompt` (not `input`) and
        // returns a single `embedding` array.
        let body = { model: model, prompt: text };
        let res = await this.send(this.embedding_endpoint, body);
        return res['embedding'];
    }
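
    // POSTs a JSON body to the given endpoint and returns the parsed response,
    // or null if the request fails.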
    async send(endpoint, body) {
        const url = new URL(endpoint, this.url);
        const method = 'POST';
        // Without an explicit header, fetch would send the stringified body as text/plain.
        const headers = new Headers({ 'Content-Type': 'application/json' });
        const request = new Request(url, { method, headers, body: JSON.stringify(body) });
        let data = null;
        try {
            const res = await fetch(request);
            if (res.ok) {
                data = await res.json();
            } else {
                throw new Error(`Ollama Status: ${res.status}`);
            }
        } catch (err) {
            console.error('Failed to send Ollama request.');
            console.error(err);
        }
        return data;
    }
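
    // Appends an OpenAI-style multimodal message (text plus base64 JPEG data URL) and
    // defers to sendRequest; the locally served model must accept image input.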
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const imageMessages = [...messages];
        imageMessages.push({
            role: "user",
            content: [
                { type: "text", text: systemMessage },
                {
                    type: "image_url",
                    image_url: {
                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
                    }
                }
            ]
        });

        return this.sendRequest(imageMessages, systemMessage);
    }
}
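
// Usage sketch (illustrative, not part of the module): assumes an Ollama server running
// at the default http://127.0.0.1:11434 and a chat model that has already been pulled
// locally (the model name below is a placeholder). Params passed to the constructor are
// spread into the /api/chat request body, so generation settings go under `options`.
//
//   const model = new Ollama('llama3.2', undefined, { options: { temperature: 0.7 } });
//   const reply = await model.sendRequest(
//       [{ role: 'user', content: 'Say hello in five words.' }],
//       'You are a concise assistant.'
//   );
//   console.log(reply);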