mirror of
https://github.com/kolbytn/mindcraft.git
synced 2025-04-22 06:02:07 +02:00
removed request queuing
This commit is contained in:
parent
3e3e80c0ed
commit
34d91dff79
1 changed file with 5 additions and 18 deletions
|
@ -20,28 +20,14 @@ else {
|
||||||
|
|
||||||
// Shared OpenAI API client; assumes openAiConfig was assembled in the lines above — TODO confirm.
const openai = new OpenAIApi(openAiConfig);
|
const openai = new OpenAIApi(openAiConfig);
|
||||||
|
|
||||||
// Next ticket id handed out to an incoming request (incremented in sendRequest).
let counter = 0;
// FIFO of ticket ids for queued/in-flight requests; the id at the head of
// the queue is the only request allowed to run (see sendRequest).
let request_queue = [];
|
|
||||||
/**
 * Sends a chat request to the model, serializing concurrent callers.
 * Each call takes a ticket from the module-level counter, enqueues it,
 * and polls (100ms) until its ticket reaches the front of the queue
 * before delegating to queryGPT.
 * @param {Array<object>} turns - conversation turns appended after the system message
 * @param {string} systemMessage - system prompt for the request
 * @returns {Promise<*>} whatever queryGPT resolves with
 */
export async function sendRequest(turns, systemMessage) {
    // this wrapper function ensures that new requests await the completion of previous requests in order
    const ticket = counter++;
    request_queue.push(ticket);
    if (request_queue.length > 1) {
        console.log('awaiting previous requests to complete, queueing request', ticket);
    }
    // Busy-wait until every earlier ticket has been shifted off the queue.
    while (request_queue[0] !== ticket) {
        await new Promise((resolve) => setTimeout(resolve, 100));
    }
    const result = await queryGPT(turns, systemMessage);
    // Release the head slot so the next queued request may proceed.
    request_queue.shift();
    return result;
}
|
|
||||||
|
|
||||||
async function queryGPT(turns, systemMessage, stop_seq='***') {
|
export async function sendRequest(turns, systemMessage, stop_seq='***') {
|
||||||
|
|
||||||
let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
|
let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
|
||||||
|
|
||||||
let res = null;
|
let res = null;
|
||||||
try {
|
try {
|
||||||
console.log('Awaiting openai api response...');
|
console.log('Awaiting openai api response...')
|
||||||
let completion = await openai.chat.completions.create({
|
let completion = await openai.chat.completions.create({
|
||||||
model: 'gpt-3.5-turbo',
|
model: 'gpt-3.5-turbo',
|
||||||
messages: messages,
|
messages: messages,
|
||||||
|
@ -49,12 +35,13 @@ async function queryGPT(turns, systemMessage, stop_seq='***') {
|
||||||
});
|
});
|
||||||
if (completion.choices[0].finish_reason == 'length')
|
if (completion.choices[0].finish_reason == 'length')
|
||||||
throw new Error('Context length exceeded');
|
throw new Error('Context length exceeded');
|
||||||
|
console.log('Received.')
|
||||||
res = completion.choices[0].message.content;
|
res = completion.choices[0].message.content;
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
|
if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
|
||||||
console.log('Context length exceeded, trying again with shorter context.');
|
console.log('Context length exceeded, trying again with shorter context.');
|
||||||
return await queryGPT(turns.slice(1), systemMessage, stop_seq);
|
return await sendRequest(turns.slice(1), systemMessage, stop_seq);
|
||||||
} else {
|
} else {
|
||||||
console.log(err);
|
console.log(err);
|
||||||
res = 'My brain disconnected, try again.';
|
res = 'My brain disconnected, try again.';
|
||||||
|
|
Loading…
Add table
Reference in a new issue