AI mod command improvements; remove phi4 models due to poor output quality; push example env

This commit is contained in:
Aidan 2025-07-05 16:24:00 -04:00
parent 173d4e7a52
commit 3b6d200b21
6 changed files with 40 additions and 20 deletions

View file

@ -984,11 +984,30 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
return;
}
let stoppedCurrentRequest = false;
const initialLength = requestQueue.length;
const filteredQueue = requestQueue.filter(item => item.userId !== targetUserId);
const removedCount = initialLength - filteredQueue.length;
if (removedCount === 0) {
requestQueue.length = 0;
requestQueue.push(...filteredQueue);
if (currentRequest && currentRequest.userId === targetUserId) {
currentRequest.abortController?.abort();
try {
await axios.post(`${process.env.ollamaApi}/api/generate`, {
model: currentRequest.model,
keep_alive: 0,
}, { timeout: 5000 });
} catch (error) {
console.log("[✨ AI] Could not unload model after cancellation:", error.message);
}
stoppedCurrentRequest = true;
}
if (removedCount === 0 && !stoppedCurrentRequest) {
await ctx.reply(Strings.ai.noQueueItems.replace("{userId}", String(targetUserId)), {
parse_mode: 'Markdown',
...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
@ -996,10 +1015,16 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
return;
}
requestQueue.length = 0;
requestQueue.push(...filteredQueue);
let responseMessage = "";
if (stoppedCurrentRequest && removedCount > 0) {
responseMessage = Strings.ai.stoppedCurrentAndCleared.replace("{count}", String(removedCount)).replace("{userId}", String(targetUserId));
} else if (stoppedCurrentRequest) {
responseMessage = Strings.ai.stoppedCurrentRequestOnly.replace("{userId}", String(targetUserId));
} else {
responseMessage = Strings.ai.queueCleared.replace("{count}", String(removedCount)).replace("{userId}", String(targetUserId));
}
await ctx.reply(Strings.ai.queueCleared.replace("{count}", String(removedCount)).replace("{userId}", String(targetUserId)), {
await ctx.reply(responseMessage, {
parse_mode: 'Markdown',
...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
});

View file

@ -120,7 +120,10 @@
"requestStopped": "🛑 Your AI request has been stopped.",
"requestRemovedFromQueue": "🛑 Your AI request has been removed from the queue.",
"noActiveRequest": " You don't have any active AI requests to stop.",
"executionTimeoutReached": "\n\n⏱ Max execution time limit reached!"
"executionTimeoutReached": "\n\n⏱ Max execution time limit reached!",
"stoppedCurrentAndCleared": "🛑 Stopped current request and cleared {count} queued item(s) for user {userId}.",
"stoppedCurrentRequestOnly": "🛑 Stopped current request for user {userId} (no queued items found).",
"stoppedCurrentAndClearedQueue": "🛑 Stopped current request and cleared all queued items for user {userId}."
},
"maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
"maDownloadError": "Error downloading the file. Check the module ID and try again.",

View file

@ -123,7 +123,10 @@
"requestStopped": "🛑 Sua solicitação de IA foi interrompida.",
"requestRemovedFromQueue": "🛑 Sua solicitação de IA foi removida da fila.",
"noActiveRequest": " Você não tem nenhuma solicitação ativa de IA para parar.",
"executionTimeoutReached": "\n\n⏱ Limite máximo de tempo de execução atingido!"
"executionTimeoutReached": "\n\n⏱ Limite máximo de tempo de execução atingido!",
"stoppedCurrentAndCleared": "🛑 Parou solicitação atual e limpou {count} item(s) da fila para o usuário {userId}.",
"stoppedCurrentRequestOnly": "🛑 Parou solicitação atual para o usuário {userId} (nenhum item na fila encontrado).",
"stoppedCurrentAndClearedQueue": "🛑 Parou solicitação atual e limpou todos os itens da fila para o usuário {userId}."
},
"maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
"maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",