From 5270d2cae5a839cbd2889167fa8c875eb9277a87 Mon Sep 17 00:00:00 2001
From: Aidan
Date: Mon, 30 Jun 2025 11:24:51 -0400
Subject: [PATCH] cleanup, bug fixes, better markdown parsing, better model
 display

---
 src/commands/ai.ts          | 109 ++++++++++++++++++++++--------------
 src/commands/main.ts        |   6 +-
 src/locales/english.json    |   4 +-
 src/locales/portuguese.json |   4 +-
 4 files changed, 77 insertions(+), 46 deletions(-)

diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index 4431f56..97a8c48 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -220,6 +220,10 @@ function extractAxiosErrorMessage(error: unknown): string {
   return 'An unexpected error occurred.';
 }
 
+function escapeMarkdown(text: string): string {
+  return text.replace(/([*_`\[])/g, '\\$1');
+}
+
 async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> {
   const Strings = getStrings(languageCode(ctx));
   if (!ctx.chat) {
@@ -228,6 +232,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       error: Strings.unexpectedErr.replace("{error}", "No chat found"),
     };
   }
+  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
   try {
     const aiResponse = await axios.post(
       `${process.env.ollamaApi}/api/generate`,
@@ -246,6 +251,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     let fullResponse = "";
     let thoughts = "";
     let lastUpdate = Date.now();
+    let sentHeader = false;
     const stream: NodeJS.ReadableStream = aiResponse.data as any;
     for await (const chunk of stream) {
       const lines = chunk.toString().split('\n');
@@ -275,23 +281,24 @@
         if (model === thinking_model) {
           let patchedThoughts = ln.response;
           const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
-          patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '');
-          patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`');
-          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`');
+          patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`' + Strings.ai.thinking + '`' + p1 + '`' + Strings.ai.finishedThinking + '`' : '');
+          patchedThoughts = patchedThoughts.replace(/<think>/g, '`' + Strings.ai.thinking + '`');
+          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`' + Strings.ai.finishedThinking + '`');
           thoughts += patchedThoughts;
           fullResponse += patchedThoughts;
         } else {
           fullResponse += ln.response;
         }
-        if (now - lastUpdate >= 1000) {
+        if (now - lastUpdate >= 1000 || !sentHeader) {
           await rateLimiter.editMessageWithRetry(
             ctx,
             ctx.chat.id,
             replyGenerating.message_id,
-            thoughts,
+            modelHeader + fullResponse,
             { parse_mode: 'Markdown' }
           );
           lastUpdate = now;
+          sentHeader = true;
         }
       }
     }
@@ -315,7 +322,7 @@
       Strings.ai.pulling.replace("{model}", model),
       { parse_mode: 'Markdown' }
     );
-    console.log(`[✨ AI | i] Pulling ${model} from ollama...`);
+    console.log(`[✨ AI] Pulling ${model} from ollama...`);
     try {
       await axios.post(
         `${process.env.ollamaApi}/api/pull`,
@@ -330,13 +337,13 @@
       console.error("[✨ AI | !] Pull error:", pullMsg);
       return {
         success: false,
-        error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
+        error: `❌ Something went wrong while pulling ${escapeMarkdown(model)}: ${escapeMarkdown(pullMsg)}`,
       };
     }
-    console.log(`[✨ AI | i] ${model} pulled successfully`);
+    console.log(`[✨ AI] ${model} pulled successfully`);
     return {
       success: true,
-      response: `✅ Pulled ${model} successfully, please retry the command.`,
+      response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`,
     };
   }
 }
 
-async function handleAiReply(ctx: TextContext, db: NodePgDatabase, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
   const Strings = getStrings(languageCode(ctx));
   const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature);
   if (!aiResponse) return;
   if (!ctx.chat) return;
+  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
   if (aiResponse.success && aiResponse.response) {
-    const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
@@ -387,6 +394,14 @@ async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeof
 }
 
+export function getModelLabelByName(name: string): string {
+  for (const group of models) {
+    const found = group.models.find(m => m.name === name);
+    if (found) return found.label;
+  }
+  return name;
+}
+
 export default (bot: Telegraf, db: NodePgDatabase) => {
   const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
 
@@ -427,44 +442,56 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
       logger.logPrompt(fixedMsg)
 
       const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-      await handleAiReply(textCtx, db, model, prompt, replyGenerating, aiTemperature)
+      await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature)
     })
 
   bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
-    if (!ctx.message || !('text' in ctx.message)) return
-    const textCtx = ctx as TextContext
-    const reply_to_message_id = replyToMessageId(textCtx)
-    const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-    const message = textCtx.message.text
-    const author = ("@" + ctx.from?.username) || ctx.from?.first_name
+    try {
+      if (!ctx.message || !("text" in ctx.message)) return
+      const textCtx = ctx as TextContext
+      const reply_to_message_id = replyToMessageId(textCtx)
+      const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+      const message = textCtx.message.text
+      const author = ctx.from?.username ? "@" + ctx.from.username : ctx.from?.first_name
 
-    logger.logCmdStart(author, "ask")
+      logger.logCmdStart(author, "ask")
 
-    if (!process.env.ollamaApi) {
-      await ctx.reply(Strings.ai.disabled, {
+      if (!process.env.ollamaApi) {
+        await ctx.reply(Strings.ai.disabled, {
+          parse_mode: 'Markdown',
+          ...({ reply_to_message_id })
+        })
+        return
+      }
+
+      const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
+      if (fixedMsg.length < 1) {
+        await ctx.reply(Strings.ai.askNoMessage, {
+          parse_mode: 'Markdown',
+          ...({ reply_to_message_id })
+        })
+        return
+      }
+
+      const modelLabel = getModelLabelByName(customAiModel)
+      const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
         parse_mode: 'Markdown',
         ...({ reply_to_message_id })
       })
-      return
+
+      logger.logPrompt(fixedMsg)
+
+      const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
+      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature)
+    } catch (err) {
+      const Strings = getStrings(languageCode(ctx));
+      if (ctx && ctx.reply) {
+        try {
+          await ctx.reply(Strings.unexpectedErr.replace("{error}", (err instanceof Error ? err.message : String(err))), { parse_mode: 'Markdown' })
+        } catch (e) {
+          console.error("[✨ AI | !] Failed to send error reply:", e)
+        }
+      }
+    }
-
-    const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
-    if (fixedMsg.length < 1) {
-      await ctx.reply(Strings.ai.askNoMessage, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
-    }
-
-    const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", customAiModel), {
-      parse_mode: 'Markdown',
-      ...({ reply_to_message_id })
-    })
-
-    logger.logPrompt(fixedMsg)
-
-    const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-    await handleAiReply(textCtx, db, customAiModel, prompt, replyGenerating, aiTemperature)
   })
 }
\ No newline at end of file
diff --git a/src/commands/main.ts b/src/commands/main.ts
index a6c48ba..fe76037 100644
--- a/src/commands/main.ts
+++ b/src/commands/main.ts
@@ -7,7 +7,7 @@ import * as schema from '../db/schema';
 import { eq } from 'drizzle-orm';
 import { ensureUserInDb } from '../utils/ensure-user';
 import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
-import { models } from './ai';
+import { models, getModelLabelByName } from './ai';
 import { langs } from '../locales/config';
 
 type UserRow = typeof schema.usersTable.$inferSelect;
@@ -54,7 +54,7 @@ function getSettingsMenu(user: UserRow, Strings: any): SettingsMenu {
     inline_keyboard: [
       [
         { text: `✨ ${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: 'settings_aiEnabled' },
-        { text: `🧠 ${Strings.settings.ai.aiModel}: ${user.customAiModel}`, callback_data: 'settings_aiModel' }
+        { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: 'settings_aiModel' }
       ],
       [
         { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: 'settings_aiTemperature' },
@@ -78,7 +78,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
         user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled
       ).replace(
         /{aiModel}/g,
-        user.customAiModel
+        getModelLabelByName(user.customAiModel)
       ).replace(
         /{aiTemperature}/g,
         user.aiTemperature.toString()
diff --git a/src/locales/english.json b/src/locales/english.json
index e1ad103..cd83da6 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -70,7 +70,9 @@
     "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
     "askGenerating": "✨ _{model} is working..._",
     "askNoMessage": "Please provide a message to ask the model.",
-    "languageCode": "Language"
+    "languageCode": "Language",
+    "thinking": "Thinking...",
+    "finishedThinking": "Finished thinking"
   },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",
diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json
index 63b3a4c..2d04a8f 100644
--- a/src/locales/portuguese.json
+++ b/src/locales/portuguese.json
@@ -69,7 +69,9 @@
     "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
     "askGenerating": "✨ _{model} está funcionando..._",
     "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
-    "languageCode": "Idioma"
+    "languageCode": "Idioma",
+    "thinking": "Pensando...",
+    "finishedThinking": "Pensamento finalizado"
   },
   "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
   "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",