From a4233c5cffe335d9cefa415e4fc1390ee9701f74 Mon Sep 17 00:00:00 2001
From: Aidan
Date: Sat, 28 Jun 2025 15:08:48 -0400
Subject: [PATCH] clean up, axios tweaks

---
 src/commands/ai.ts | 240 ++++++++++++++++++++++-----------------------
 1 file changed, 115 insertions(+), 125 deletions(-)

diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index 4badbb5..0e27578 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -72,151 +72,145 @@ export async function preChecks() {
 
 async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) {
   const Strings = getStrings(languageCode(ctx))
-  if (!ctx.chat) return {
-    "success": false,
-    "error": Strings.unexpectedErr.replace("{error}", "No chat found"),
+  if (!ctx.chat) {
+    return {
+      success: false,
+      error: Strings.unexpectedErr.replace("{error}", "No chat found"),
+    }
   }
   try {
-    const aiResponse = await axios.post(`${process.env.ollamaApi}/api/generate`, {
-      model: model,
-      prompt: prompt,
-      stream: true,
-    }, {
-      responseType: "stream",
-    })
+    const aiResponse = await axios.post(
+      `${process.env.ollamaApi}/api/generate`,
+      {
+        model,
+        prompt,
+        stream: true,
+      },
+      {
+        responseType: "stream",
+      }
+    )
 
     let fullResponse = ""
     let thoughts = ""
    let lastUpdate = Date.now()
 
-    for await (const chunk of aiResponse.data) {
+    const stream = aiResponse.data
+    for await (const chunk of stream) {
       const lines = chunk.toString().split('\n')
       for (const line of lines) {
         if (!line.trim()) continue
-        let ln = JSON.parse(line)
-
-        if (model === thinking_model && ln.response.includes('<think>')) {
-          const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
-          if (thinkMatch) {
-            const innerContent = thinkMatch[1]
-            if (innerContent.trim().length > 0) {
-              logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
-            }
-          } else {
-            logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
-          }
-        } else if (model === thinking_model && ln.response.includes('</think>')) {
-          logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
-        }
-
+        let ln
         try {
-          const now = Date.now()
-
-          if (ln.response) {
-            if (model === thinking_model) {
-              let patchedThoughts = ln.response
-              // TODO: hide blank thinking chunks
-              const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
-              patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => {
-                if (p1.trim().length > 0) {
-                  console.log(p1)
-                  return '`Thinking...`' + p1 + '`Finished thinking`'
-                } else {
-                  return ''
-                }
-              })
-              patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
-              patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
-              thoughts += patchedThoughts
-              fullResponse += patchedThoughts
-            } else {
-              fullResponse += ln.response
-            }
-            if (now - lastUpdate >= 1000) {
-              await rateLimiter.editMessageWithRetry(
-                ctx,
-                ctx.chat.id,
-                replyGenerating.message_id,
-                thoughts,
-                { parse_mode: 'Markdown' }
-              )
-              lastUpdate = now
-            }
-          }
+          ln = JSON.parse(line)
         } catch (e) {
           console.error("[✨ AI | !] Error parsing chunk:", e)
+          continue
         }
+
+        if (model === thinking_model) {
+          if (ln.response.includes('<think>')) {
+            const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
+            if (thinkMatch && thinkMatch[1].trim().length > 0) {
+              logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+            } else if (!thinkMatch) {
+              logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+            }
+          } else if (ln.response.includes('</think>')) {
+            logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
+          }
+        }
+
+        const now = Date.now()
+        if (ln.response) {
+          if (model === thinking_model) {
+            let patchedThoughts = ln.response
+            const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
+            patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '')
+            patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
+            patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
+            thoughts += patchedThoughts
+            fullResponse += patchedThoughts
+          } else {
+            fullResponse += ln.response
+          }
+          if (now - lastUpdate >= 1000) {
+            await rateLimiter.editMessageWithRetry(
+              ctx,
+              ctx.chat.id,
+              replyGenerating.message_id,
+              thoughts,
+              { parse_mode: 'Markdown' }
+            )
+            lastUpdate = now
+          }
+        }
       }
     }
 
     return {
-      "success": true,
-      "response": fullResponse,
+      success: true,
+      response: fullResponse,
     }
   } catch (error: any) {
     let shouldPullModel = false
-
-    if (error.response?.data?.error) {
-      if (error.response.data.error.includes(`model '${model}' not found`) || error.status === 404) {
+    if (error.response) {
+      const errData = error.response.data?.error
+      const errStatus = error.response.status
+      if (errData && (errData.includes(`model '${model}' not found`) || errStatus === 404)) {
         shouldPullModel = true
       } else {
-        console.error("[✨ AI | !] Error zone 1:", error.response.data.error)
-        return {
-          "success": false,
-          "error": error.response.data.error,
-        }
+        console.error("[✨ AI | !] Error zone 1:", errData)
+        return { success: false, error: errData }
       }
-    } else if (error.status === 404) {
-      shouldPullModel = true
+    } else if (error.request) {
+      console.error("[✨ AI | !] No response received:", error.request)
+      return { success: false, error: "No response received from server" }
+    } else {
+      console.error("[✨ AI | !] Error zone 3:", error.message)
+      return { success: false, error: error.message }
     }
 
     if (shouldPullModel) {
       ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`)
       console.log(`[✨ AI | i] Pulling ${model} from ollama...`)
-      let pullModelStream: any
-
       try {
-        pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
-          model: model,
-          stream: false,
-          timeout: process.env.ollamaApiTimeout || 10000,
-        })
+        await axios.post(
+          `${process.env.ollamaApi}/api/pull`,
+          {
+            model,
+            stream: false,
+            timeout: process.env.ollamaApiTimeout || 10000,
+          }
+        )
       } catch (e: any) {
-        console.error("[✨ AI | !] Something went wrong:", e.response.data.error)
-        return {
-          "success": false,
-          "error": `❌ Something went wrong while pulling ${model}, please try your command again!`,
+        if (e.response) {
+          console.error("[✨ AI | !] Something went wrong:", e.response.data?.error)
+          return {
+            success: false,
+            error: `❌ Something went wrong while pulling ${model}, please try your command again!`,
+          }
+        } else if (e.request) {
+          console.error("[✨ AI | !] No response received while pulling:", e.request)
+          return {
+            success: false,
+            error: `❌ No response received while pulling ${model}, please try again!`,
+          }
+        } else {
+          console.error("[✨ AI | !] Error while pulling:", e.message)
+          return {
+            success: false,
+            error: `❌ Error while pulling ${model}: ${e.message}`,
+          }
         }
       }
-
       console.log(`[✨ AI | i] ${model} pulled successfully`)
       return {
-        "success": true,
-        "response": `✅ Pulled ${model} successfully, please retry the command.`,
+        success: true,
+        response: `✅ Pulled ${model} successfully, please retry the command.`,
       }
     }
-
-    if (error.response) {
-      console.error("[✨ AI | !] Error zone 2:", error.response)
-      return {
-        "success": false,
-        "error": error.response,
-      }
-    }
-
-    if (error.statusText) {
-      console.error("[✨ AI | !] Error zone 3:", error.statusText)
-      return {
-        "success": false,
-        "error": error.statusText,
-      }
-    }
-
-    return {
-      "success": false,
-      "error": "An unexpected error occurred",
-    }
   }
 }
 
@@ -224,19 +218,16 @@ export default (bot: Telegraf) => {
   const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
 
   bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
-    if (!ctx.message || !('text' in ctx.message)) return;
+    if (!ctx.message || !('text' in ctx.message)) return
     const isAsk = ctx.message.text.startsWith("/ask")
     const model = isAsk ? flash_model : thinking_model
-    const textCtx = ctx as TextContext;
+    const textCtx = ctx as TextContext
     const reply_to_message_id = replyToMessageId(textCtx)
     const Strings = getStrings(languageCode(textCtx))
     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
 
-    logger.logCmdStart(
-      author,
-      model === flash_model ? "ask" : "think"
-    )
+    logger.logCmdStart(author, model === flash_model ? "ask" : "think")
 
     if (!process.env.ollamaApi) {
       await ctx.reply(Strings.aiDisabled, {
@@ -251,7 +242,7 @@ export default (bot: Telegraf) => {
       ...({ reply_to_message_id })
     })
 
-    const fixedMsg = message.replace(/\/ask /, "").replace(/\/think /, "")
+    const fixedMsg = message.replace(/\/(ask|think) /, "")
     if (fixedMsg.length < 1) {
       await ctx.reply(Strings.askNoMessage, {
         parse_mode: 'Markdown',
@@ -273,8 +264,8 @@ ${fixedMsg}`)
 
     const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model)
     if (!aiResponse) return
+    if (!ctx.chat) return
     if (aiResponse.success && aiResponse.response) {
-      if (!ctx.chat) return
       await rateLimiter.editMessageWithRetry(
         ctx,
         ctx.chat.id,
         replyGenerating.message_id,
         aiResponse.response,
         { parse_mode: 'Markdown' }
       )
-    } else {
-      if (!ctx.chat) return
-      const error = Strings.unexpectedErr.replace("{error}", aiResponse.error)
-      await rateLimiter.editMessageWithRetry(
-        ctx,
-        ctx.chat.id,
-        replyGenerating.message_id,
-        error,
-        { parse_mode: 'Markdown' }
-      )
+      return
     }
+    const error = Strings.unexpectedErr.replace("{error}", aiResponse.error)
+    await rateLimiter.editMessageWithRetry(
+      ctx,
+      ctx.chat.id,
+      replyGenerating.message_id,
+      error,
+      { parse_mode: 'Markdown' }
+    )
   })
 }
\ No newline at end of file