cleanup, bug fixes, better markdown parsing, better model display

Aidan 2025-06-30 11:24:51 -04:00
parent 04271f87b1
commit 5270d2cae5
4 changed files with 77 additions and 46 deletions


@@ -220,6 +220,10 @@ function extractAxiosErrorMessage(error: unknown): string {
 return 'An unexpected error occurred.';
 }
+
+function escapeMarkdown(text: string): string {
+return text.replace(/([*_])/g, '\\$1');
+}
 
 async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> {
 const Strings = getStrings(languageCode(ctx));
 if (!ctx.chat) {
@@ -228,6 +232,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 error: Strings.unexpectedErr.replace("{error}", "No chat found"),
 };
 }
+const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
 try {
 const aiResponse = await axios.post<unknown>(
 `${process.env.ollamaApi}/api/generate`,
@@ -246,6 +251,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 let fullResponse = "";
 let thoughts = "";
 let lastUpdate = Date.now();
+let sentHeader = false;
 const stream: NodeJS.ReadableStream = aiResponse.data as any;
 for await (const chunk of stream) {
 const lines = chunk.toString().split('\n');
@@ -275,23 +281,24 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 if (model === thinking_model) {
 let patchedThoughts = ln.response;
 const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
-patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '');
-patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`');
-patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`');
+patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '`' + Strings.ai.thinking + '`' + p1 + '`' + Strings.ai.finishedThinking + '`' : '');
+patchedThoughts = patchedThoughts.replace(/<think>/g, '`' + Strings.ai.thinking + '`');
+patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`' + Strings.ai.finishedThinking + '`');
 thoughts += patchedThoughts;
 fullResponse += patchedThoughts;
 } else {
 fullResponse += ln.response;
 }
-if (now - lastUpdate >= 1000) {
+if (now - lastUpdate >= 1000 || !sentHeader) {
 await rateLimiter.editMessageWithRetry(
 ctx,
 ctx.chat.id,
 replyGenerating.message_id,
-thoughts,
+modelHeader + fullResponse,
 { parse_mode: 'Markdown' }
 );
 lastUpdate = now;
+sentHeader = true;
 }
 }
 }
@@ -315,7 +322,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 Strings.ai.pulling.replace("{model}", model),
 { parse_mode: 'Markdown' }
 );
-console.log(`[✨ AI | i] Pulling ${model} from ollama...`);
+console.log(`[✨ AI] Pulling ${model} from ollama...`);
 try {
 await axios.post(
 `${process.env.ollamaApi}/api/pull`,
@@ -330,13 +337,13 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 console.error("[✨ AI | !] Pull error:", pullMsg);
 return {
 success: false,
-error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
+error: `❌ Something went wrong while pulling ${escapeMarkdown(model)}: ${escapeMarkdown(pullMsg)}`,
 };
 }
-console.log(`[✨ AI | i] ${model} pulled successfully`);
+console.log(`[✨ AI] ${model} pulled successfully`);
 return {
 success: true,
-response: `✅ Pulled ${model} successfully, please retry the command.`,
+response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`,
 };
 }
 }
@@ -347,13 +354,13 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 }
 }
 
-async function handleAiReply(ctx: TextContext, db: NodePgDatabase<typeof schema>, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
 const Strings = getStrings(languageCode(ctx));
 const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature);
 if (!aiResponse) return;
 if (!ctx.chat) return;
+const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
 if (aiResponse.success && aiResponse.response) {
-const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
 await rateLimiter.editMessageWithRetry(
 ctx,
 ctx.chat.id,
@@ -387,6 +394,14 @@ async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeo
 return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature };
 }
 
+export function getModelLabelByName(name: string): string {
+for (const series of models) {
+const found = series.models.find(m => m.name === name);
+if (found) return found.label;
+}
+return name;
+}
+
 export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
 const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
@@ -427,44 +442,56 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
 logger.logPrompt(fixedMsg)
 const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-await handleAiReply(textCtx, db, model, prompt, replyGenerating, aiTemperature)
+await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature)
 })
 
 bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
-if (!ctx.message || !('text' in ctx.message)) return
-const textCtx = ctx as TextContext
-const reply_to_message_id = replyToMessageId(textCtx)
-const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-const message = textCtx.message.text
-const author = ("@" + ctx.from?.username) || ctx.from?.first_name
+try {
+if (!ctx.message || !("text" in ctx.message)) return
+const textCtx = ctx as TextContext
+const reply_to_message_id = replyToMessageId(textCtx)
+const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+const message = textCtx.message.text
+const author = ("@" + ctx.from?.username) || ctx.from?.first_name
 logger.logCmdStart(author, "ask")
 if (!process.env.ollamaApi) {
 await ctx.reply(Strings.ai.disabled, {
+parse_mode: 'Markdown',
+...({ reply_to_message_id })
+})
+return
+}
+const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
+if (fixedMsg.length < 1) {
+await ctx.reply(Strings.ai.askNoMessage, {
+parse_mode: 'Markdown',
+...({ reply_to_message_id })
+})
+return
+}
+const modelLabel = getModelLabelByName(customAiModel)
+const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
 parse_mode: 'Markdown',
 ...({ reply_to_message_id })
 })
-return
+logger.logPrompt(fixedMsg)
+const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
+await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature)
+} catch (err) {
+const Strings = getStrings(languageCode(ctx));
+if (ctx && ctx.reply) {
+try {
+await ctx.reply(Strings.unexpectedErr.replace("{error}", (err && err.message ? err.message : String(err))), { parse_mode: 'Markdown' })
+} catch (e) {
+console.error("[✨ AI | !] Failed to send error reply:", e)
+}
+}
 }
-const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
-if (fixedMsg.length < 1) {
-await ctx.reply(Strings.ai.askNoMessage, {
-parse_mode: 'Markdown',
-...({ reply_to_message_id })
-})
-return
-}
-const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", customAiModel), {
-parse_mode: 'Markdown',
-...({ reply_to_message_id })
-})
-logger.logPrompt(fixedMsg)
-const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-await handleAiReply(textCtx, db, customAiModel, prompt, replyGenerating, aiTemperature)
 })
 }
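The two helpers this file gains are small enough to exercise on their own. A minimal sketch, assuming a hypothetical `models` registry shaped like the `{ models: [{ name, label }] }` series objects the lookup iterates over (the sample entries below are invented):

// Hypothetical registry; the real `models` array is defined elsewhere in this file.
const models = [
  { series: 'llama', models: [{ name: 'llama3.2:3b', label: 'Llama 3.2 (3B)' }] },
];

function getModelLabelByName(name: string): string {
  for (const series of models) {
    const found = series.models.find(m => m.name === name);
    if (found) return found.label;
  }
  return name; // fall back to the raw model name when no label is registered
}

function escapeMarkdown(text: string): string {
  return text.replace(/([*_])/g, '\\$1'); // escape the Markdown metacharacters Telegram trips on
}

console.log(getModelLabelByName('llama3.2:3b'));    // "Llama 3.2 (3B)"
console.log(getModelLabelByName('mystery:latest')); // "mystery:latest" (unregistered, raw name)
console.log(escapeMarkdown('my_model*v2'));         // "my\_model\*v2", safe inside *bold*/_italic_ markup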


@@ -7,7 +7,7 @@ import * as schema from '../db/schema';
 import { eq } from 'drizzle-orm';
 import { ensureUserInDb } from '../utils/ensure-user';
 import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
-import { models } from './ai';
+import { models, getModelLabelByName } from './ai';
 import { langs } from '../locales/config';
 
 type UserRow = typeof schema.usersTable.$inferSelect;
@@ -54,7 +54,7 @@ function getSettingsMenu(user: UserRow, Strings: any): SettingsMenu {
 inline_keyboard: [
 [
 { text: `${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: 'settings_aiEnabled' },
-{ text: `🧠 ${Strings.settings.ai.aiModel}: ${user.customAiModel}`, callback_data: 'settings_aiModel' }
+{ text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: 'settings_aiModel' }
 ],
 [
 { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: 'settings_aiTemperature' },
@@ -78,7 +78,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
 user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled
 ).replace(
 /{aiModel}/g,
-user.customAiModel
+getModelLabelByName(user.customAiModel)
 ).replace(
 /{aiTemperature}/g,
 user.aiTemperature.toString()
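The same lookup now feeds the `{aiModel}` placeholder in the settings text. A rough sketch of the substitution chain, reusing `getModelLabelByName` from the sketch above and an invented template string in place of the real locale entry:

const template = '🤖 AI: {aiEnabled} | 🧠 Model: {aiModel} | 🌡️ Temp: {aiTemperature}'; // invented example
const rendered = template
  .replace(/{aiEnabled}/g, 'enabled')
  .replace(/{aiModel}/g, getModelLabelByName('llama3.2:3b')) // friendly label, not the raw name
  .replace(/{aiTemperature}/g, (0.9).toString());
// → "🤖 AI: enabled | 🧠 Model: Llama 3.2 (3B) | 🌡️ Temp: 0.9"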


@@ -70,7 +70,9 @@
 "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
 "askGenerating": "✨ _{model} is working..._",
 "askNoMessage": "Please provide a message to ask the model.",
-"languageCode": "Language"
+"languageCode": "Language",
+"thinking": "Thinking...",
+"finishedThinking": "Finished thinking"
 },
 "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
 "maDownloadError": "Error downloading the file. Check the module ID and try again.",


@@ -69,7 +69,9 @@
 "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
 "askGenerating": "✨ _{model} está funcionando..._",
 "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
-"languageCode": "Idioma"
+"languageCode": "Idioma",
+"thinking": "Pensando...",
+"finishedThinking": "Pensamento finalizado"
 },
 "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
 "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",