diff --git a/README.md b/README.md
index 612db3a..211d46a 100644
--- a/README.md
+++ b/README.md
@@ -124,6 +124,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
 - **weatherKey**: Weather.com API key, used for the `/weather` command.
 - **longerLogs**: Set to `true` to enable verbose logging whenever possible.
 
+> [!NOTE]
+> More advanced fine-tuning and configuration can be done in TypeScript via the files in the `/config` folder.
+
 ## Troubleshooting
 
 ### YouTube Downloading
diff --git a/config/ai.ts b/config/ai.ts
new file mode 100644
index 0000000..5b653aa
--- /dev/null
+++ b/config/ai.ts
@@ -0,0 +1,93 @@
+import type { ModelInfo } from "../src/commands/ai"
+
+export const defaultFlashModel = "gemma3:4b"
+export const defaultThinkingModel = "qwen3:4b"
+export const unloadModelAfterB = 0.1 // models with at most this many billion parameters are unloaded right after they respond
+
+export const models: ModelInfo[] = [
+  {
+    name: 'gemma3n',
+    label: 'gemma3n',
+    descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
+    descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
+    models: [
+      { name: 'gemma3n:e2b', label: 'Gemma3n e2b', parameterSize: '2B' },
+      { name: 'gemma3n:e4b', label: 'Gemma3n e4b', parameterSize: '4B' },
+    ]
+  },
+  {
+    name: 'gemma3',
+    label: 'gemma3 [ & Uncensored ]',
+    descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
+    descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
+    models: [
+      { name: 'huihui_ai/gemma3-abliterated:1b', label: 'Gemma3 Uncensored 1B', parameterSize: '1B' },
+      { name: 'huihui_ai/gemma3-abliterated:4b', label: 'Gemma3 Uncensored 4B', parameterSize: '4B' },
+      { name: 'gemma3:1b', label: 'Gemma3 1B', parameterSize: '1B' },
+      { name: 'gemma3:4b', label: 'Gemma3 4B', parameterSize: '4B' },
+      { name: 'gemma3:12b', label: 'Gemma3 12B', parameterSize: '12B' },
+    ]
+  },
+  {
+    name: 'qwen3',
+    label: 'Qwen3',
+    descriptionEn: 'Qwen3 is a multilingual reasoning model series.',
+    descriptionPt: 'Qwen3 é uma série de modelos multilíngues de raciocínio.',
+    models: [
+      { name: 'qwen3:0.6b', label: 'Qwen3 0.6B', parameterSize: '0.6B' },
+      { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
+      { name: 'qwen3:8b', label: 'Qwen3 8B', parameterSize: '8B' },
+      { name: 'qwen3:14b', label: 'Qwen3 14B', parameterSize: '14B' },
+      { name: 'qwen3:30b', label: 'Qwen3 30B', parameterSize: '30B' },
+      { name: 'qwen3:235b-a22b', label: 'Qwen3 235B A22B', parameterSize: '235B' },
+    ]
+  },
+  {
+    name: 'qwq',
+    label: 'QwQ',
+    descriptionEn: 'QwQ is the reasoning model of the Qwen series.',
+    descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.',
+    models: [
+      { name: 'qwq:32b', label: 'QwQ 32B', parameterSize: '32B' },
+    ]
+  },
+  {
+    name: 'llama4',
+    label: 'Llama4',
+    descriptionEn: 'The latest collection of multimodal models from Meta.',
+    descriptionPt: 'A coleção mais recente de modelos multimodais da Meta.',
+    models: [
+      { name: 'llama4:scout', label: 'Llama4 109B A17B', parameterSize: '109B' },
+    ]
+  },
+  {
+    name: 'mistral',
+    label: 'Mistral',
+    descriptionEn: 'The 7B model released by Mistral AI, updated to version 0.3.',
+    descriptionPt: 'O modelo 7B lançado pela Mistral AI, atualizado para a versão 0.3.',
+    models: [
+      { name: 'mistral:7b', label: 'Mistral 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'deepseek',
+    label: 'DeepSeek [ & Uncensored ]',
+    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
+    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
+    models: [
+      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'phi3',
+    label: 'Phi3',
+    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
+    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
+    models: [
+      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
+    ]
+  }
+];
\ No newline at end of file
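
For reference, the customization the README note points at might look like the sketch below: a hypothetical edit to `config/ai.ts` (the `llama3.2` tags are illustrative and not part of this patch). Any family matching the `ModelInfo` shape can be appended to `models`:

```ts
// Hypothetical addition to config/ai.ts, not part of this patch.
import type { ModelInfo } from "../src/commands/ai"

const llama32: ModelInfo = {
  name: 'llama3.2',
  label: 'Llama3.2',
  descriptionEn: 'Llama 3.2 is a family of small multilingual models from Meta.',
  descriptionPt: 'Llama 3.2 é uma família de modelos multilíngues pequenos da Meta.',
  models: [
    // parameterSize feeds the keep_alive sizing rule in src/commands/ai.ts
    { name: 'llama3.2:1b', label: 'Llama3.2 1B', parameterSize: '1B' },
    { name: 'llama3.2:3b', label: 'Llama3.2 3B', parameterSize: '3B' },
  ]
};
// models.push(llama32), or list the object literal directly in the models array.
```
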
diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index 6b8706d..ebcd613 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -34,7 +34,6 @@ import { Telegraf, Context } from "telegraf"
 import type { Message } from "telegraf/types"
 import { replyToMessageId } from "../utils/reply-to-message-id"
 import { getStrings } from "../plugins/checklang"
-import { languageCode } from "../utils/language-code"
 import axios from "axios"
 import { rateLimiter } from "../utils/rate-limiter"
 import { logger } from "../utils/log"
@@ -42,6 +41,7 @@ import { ensureUserInDb } from "../utils/ensure-user"
 import * as schema from '../db/schema'
 import type { NodePgDatabase } from "drizzle-orm/node-postgres"
 import { eq, sql } from 'drizzle-orm'
+import { models, unloadModelAfterB } from "../../config/ai"
 
 const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
 export const flash_model = process.env.flashModel || "gemma3:4b"
@@ -51,7 +51,7 @@ type TextContext = Context & { message: Message.TextMessage }
 
 type User = typeof schema.usersTable.$inferSelect
 
-interface ModelInfo {
+export interface ModelInfo {
   name: string;
   label: string;
   descriptionEn: string;
@@ -67,59 +67,6 @@ interface OllamaResponse {
   response: string;
 }
 
-export const models: ModelInfo[] = [
-  {
-    name: 'gemma3n',
-    label: 'gemma3n',
-    descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
-    descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
-    models: [
-      { name: 'gemma3n:e2b', label: 'Gemma3n e2b', parameterSize: '2B' },
-      { name: 'gemma3n:e4b', label: 'Gemma3n e4b', parameterSize: '4B' },
-    ]
-  },
-  {
-    name: 'gemma3-abliterated',
-    label: 'gemma3 Uncensored',
-    descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
-    descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
-    models: [
-      { name: 'huihui_ai/gemma3-abliterated:1b', label: 'Gemma3-abliterated 1B', parameterSize: '1b' },
-      { name: 'huihui_ai/gemma3-abliterated:4b', label: 'Gemma3-abliterated 4B', parameterSize: '4b' },
-    ]
-  },
-  {
-    name: 'qwen3',
-    label: 'Qwen3',
-    descriptionEn: 'Qwen3 is a multilingual reasoning model series.',
-    descriptionPt: 'Qwen3 é uma série de modelos multilingues.',
-    models: [
-      { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
-    ]
-  },
-  {
-    name: 'deepseek',
-    label: 'DeepSeek',
-    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
-    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
-    models: [
-      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
-      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
-    ]
-  },
-  {
-    name: 'phi3',
-    label: 'Phi3',
-    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
-    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
-    models: [
-      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
-    ]
-  }
-];
-
 async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string, message: string): Promise<string> {
   const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
   if (user.length === 0) await ensureUserInDb(ctx, db);
@@ -263,16 +210,11 @@ function extractAxiosErrorMessage(error: unknown): string {
   return 'An unexpected error occurred.';
 }
 
-function escapeMarkdown(text: string): string {
-  return text.replace(/([_*\[\]()`>#\+\-=|{}.!~])/g, '\\$1');
-}
-
 function containsUrls(text: string): boolean {
-  return text.includes('http://') || text.includes('https://');
+  return text.includes('http://') || text.includes('https://') || text.includes('.com') || text.includes('.net') || text.includes('.org') || text.includes('.io') || text.includes('.ai') || text.includes('.dev')
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string): Promise<{ success: boolean; response?: string; error?: string }> {
-  const Strings = getStrings(languageCode(ctx));
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean): Promise<{ success: boolean; response?: string; error?: string }> {
   if (!ctx.chat) {
     return {
       success: false,
@@ -289,6 +231,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message,
   await db.update(schema.usersTable)
     .set({ aiCharacters: sql`${schema.usersTable.aiCharacters} + ${promptCharCount}` })
     .where(eq(schema.usersTable.telegramId, userId));
+  const paramSizeStr = models.find(s => s.models.some(m => m.name === model))?.models.find(m => m.name === model)?.parameterSize?.replace('B', '');
+  const shouldKeepAlive = paramSizeStr ? Number(paramSizeStr) > unloadModelAfterB : false;
 
   try {
     const aiResponse = await axios.post(
@@ -297,6 +241,7 @@
         model,
         prompt,
         stream: true,
+        keep_alive: shouldKeepAlive ? '1' : '0',
         options: {
           temperature: aiTemperature
         }
@@ -311,6 +256,16 @@
     let sentHeader = false;
     let firstChunk = true;
     const stream: NodeJS.ReadableStream = aiResponse.data as any;
+    let thinkingMessageSent = false;
+    let finalResponseText = '';
+
+    const formatThinkingMessage = (text: string) => {
+      const withPlaceholders = text
+        .replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
+        .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+      return sanitizeMarkdownForTelegram(withPlaceholders);
+    };
+
     for await (const chunk of stream) {
       const lines = chunk.toString().split('\n');
       for (const line of lines) {
@@ -322,6 +277,22 @@
           console.error("[✨ AI | !] Error parsing chunk");
           continue;
         }
+        if (model === thinking_model && !showThinking) {
+          if (ln.response) {
+            finalResponseText += ln.response;
+            if (finalResponseText.includes('<think>') && !thinkingMessageSent) {
+              await rateLimiter.editMessageWithRetry(
+                ctx,
+                ctx.chat.id,
+                replyGenerating.message_id,
+                modelHeader + Strings.ai.thinking,
+                { parse_mode: 'Markdown' }
+              );
+              thinkingMessageSent = true;
+            }
+          }
+          continue;
+        }
         if (model === thinking_model && ln.response) {
           if (ln.response.includes('<think>')) {
             const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/);
@@ -338,9 +309,9 @@
         if (model === thinking_model) {
           let patchedThoughts = ln.response;
           const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
-          patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '`' + Strings.ai.thinking + '`' + p1 + '`' + Strings.ai.finishedThinking + '`' : '');
-          patchedThoughts = patchedThoughts.replace(/<think>/g, '`' + Strings.ai.thinking + '`');
-          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`' + Strings.ai.finishedThinking + '`');
+          patchedThoughts = patchedThoughts.replace(thinkTagRx, (_match, p1) => p1.trim().length > 0 ? '___THINK_START___' + p1.trim() + '___THINK_END___' : '');
+          patchedThoughts = patchedThoughts.replace(/<think>/g, '___THINK_START___');
+          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '___THINK_END___');
           thoughts += patchedThoughts;
           fullResponse += patchedThoughts;
         } else {
@@ -356,7 +327,7 @@
             ctx,
             ctx.chat.id,
             replyGenerating.message_id,
-            modelHeader + escapeMarkdown(fullResponse),
+            modelHeader + formatThinkingMessage(fullResponse),
             { parse_mode: 'Markdown' }
           );
           lastUpdateCharCount = fullResponse.length;
@@ -370,7 +341,7 @@
             ctx,
             ctx.chat.id,
             replyGenerating.message_id,
-            modelHeader + escapeMarkdown(fullResponse),
+            modelHeader + formatThinkingMessage(fullResponse),
             { parse_mode: 'Markdown' }
           );
           lastUpdateCharCount = fullResponse.length;
@@ -379,6 +350,10 @@
         }
       }
     }
+    if (model === thinking_model && !showThinking) {
+      const cleanedResponse = finalResponseText.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
+      return { success: true, response: cleanedResponse };
+    }
     status = Strings.ai.statusRendering;
     modelHeader = Strings.ai.modelHeader
       .replace("{model}", model)
@@ -388,7 +363,7 @@
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + escapeMarkdown(fullResponse),
+      modelHeader + formatThinkingMessage(fullResponse),
       { parse_mode: 'Markdown' }
     );
     const responseCharCount = fullResponse.length;
@@ -432,13 +407,13 @@
       console.error("[✨ AI | !] Pull error:", pullMsg);
       return {
         success: false,
-        error: `❌ Something went wrong while pulling ${escapeMarkdown(model)}: ${escapeMarkdown(pullMsg)}`,
+        error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
       };
     }
     console.log(`[✨ AI] ${model} pulled successfully`);
     return {
       success: true,
-      response: Strings.ai.pulled.replace("{model}", escapeMarkdown(model)),
+      response: Strings.ai.pulled.replace("{model}", model),
     };
   }
 }
@@ -449,9 +424,8 @@
   }
 }
 
-async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string) {
-  const Strings = getStrings(languageCode(ctx));
-  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId);
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean) {
+  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId, Strings, showThinking);
   if (!aiResponse) return;
   if (!ctx.chat) return;
   if (aiResponse.success && aiResponse.response) {
@@ -461,11 +435,17 @@
       .replace("{temperature}", aiTemperature)
       .replace("{status}", status) + "\n\n";
     const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
+    let finalResponse = aiResponse.response;
+    if (model === thinking_model) {
+      finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
+        .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+    }
+
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + sanitizeMarkdownForTelegram(aiResponse.response) + urlWarning,
+      modelHeader + sanitizeMarkdownForTelegram(finalResponse) + urlWarning,
       { parse_mode: 'Markdown' }
     );
     return;
@@ -480,7 +460,7 @@
   );
 }
 
-async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeof schema>): Promise<{ user: User; Strings: ReturnType<typeof getStrings>; languageCode: string; customAiModel: string; aiTemperature: number }> {
+async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeof schema>): Promise<{ user: User; Strings: ReturnType<typeof getStrings>; languageCode: string; customAiModel: string; aiTemperature: number, showThinking: boolean }> {
   const userArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
   let user = userArr[0];
   if (!user) {
@@ -488,10 +468,10 @@
     const newUserArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
     user = newUserArr[0];
     const Strings = getStrings(user.languageCode);
-    return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature };
+    return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking };
   }
   const Strings = getStrings(user.languageCode);
-  return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature };
+  return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking };
 }
 
 export function getModelLabelByName(name: string): string {
@@ -547,7 +527,7 @@ export default (bot: Telegraf, db: NodePgDatabase<typeof schema>) => {
   async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') {
     const reply_to_message_id = replyToMessageId(ctx);
-    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(ctx, db);
+    const { user, Strings, customAiModel, aiTemperature, showThinking } = await getUserWithStringsAndModel(ctx, db);
     const message = ctx.message.text;
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown";
@@ -586,7 +566,7 @@
       ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
     });
     const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg));
-    await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId);
+    await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId, Strings, showThinking);
   };
 
   if (isProcessing) {
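
A note on the reasoning-tag handling above: while streaming, `<think>`/`</think>` are swapped for `___THINK_START___`/`___THINK_END___` placeholders so partial chunks stay Markdown-safe, and at render time the placeholders become the localized `Strings.ai.thinking`/`finishedThinking` labels, or the whole block is dropped when `showThinking` is off. A minimal self-contained sketch of that pipeline (helper names are ours, not the bot's code):

```ts
// Sketch of the think-tag pipeline in src/commands/ai.ts (assumed behavior).
const THINK_START = '___THINK_START___';
const THINK_END = '___THINK_END___';

// Streaming phase: swap tags for placeholders so raw tags never reach Telegram.
function toPlaceholders(chunk: string): string {
  return chunk
    .replace(/<think>([\s\S]*?)<\/think>/g, (_m, p1: string) =>
      p1.trim().length > 0 ? THINK_START + p1.trim() + THINK_END : '')
    .replace(/<think>/g, THINK_START)
    .replace(/<\/think>/g, THINK_END);
}

// Render phase: localize the placeholders, or strip the block entirely.
function render(full: string, labels: { thinking: string; done: string }, showThinking: boolean): string {
  if (!showThinking) {
    return full.replace(new RegExp(`${THINK_START}[\\s\\S]*?${THINK_END}`, 'g'), '').trim();
  }
  return full.replace(new RegExp(THINK_START, 'g'), labels.thinking)
             .replace(new RegExp(THINK_END, 'g'), labels.done);
}

console.log(render(toPlaceholders('<think>plan steps</think>Answer.'),
  { thinking: '🧠 Thinking...', done: '🧠 Done.' }, false)); // "Answer."
```
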
diff --git a/src/commands/main.ts b/src/commands/main.ts
index 4c894ec..9478a13 100644
--- a/src/commands/main.ts
+++ b/src/commands/main.ts
@@ -7,7 +7,8 @@ import * as schema from '../db/schema';
 import { eq } from 'drizzle-orm';
 import { ensureUserInDb } from '../utils/ensure-user';
 import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
-import { models, getModelLabelByName } from './ai';
+import { getModelLabelByName } from './ai';
+import { models } from '../../config/ai';
 import { langs } from '../locales/config';
 
 type UserRow = typeof schema.usersTable.$inferSelect;
@@ -55,11 +56,14 @@
     inline_keyboard: [
       [
         { text: `✨ ${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: `settings_aiEnabled_${userId}` },
-        { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: `settings_aiModel_${userId}` }
+        { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: `settings_aiModel_0_${userId}` }
       ],
       [
-        { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: `settings_aiTemperature_${userId}` },
+        { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: `settings_aiTemperature_${userId}` },
         { text: `🌐 ${langLabel}`, callback_data: `settings_language_${userId}` }
+      ],
+      [
+        { text: `🧠 ${Strings.settings.ai.showThinking}: ${user.showThinking ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: `settings_showThinking_${userId}` }
       ]
     ]
   }
@@ -83,6 +87,22 @@ function logSettingsAccess(action: string, ctx: Context, allowed: boolean, expectedUserId: string) {
   }
 }
 
+function handleTelegramError(err: any, context: string) {
+  const description = err?.response?.description || '';
+  const ignoredErrors = [
+    'query is too old',
+    'query ID is invalid',
+    'message is not modified',
+    'message to edit not found',
+  ];
+
+  const isIgnored = ignoredErrors.some(errorString => description.includes(errorString));
+
+  if (!isIgnored) {
+    console.error(`[${context}] Unexpected Telegram error:`, err);
+  }
+}
+
 export default (bot: Telegraf, db: NodePgDatabase<typeof schema>) => {
   bot.start(spamwatchMiddleware, async (ctx: Context) => {
     const { user, Strings } = await getUserAndStrings(ctx, db);
@@ -155,7 +175,26 @@
     await updateSettingsKeyboard(ctx, updatedUser, Strings);
   });
 
-  bot.action(/^settings_aiModel_\d+$/, async (ctx) => {
+  bot.action(/^settings_showThinking_\d+$/, async (ctx) => {
+    const data = (ctx.callbackQuery as any).data;
+    const userId = extractUserIdFromCallback(data);
+    const allowed = !!userId && String(ctx.from.id) === userId;
+    logSettingsAccess('settings_showThinking', ctx, allowed, userId);
+    if (!allowed) {
+      const { Strings } = await getUserAndStrings(ctx, db);
+      return ctx.answerCbQuery(getNotAllowedMessage(Strings), { show_alert: true });
+    }
+    await ctx.answerCbQuery();
+    const { user, Strings } = await getUserAndStrings(ctx, db);
+    if (!user) return;
+    await db.update(schema.usersTable)
+      .set({ showThinking: !user.showThinking })
+      .where(eq(schema.usersTable.telegramId, String(user.telegramId)));
+    const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0];
+    await updateSettingsKeyboard(ctx, updatedUser, Strings);
+  });
+
+  bot.action(/^settings_aiModel_(\d+)_(\d+)$/, async (ctx) => {
     const data = (ctx.callbackQuery as any).data;
     const userId = extractUserIdFromCallback(data);
     const allowed = !!userId && String(ctx.from.id) === userId;
@@ -167,30 +206,54 @@
     await ctx.answerCbQuery();
     const { user, Strings } = await getUserAndStrings(ctx, db);
     if (!user) return;
+
+    const match = data.match(/^settings_aiModel_(\d+)_/);
+    if (!match) return;
+
+    const page = parseInt(match[1], 10);
+    const pageSize = 4;
+    const start = page * pageSize;
+    const end = start + pageSize;
+
+    const paginatedModels = models.slice(start, end);
+
+    const buttons = paginatedModels.map((series, idx) => {
+      const originalIndex = start + idx;
+      const isSelected = series.models.some(m => m.name === user.customAiModel);
+      const label = isSelected ? `✅ ${series.label}` : series.label;
+      return { text: label, callback_data: `selectseries_${originalIndex}_${user.telegramId}` };
+    });
+
+    const navigationButtons: any[] = [];
+    if (page > 0) {
+      navigationButtons.push({ text: Strings.varStrings.varBack, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
+    }
+    if (end < models.length) {
+      navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `settings_aiModel_${page + 1}_${user.telegramId}` });
+    }
+
+    const keyboard: any[][] = [];
+    for (const button of buttons) {
+      keyboard.push([button]);
+    }
+
+    if (navigationButtons.length > 0) {
+      keyboard.push(navigationButtons);
+    }
+    keyboard.push([{ text: `${Strings.varStrings.varBack}`, callback_data: `settings_back_${user.telegramId}` }]);
+
     try {
       await ctx.editMessageText(
         `${Strings.settings.ai.selectSeries}`,
         {
           parse_mode: 'Markdown',
           reply_markup: {
-            inline_keyboard: models.map((series, idx) => [
-              { text: series.label, callback_data: `selectseries_${idx}_${user.telegramId}` }
-            ]).concat([[
-              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_back_${user.telegramId}` }
-            ]])
+            inline_keyboard: keyboard
           }
         }
       );
     } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'settings_aiModel');
     }
   });
 
@@ -211,30 +274,26 @@
     const seriesIdx = parseInt(match[1], 10);
     const series = models[seriesIdx];
     if (!series) return;
+    const pageSize = 4;
+    const page = Math.floor(seriesIdx / pageSize);
     const desc = user.languageCode === 'pt' ? series.descriptionPt : series.descriptionEn;
     try {
       await ctx.editMessageText(
-        `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label)}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
+        `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label).replace(' [ & Uncensored ]', '')}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
         {
           reply_markup: {
-            inline_keyboard: series.models.map((m, idx) => [
-              { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }
-            ]).concat([[
-              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${user.telegramId}` }
+            inline_keyboard: series.models.map((m, idx) => {
+              const isSelected = m.name === user.customAiModel;
+              const label = isSelected ? `✅ ${m.label}` : m.label;
+              return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }];
+            }).concat([[
+              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${page}_${user.telegramId}` }
             ]])
           }
         }
       );
     } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'selectseries');
     }
   });
 
@@ -278,15 +337,7 @@
         });
       }
     } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('[Settings] Unexpected Telegram error:', err);
+      handleTelegramError(err, 'setmodel');
     }
   });
 
@@ -320,15 +371,7 @@
         }
       );
     } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'settings_aiTemperature');
     }
   });
 
@@ -354,15 +397,7 @@
         ])
       });
     } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'show_more_temps');
     }
   });
 
@@ -409,15 +444,7 @@
        }
      );
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'settings_language');
    }
  });
 
@@ -450,15 +477,7 @@
        });
      }
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('[Settings] Unexpected Telegram error:', err);
+      handleTelegramError(err, 'settings_back');
    }
  });
 
@@ -500,15 +519,7 @@
        });
      }
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('[Settings] Unexpected Telegram error:', err);
+      handleTelegramError(err, 'setlang');
    }
  });
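
The new `settings_aiModel_<page>_<userId>` callbacks page through the model families four at a time, with Back/More buttons only when a previous or next page exists. A distilled sketch of the page math (plain strings stand in for the real Telegram button objects and `Strings` labels):

```ts
// Sketch of the settings_aiModel pagination above; pageSize 4 matches the patch.
type Series = { label: string };

function buildModelKeyboardPage(all: Series[], page: number, pageSize = 4): string[][] {
  const start = page * pageSize;
  const visible = all.slice(start, start + pageSize);
  const rows = visible.map(s => [s.label]);                        // one family per row
  const nav: string[] = [];
  if (page > 0) nav.push(`<- page ${page - 1}`);                   // "Back" button
  if (start + pageSize < all.length) nav.push(`page ${page + 1} ->`); // "More" button
  if (nav.length > 0) rows.push(nav);
  return rows;
}

// 8 families with pageSize 4: page 0 shows four rows plus a "More" row.
console.log(buildModelKeyboardPage(
  ['gemma3n', 'gemma3', 'qwen3', 'qwq', 'llama4', 'mistral', 'deepseek', 'phi3'].map(label => ({ label })),
  0
));
```
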
diff --git a/src/db/schema.ts b/src/db/schema.ts
index 208bc56..09fdb72 100644
--- a/src/db/schema.ts
+++ b/src/db/schema.ts
@@ -13,6 +13,7 @@ export const usersTable = pgTable("users", {
   firstName: varchar({ length: 255 }).notNull(),
   lastName: varchar({ length: 255 }).notNull(),
   aiEnabled: boolean().notNull().default(false),
+  showThinking: boolean().notNull().default(false),
   customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"),
   aiTemperature: real().notNull().default(0.9),
   aiRequests: integer().notNull().default(0),
diff --git a/src/locales/english.json b/src/locales/english.json
index 1a1471e..0f6f875 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -74,16 +74,16 @@
     "askGenerating": "✨ Generating response with {model}...",
     "askNoMessage": "✨ You need to ask me a question!",
     "languageCode": "Language",
-    "thinking": "Thinking...",
-    "finishedThinking": "Done.",
+    "thinking": "`🧠 Thinking...`",
+    "finishedThinking": "`🧠 Done thinking.`",
     "urlWarning": "\n\n⚠️ Note: The model cannot access or visit links!",
     "inQueue": "ℹ️ You are {position} in the queue.",
     "startingProcessing": "✨ Starting to process your request...",
-    "systemPrompt": "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}",
+    "systemPrompt": "You are a friendly assistant called {botName}.\nCurrent Date/Time (UTC): {date}\n\n---\n\nUser message:\n{message}",
     "statusWaitingRender": "⏳ Waiting to Render...",
     "statusRendering": "🖼️ Rendering...",
     "statusComplete": "✅ Complete!",
-    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
     "noChatFound": "No chat found",
     "pulled": "✅ Pulled {model} successfully, please retry the command.",
     "selectTemperature": "*Please select a temperature:*",
@@ -125,11 +125,12 @@
     "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
     "selectSeries": "*Please select a model series.*",
     "seriesDescription": "{seriesDescription}",
-    "selectParameterSize": "Please select a parameter size for {seriesLabel}.",
+    "selectParameterSize": "*Please select a parameter size for {seriesLabel}*.",
     "parameterSizeExplanation": "Parameter size (e.g. 2B, 4B) refers to the number of parameters in the model. Larger models may be more capable but require more resources.",
     "modelSetTo": "Model set to {aiModel} ({parameterSize})",
     "selectTemperature": "*Please select a temperature:*",
-    "temperatureExplanation": "Temperature controls the randomness of the AI's responses. Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random."
+    "temperatureExplanation": "Temperature controls the randomness of the AI's responses. Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random.",
+    "showThinking": "Show Model Thinking"
   },
   "selectLanguage": "*Please select a language:*",
   "languageCodeSetTo": "Language set to {languageCode}",
diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json
index e1026eb..6571b12 100644
--- a/src/locales/portuguese.json
+++ b/src/locales/portuguese.json
@@ -69,26 +69,26 @@
     "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento\n- /ai ``: Fazer uma pergunta a um modelo de IA personalizado\n- /aistats: Mostra suas estatísticas de uso de IA",
     "disabled": "A AIApi foi desativada\\.",
     "disabledForUser": "As funções de IA estão desativadas para a sua conta. Você pode ativá-las com o comando /settings.",
-    "pulling": "O modelo {model} não foi encontrado localmente, baixando\\.\\.\\.",
-    "askGenerating": "Gerando resposta com {model}\\.\\.\\.",
-    "askNoMessage": "Você precisa fazer uma pergunta\\.",
-    "thinking": "Pensando\\.\\.\\.",
-    "finishedThinking": "Pronto\\.",
+    "pulling": "🔄 Modelo {model} não encontrado localmente, baixando...",
+    "askGenerating": "✨ Gerando resposta com {model}...",
+    "askNoMessage": "⚠️ Você precisa fazer uma pergunta.",
+    "thinking": "`🧠 Pensando...`",
+    "finishedThinking": "`🧠 Pensamento concluído.`",
     "urlWarning": "\n\n⚠️ Nota: O modelo de IA não pode acessar ou visitar links!",
     "inQueue": "ℹ️ Você é o {position} na fila.",
-    "startingProcessing": "✨ Começando a processar o seu pedido\\.\\.\\.",
+    "startingProcessing": "✨ Começando a processar o seu pedido...",
     "aiEnabled": "IA",
-    "aiModel": "Modelo",
+    "aiModel": "Modelo de IA",
     "aiTemperature": "Temperatura",
-    "selectSeries": "*Por favor, selecione uma série de modelos.*",
+    "selectSeries": "*Por favor, selecione uma série de modelos de IA.*",
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
-    "systemPrompt": "Você é um assistente de Telegram chamado {botName}, capaz de Telegram MarkdownV2.\nVocê está em um chat com um usuário, que enviou uma mensagem para você.\nData/Hora atual (UTC): {date}\n\n---\n\nResponda à mensagem do usuário:\n{message}",
+    "systemPrompt": "Você é um assistente de Telegram chamado {botName}.\nData/Hora atual (UTC): {date}\n\n---\n\nMensagem do usuário:\n{message}",
     "statusWaitingRender": "⏳ Aguardando renderização...",
     "statusRendering": "🖼️ Renderizando...",
     "statusComplete": "✅ Completo!",
-    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
     "noChatFound": "Nenhum chat encontrado",
     "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente."
   },
@@ -132,7 +132,8 @@
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
     "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})",
     "selectTemperature": "*Por favor, selecione uma temperatura:*",
-    "temperatureExplanation": "A temperatura controla a aleatoriedade das respostas da IA. Valores mais baixos (ex: 0.2) tornam o modelo mais focado e determinístico, enquanto valores mais altos (ex: 1.2 ou mais) tornam as respostas mais criativas e aleatórias."
+    "temperatureExplanation": "A temperatura controla a aleatoriedade das respostas da IA. Valores mais baixos (ex: 0.2) tornam o modelo mais focado e determinístico, enquanto valores mais altos (ex: 1.2 ou mais) tornam as respostas mais criativas e aleatórias.",
+    "showThinking": "Mostrar Pensamento do Modelo"
   },
   "selectLanguage": "*Por favor, selecione um idioma:*",
   "languageCodeSetTo": "Idioma definido para {languageCode}",
diff --git a/src/utils/ensure-user.ts b/src/utils/ensure-user.ts
index 7726ffb..5322992 100644
--- a/src/utils/ensure-user.ts
+++ b/src/utils/ensure-user.ts
@@ -47,6 +47,7 @@ export async function ensureUserInDb(ctx, db) {
     lastName,
     languageCode,
     aiEnabled: false,
+    showThinking: false,
     customAiModel: "deepseek-r1:1.5b",
     aiTemperature: 0.9,
     aiRequests: 0,
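
Tying the two ends of the patch together: `unloadModelAfterB` in `config/ai.ts` drives the `keep_alive` value sent to Ollama in `getResponse`, so very small models are evicted right after responding. A minimal sketch of that decision (the helper name and trimmed-down types are ours; the `'1'`/`'0'` values mirror the patch):

```ts
// Sketch of the keep_alive sizing rule, under the patch's assumptions.
type Model = { name: string; parameterSize: string };  // e.g. '0.6B'
type Family = { name: string; models: Model[] };

const unloadModelAfterB = 0.1; // models at or below 0.1B params get keep_alive '0'

function keepAliveFor(families: Family[], model: string): '1' | '0' {
  // Locate the family containing the tag, then the exact variant.
  const family = families.find(f => f.models.some(m => m.name === model));
  const size = family?.models.find(m => m.name === model)?.parameterSize?.replace('B', '');
  return size && Number(size) > unloadModelAfterB ? '1' : '0';
}

const families: Family[] = [
  { name: 'qwen3', models: [{ name: 'qwen3:0.6b', parameterSize: '0.6B' }] },
];
console.log(keepAliveFor(families, 'qwen3:0.6b')); // '1': 0.6B > 0.1B, keep it loaded
```
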