From ea0ae1a47dd50bdde8d23d939571dabe14bddf2c Mon Sep 17 00:00:00 2001 From: Aidan Date: Fri, 27 Jun 2025 18:19:30 -0400 Subject: [PATCH 01/21] KOW-7 fix: better validation --- src/commands/ponyapi.ts | 26 ++++++++++++++++++++++++++ src/locales/english.json | 2 ++ src/locales/portuguese.json | 2 ++ 3 files changed, 30 insertions(+) diff --git a/src/commands/ponyapi.ts b/src/commands/ponyapi.ts index 2202949..daf99c7 100644 --- a/src/commands/ponyapi.ts +++ b/src/commands/ponyapi.ts @@ -74,6 +74,15 @@ export default (bot: Telegraf) => { return; } + // if special characters or numbers (max 30 characters) + if (/[^a-zA-Z\s]/.test(userInput) || userInput.length > 30) { + ctx.reply(Strings.mlpInvalidCharacter, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }); + return; + } + const capitalizedInput = capitalizeFirstLetter(userInput); const apiUrl = `${Resources.ponyApi}/character/${capitalizedInput}`; @@ -148,6 +157,14 @@ export default (bot: Telegraf) => { return; } + if (Number(userInput) > 100) { + ctx.reply(Strings.mlpInvalidEpisode, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }); + return; + } + const apiUrl = `${Resources.ponyApi}/episode/by-overall/${userInput}`; try { @@ -218,6 +235,15 @@ export default (bot: Telegraf) => { return; }; + // if special characters or numbers (max 30 characters) + if (/[^a-zA-Z\s]/.test(userInput) || userInput.length > 30) { + ctx.reply(Strings.mlpInvalidCharacter, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }); + return; + } + const apiUrl = `${Resources.ponyApi}/comics-story/${userInput}`; try { diff --git a/src/locales/english.json b/src/locales/english.json index 0d9f6dd..4bc9c06 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -84,6 +84,8 @@ "catImgErr": "Sorry, but I couldn't get the cat photo you wanted.", "catGifErr": "Sorry, but I couldn't get the cat GIF you wanted.", "dogImgErr": "Sorry, but I couldn't get the dog photo you wanted.", + "mlpInvalidCharacter": "Please provide a valid character name.", + "mlpInvalidEpisode": "Please provide a valid episode number.", "foxApiErr": "An error occurred while fetching data from the API.\n\n`{error}`", "duckApiErr": "An error occurred while fetching data from the API.\n\n`{error}`", "httpCodes": { diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json index 5416fa5..0e13aaf 100644 --- a/src/locales/portuguese.json +++ b/src/locales/portuguese.json @@ -84,6 +84,8 @@ "catImgErr": "Desculpe, mas não consegui obter a foto do gato que você queria.", "catGifErr": "Desculpe, mas não consegui obter o GIF do gato que você queria.", "dogImgErr": "Desculpe, mas não consegui obter a foto do cacbhorro que você queria.", + "mlpInvalidCharacter": "Por favor, forneça um nome de personagem válido.", + "mlpInvalidEpisode": "Por favor, forneça um número de episódio válido.", "foxApiErr": "Ocorreu um erro ao buscar dados da API.\n\n`{error}`", "duckApiErr": "Ocorreu um erro ao buscar dados da API.\n\n`{error}`", "httpCodes": { From 0c364a181458338bdcc41681ea8378a9041a5ee4 Mon Sep 17 00:00:00 2001 From: Aidan Date: Fri, 27 Jun 2025 19:03:38 -0400 Subject: [PATCH 02/21] KOW-2 cleaner search and better codename search w/ fallback, export codename search --- src/commands/codename.ts | 24 +++++++++--------------- src/commands/gsmarena.ts | 25 ++++++++++++++++++++----- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/commands/codename.ts b/src/commands/codename.ts index ba0e3b4..11e3e65 100644 --- a/src/commands/codename.ts +++ 
b/src/commands/codename.ts @@ -14,21 +14,18 @@ interface Device { brand: string; codename: string; model: string; + name: string; } -async function getDeviceList({ Strings, ctx }: { Strings: any, ctx: Context & { message: { text: string } } }) { - const reply_to_message_id = replyToMessageId(ctx); +export async function getDeviceByCodename(codename: string): Promise { try { const response = await axios.get(Resources.codenameApi); - return response.data + const jsonRes = response.data; + const deviceDetails = jsonRes[codename]; + if (!deviceDetails) return null; + return deviceDetails.find((item: Device) => item.brand) || deviceDetails[0]; } catch (error) { - const message = Strings.codenameCheck.apiErr - .replace('{error}', error.message); - - return ctx.reply(message, { - parse_mode: "Markdown", - ...({ reply_to_message_id }) - }); + return null; } } @@ -43,18 +40,15 @@ export default (bot: Telegraf) => { return; } - const jsonRes = await getDeviceList({ Strings, ctx }) - const phoneSearch = Object.keys(jsonRes).find((codename) => codename === userInput); + const device = await getDeviceByCodename(userInput); - if (!phoneSearch) { + if (!device) { return ctx.reply(Strings.codenameCheck.notFound, { parse_mode: "Markdown", ...({ reply_to_message_id }) }); } - const deviceDetails = jsonRes[phoneSearch]; - const device = deviceDetails.find((item: Device) => item.brand) || deviceDetails[0]; const message = Strings.codenameCheck.resultMsg .replace('{brand}', device.brand) .replace('{codename}', userInput) diff --git a/src/commands/gsmarena.ts b/src/commands/gsmarena.ts index c44206a..1636c01 100644 --- a/src/commands/gsmarena.ts +++ b/src/commands/gsmarena.ts @@ -8,6 +8,7 @@ import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import axios from 'axios'; import { parse } from 'node-html-parser'; +import { getDeviceByCodename } from './codename'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -216,12 +217,27 @@ export default (bot) => { return ctx.reply("Please provide the phone name.", { reply_to_message_id: ctx.message.message_id }); } - const results = await searchPhone(phone); + console.log("[GSMArena] Searching for", phone); + const statusMsg = await ctx.reply(`Searching for \`${phone}\`...`, { reply_to_message_id: ctx.message.message_id, parse_mode: 'Markdown' }); + + let results = await searchPhone(phone); if (results.length === 0) { - return ctx.reply("No phones found.", { reply_to_message_id: ctx.message.message_id }); + const codenameResults = await getDeviceByCodename(phone.split(" ")[0]); + if (!codenameResults) { + await ctx.telegram.editMessageText(ctx.chat.id, statusMsg.message_id, undefined, `No phones found for \`${phone}\`.`, { parse_mode: 'Markdown' }); + return; + } + + await ctx.telegram.editMessageText(ctx.chat.id, statusMsg.message_id, undefined, `Searching for ${codenameResults.name}...`, { parse_mode: 'Markdown' }); + const nameResults = await searchPhone(codenameResults.name); + if (nameResults.length === 0) { + await ctx.telegram.editMessageText(ctx.chat.id, statusMsg.message_id, undefined, `No phones found for \`${codenameResults.name}\` and \`${phone}\`.`, { parse_mode: 'Markdown' }); + return; + } + results = nameResults; } - const testUser = `${userName}, please select your device:`; + const testUser = `${userName}, please select your device:`; const options = { parse_mode: 'HTML', reply_to_message_id: ctx.message.message_id, @@ -230,8 +246,7 @@ export default (bot) => { 
inline_keyboard: results.map(result => [{ text: result.name, callback_data: `details:${result.url}:${ctx.from.id}` }]) } }; - ctx.reply(testUser, options); - + await ctx.telegram.editMessageText(ctx.chat.id, statusMsg.message_id, undefined, testUser, options); }); bot.action(/details:(.+):(.+)/, async (ctx) => { From 81294f572121887162bc329ad91909b6769e6dd4 Mon Sep 17 00:00:00 2001 From: Lucas Gabriel Date: Sat, 28 Jun 2025 16:22:15 -0300 Subject: [PATCH 03/21] [FEATURE] Add AI-based /ask command (complementing #54) (#56) * docs: add ai documentation * docker: update docker files for ai/regular versions, lint * feat: add initial /ask command * Delete docker-compose.yml * docker: ignore ollama folder in builds * fix: add emojis to help commands, capitalize, add ai commands to help menu * feat: add better logging, thought handling improvements * bug fixes, better logging and seperation of ai, update docs for ai * clean, remove prompt and user info from logs, more docs edits * system prompt change (plaintext only), parse out /think * clean up, axios tweaks * cleanup, logging of ratelimit --------- Co-authored-by: Aidan --- .dockerignore | 3 +- .env.example | 5 + .gitignore | 8 +- README.md | 46 ++- docker-compose.yml.ai.example | 15 + ...-compose.yml => docker-compose.yml.example | 2 +- src/bot.ts | 15 +- src/commands/ai.ts | 287 ++++++++++++++++++ src/commands/help.ts | 7 +- src/locales/english.json | 44 +-- src/locales/portuguese.json | 7 +- src/utils/log.ts | 83 +++++ src/utils/rate-limiter.ts | 246 +++++++++++++++ 13 files changed, 733 insertions(+), 35 deletions(-) create mode 100644 docker-compose.yml.ai.example rename docker-compose.yml => docker-compose.yml.example (84%) create mode 100644 src/commands/ai.ts create mode 100644 src/utils/log.ts create mode 100644 src/utils/rate-limiter.ts diff --git a/.dockerignore b/.dockerignore index 33e390a..9fe19f3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,4 +4,5 @@ npm-debug.log .gitignore .env *.md -!README.md \ No newline at end of file +!README.md +ollama/ \ No newline at end of file diff --git a/.env.example b/.env.example index af81e1d..8e9cb3f 100644 --- a/.env.example +++ b/.env.example @@ -5,6 +5,11 @@ botSource = "https://github.com/ABOCN/TelegramBot" # insert token here botToken = "" +# ai features +ollamaEnabled = false +# ollamaApi = "http://ollama:11434" +# handlerTimeout = "600_000" # set higher if you expect to download larger models + # misc (botAdmins isnt a array here!) maxRetries = 9999 botAdmins = 00000000, 00000000, 00000000 diff --git a/.gitignore b/.gitignore index 6b42f1f..278fef8 100644 --- a/.gitignore +++ b/.gitignore @@ -144,4 +144,10 @@ yt-dlp ffmpeg # Bun -bun.lock* \ No newline at end of file +bun.lock* + +# Ollama +ollama/ + +# Docker +docker-compose.yml \ No newline at end of file diff --git a/README.md b/README.md index 8fa5b60..3cc7c99 100644 --- a/README.md +++ b/README.md @@ -10,12 +10,6 @@ Kowalski is a a simple Telegram bot made in Node.js. - You can find Kowalski at [@KowalskiNodeBot](https://t.me/KowalskiNodeBot) on Telegram. -## Translations - - -Translation status - - ## Self-host requirements > [!IMPORTANT] @@ -26,6 +20,11 @@ Kowalski is a a simple Telegram bot made in Node.js. 
- FFmpeg (only for the `/yt` command) - Docker and Docker Compose (only required for Docker setup) +### AI Requirements + +- High-end CPU *or* GPU (~ 6GB vRAM) +- If using CPU, enough RAM to load the models (~6GB w/ defaults) + ## Running locally (non-Docker setup) First, clone the repo with Git: @@ -55,9 +54,28 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make ### Using Docker Compose -1. **Make sure to setup your `.env` file first!** +1. **Copy compose file** -2. **Run the container** + _Without AI (Ollama)_ + + ```bash + mv docker-compose.yml.example docker-compose.yml + ``` + + _With AI (Ollama)_ + + ```bash + mv docker-compose.yml.ai.example docker-compose.yml + ``` + +2. **Make sure to setup your `.env` file first!** + + > [!TIP] + > If you intend to setup AI, the defaults for Docker are already included (just uncomment) and don't need to be changed. + > + > Further setup may be needed for GPUs. See the Ollama documentation for more. + +3. **Run the container** ```bash docker compose up -d @@ -81,6 +99,9 @@ If you prefer to use Docker directly, you can use these instructions instead. docker run -d --name kowalski --restart unless-stopped -v $(pwd)/.env:/usr/src/app/.env:ro kowalski ``` +> [!NOTE] +> You must setup Ollama on your own if you would like to use AI features. + ## .env Functions > [!IMPORTANT] @@ -90,6 +111,9 @@ If you prefer to use Docker directly, you can use these instructions instead. - **botPrivacy**: Put the link to your bot privacy policy. - **maxRetries**: Maximum number of retries for a failing command on Kowalski. Default is 5. If the limit is hit, the bot will crash past this number. - **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather). +- **ollamaEnabled** (optional): Enables/disables AI features +- **ollamaApi** (optional): Ollama API endpoint for various AI features, will be disabled if not set +- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models. - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group. - **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc. - **weatherKey**: Weather.com API key, used for the `/weather` command. @@ -106,6 +130,12 @@ If you prefer to use Docker directly, you can use these instructions instead. chmod +x src/plugins/yt-dlp/yt-dlp ``` +### AI + +**Q:** How can I disable AI features? + +**A:** AI features are disabled by default, unless you have set `ollamaEnabled` to `true` in your `.env` file. Set it back to `false` to disable. + ## Contributors diff --git a/docker-compose.yml.ai.example b/docker-compose.yml.ai.example new file mode 100644 index 0000000..2c516f7 --- /dev/null +++ b/docker-compose.yml.ai.example @@ -0,0 +1,15 @@ +services: + kowalski: + build: . 
+ container_name: kowalski + restart: unless-stopped + volumes: + - ./.env:/usr/src/app/.env:ro + environment: + - NODE_ENV=production + ollama: + image: ollama/ollama + container_name: kowalski-ollama + restart: unless-stopped + volumes: + - ./ollama:/root/.ollama \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml.example similarity index 84% rename from docker-compose.yml rename to docker-compose.yml.example index 0aab44a..f3bb819 100644 --- a/docker-compose.yml +++ b/docker-compose.yml.example @@ -6,4 +6,4 @@ services: volumes: - ./.env:/usr/src/app/.env:ro environment: - - NODE_ENV=production \ No newline at end of file + - NODE_ENV=production \ No newline at end of file diff --git a/src/bot.ts b/src/bot.ts index 3422e56..04d2c97 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -4,6 +4,7 @@ import fs from 'fs'; import { isOnSpamWatch } from './spamwatch/spamwatch'; import '@dotenvx/dotenvx'; import './plugins/ytDlpWrapper'; +import { preChecks } from './commands/ai'; // Ensures bot token is set, and not default value if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') { @@ -11,7 +12,17 @@ if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') process.exit(1) } -const bot = new Telegraf(process.env.botToken); +// Detect AI and run pre-checks +if (process.env.ollamaEnabled === "true") { + if (!(await preChecks())) { + process.exit(1) + } +} + +const bot = new Telegraf( + process.env.botToken, + { handlerTimeout: Number(process.env.handlerTimeout) || 600_000 } +); const maxRetries = process.env.maxRetries || 5; let restartCount = 0; @@ -21,7 +32,7 @@ const loadCommands = () => { try { const files = fs.readdirSync(commandsPath) .filter(file => file.endsWith('.ts') || file.endsWith('.js')); - + files.forEach((file) => { try { const commandPath = path.join(commandsPath, file); diff --git a/src/commands/ai.ts b/src/commands/ai.ts new file mode 100644 index 0000000..0e27578 --- /dev/null +++ b/src/commands/ai.ts @@ -0,0 +1,287 @@ +// AI.TS +// by ihatenodejs/Aidan +// +// ----------------------------------------------------------------------- +// +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +import { isOnSpamWatch } from "../spamwatch/spamwatch" +import spamwatchMiddlewareModule from "../spamwatch/Middleware" +import { Telegraf, Context } from "telegraf" +import type { Message } from "telegraf/types" +import { replyToMessageId } from "../utils/reply-to-message-id" +import { getStrings } from "../plugins/checklang" +import { languageCode } from "../utils/language-code" +import axios from "axios" +import { rateLimiter } from "../utils/rate-limiter" +import { logger } from "../utils/log" + +const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch) +export const flash_model = "gemma3:4b" +export const thinking_model = "deepseek-r1:1.5b" + +type TextContext = Context & { message: Message.TextMessage } + +export function sanitizeForJson(text: string): string { + return text + .replace(/\\/g, '\\\\') + .replace(/"/g, '\\"') + .replace(/\n/g, '\\n') + .replace(/\r/g, '\\r') + .replace(/\t/g, '\\t') +} + +export async function preChecks() { + const envs = [ + "ollamaApi", + ] + + for (const env of envs) { + if (!process.env[env]) { + console.error(`[✨ AI | !] ❌ ${env} not set!`) + return false + } + } + console.log("[✨ AI] Pre-checks passed\n") + return true +} + +async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) { + const Strings = getStrings(languageCode(ctx)) + + if (!ctx.chat) { + return { + success: false, + error: Strings.unexpectedErr.replace("{error}", "No chat found"), + } + } + + try { + const aiResponse = await axios.post( + `${process.env.ollamaApi}/api/generate`, + { + model, + prompt, + stream: true, + }, + { + responseType: "stream", + } + ) + + let fullResponse = "" + let thoughts = "" + let lastUpdate = Date.now() + + const stream = aiResponse.data + for await (const chunk of stream) { + const lines = chunk.toString().split('\n') + for (const line of lines) { + if (!line.trim()) continue + let ln + try { + ln = JSON.parse(line) + } catch (e) { + console.error("[✨ AI | !] Error parsing chunk:", e) + continue + } + + if (model === thinking_model) { + if (ln.response.includes('')) { + const thinkMatch = ln.response.match(/([\s\S]*?)<\/think>/) + if (thinkMatch && thinkMatch[1].trim().length > 0) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, true) + } else if (!thinkMatch) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, true) + } + } else if (ln.response.includes('')) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, false) + } + } + + const now = Date.now() + if (ln.response) { + if (model === thinking_model) { + let patchedThoughts = ln.response + const thinkTagRx = /([\s\S]*?)<\/think>/g + patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? 
'`Thinking...`' + p1 + '`Finished thinking`' : '') + patchedThoughts = patchedThoughts.replace(//g, '`Thinking...`') + patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`') + thoughts += patchedThoughts + fullResponse += patchedThoughts + } else { + fullResponse += ln.response + } + if (now - lastUpdate >= 1000) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + thoughts, + { parse_mode: 'Markdown' } + ) + lastUpdate = now + } + } + } + } + + return { + success: true, + response: fullResponse, + } + } catch (error: any) { + let shouldPullModel = false + if (error.response) { + const errData = error.response.data?.error + const errStatus = error.response.status + if (errData && (errData.includes(`model '${model}' not found`) || errStatus === 404)) { + shouldPullModel = true + } else { + console.error("[✨ AI | !] Error zone 1:", errData) + return { success: false, error: errData } + } + } else if (error.request) { + console.error("[✨ AI | !] No response received:", error.request) + return { success: false, error: "No response received from server" } + } else { + console.error("[✨ AI | !] Error zone 3:", error.message) + return { success: false, error: error.message } + } + + if (shouldPullModel) { + ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`) + console.log(`[✨ AI | i] Pulling ${model} from ollama...`) + try { + await axios.post( + `${process.env.ollamaApi}/api/pull`, + { + model, + stream: false, + timeout: process.env.ollamaApiTimeout || 10000, + } + ) + } catch (e: any) { + if (e.response) { + console.error("[✨ AI | !] Something went wrong:", e.response.data?.error) + return { + success: false, + error: `❌ Something went wrong while pulling ${model}, please try your command again!`, + } + } else if (e.request) { + console.error("[✨ AI | !] No response received while pulling:", e.request) + return { + success: false, + error: `❌ No response received while pulling ${model}, please try again!`, + } + } else { + console.error("[✨ AI | !] Error while pulling:", e.message) + return { + success: false, + error: `❌ Error while pulling ${model}: ${e.message}`, + } + } + } + console.log(`[✨ AI | i] ${model} pulled successfully`) + return { + success: true, + response: `✅ Pulled ${model} successfully, please retry the command.`, + } + } + } +} + +export default (bot: Telegraf) => { + const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" + + bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => { + if (!ctx.message || !('text' in ctx.message)) return + const isAsk = ctx.message.text.startsWith("/ask") + const model = isAsk ? flash_model : thinking_model + const textCtx = ctx as TextContext + const reply_to_message_id = replyToMessageId(textCtx) + const Strings = getStrings(languageCode(textCtx)) + const message = textCtx.message.text + const author = ("@" + ctx.from?.username) || ctx.from?.first_name + + logger.logCmdStart(author, model === flash_model ? 
"ask" : "think") + + if (!process.env.ollamaApi) { + await ctx.reply(Strings.aiDisabled, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + + const replyGenerating = await ctx.reply(Strings.askGenerating.replace("{model}", model), { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + + const fixedMsg = message.replace(/\/(ask|think) /, "") + if (fixedMsg.length < 1) { + await ctx.reply(Strings.askNoMessage, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + + logger.logPrompt(fixedMsg) + + const prompt = sanitizeForJson( +`You are a plaintext-only, helpful assistant called ${botName}. +Current Date/Time (UTC): ${new Date().toLocaleString()} + +--- + +Respond to the user's message: +${fixedMsg}`) + const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model) + if (!aiResponse) return + + if (!ctx.chat) return + if (aiResponse.success && aiResponse.response) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + aiResponse.response, + { parse_mode: 'Markdown' } + ) + return + } + const error = Strings.unexpectedErr.replace("{error}", aiResponse.error) + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + error, + { parse_mode: 'Markdown' } + ) + }) +} \ No newline at end of file diff --git a/src/commands/help.ts b/src/commands/help.ts index 39191c1..3a6d3a0 100644 --- a/src/commands/help.ts +++ b/src/commands/help.ts @@ -32,7 +32,8 @@ async function sendHelpMessage(ctx, isEditing) { [{ text: Strings.mainCommands, callback_data: 'helpMain' }, { text: Strings.usefulCommands, callback_data: 'helpUseful' }], [{ text: Strings.interactiveEmojis, callback_data: 'helpInteractive' }, { text: Strings.funnyCommands, callback_data: 'helpFunny' }], [{ text: Strings.lastFm.helpEntry, callback_data: 'helpLast' }, { text: Strings.animalCommands, callback_data: 'helpAnimals' }], - [{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }] + [{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }], + [{ text: Strings.aiCmds, callback_data: 'helpAi' }] ] } }; @@ -112,6 +113,10 @@ export default (bot) => { await ctx.answerCbQuery(); await ctx.editMessageText(Strings.ponyApi.helpDesc, options); break; + case 'helpAi': + await ctx.answerCbQuery(); + await ctx.editMessageText(Strings.aiCmdsDesc, options); + break; case 'helpBack': await ctx.answerCbQuery(); await sendHelpMessage(ctx, true); diff --git a/src/locales/english.json b/src/locales/english.json index 4bc9c06..fadfcd6 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -33,8 +33,8 @@ "funEmojiResult": "*You rolled {emoji} and got* `{value}`*!*\nYou don't know what that means? Me neither!", "gifErr": "*Something went wrong while sending the GIF. 
Please try again later.*\n\n{err}", "lastFm": { - "helpEntry": "Last.fm", - "helpDesc": "*Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser ``: Sets the user for the command above.", + "helpEntry": "🎵 Last.fm", + "helpDesc": "🎵 *Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser ``: Sets the user for the command above.", "noUser": "*Please provide a Last.fm username.*\nExample: `/setuser `", "noUserSet": "*You haven't set your Last.fm username yet.*\nUse the command /setuser to set.\n\nExample: `/setuser `", "noRecentTracks": "*No recent tracks found for Last.fm user* `{lastfmUser}`*.*", @@ -52,25 +52,27 @@ "apiErr": "*An error occurred while retrieving the weather. Please try again later.*\n\n`{error}`", "apiKeyErr": "*An API key was not set by the bot owner. Please try again later.*" }, - "mainCommands": "Main commands", - "mainCommandsDesc": "*Main commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy", - "usefulCommands": "Useful commands", - "usefulCommandsDesc": "*Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device ``: Search for a device on GSMArena and show its specs.\n/codename | /whatis ``: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima ``: See weather status for a specific location.\n- /modarchive | /tma ``: Download a module from The Mod Archive.\n- /http ``: Send details about a specific HTTP code. Example: `/http 404`", - "funnyCommands": "Funny commands", - "funnyCommandsDesc": "*Funny commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10", - "interactiveEmojis": "Interactive emojis", - "interactiveEmojisDesc": "*Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!", - "animalCommands": "Animals", - "animalCommandsDesc": "*Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat ``: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`", + "mainCommands": "ℹ️ Main Commands", + "mainCommandsDesc": "ℹ️ *Main Commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy", + "usefulCommands": "🛠️ Useful Commands", + "usefulCommandsDesc": "🛠️ *Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device ``: Search for a device on GSMArena and show its specs.\n/codename | /whatis ``: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima ``: See weather status for a specific location.\n- /modarchive | /tma ``: Download a module from The Mod Archive.\n- /http ``: Send details about a specific HTTP code. 
Example: `/http 404`", + "funnyCommands": "😂 Funny Commands", + "funnyCommandsDesc": "😂 *Funny Commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10", + "interactiveEmojis": "🎲 Interactive Emojis", + "interactiveEmojisDesc": "🎲 *Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!", + "animalCommands": "🐱 Animals", + "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat ``: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`", + "aiCmds": "✨ AI Commands", + "aiCmdsDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI", "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`", "maDownloadError": "Error downloading the file. Check the module ID and try again.", "ytDownload": { - "helpEntry": "Video download", - "helpDesc": "*Video download*\n\n- /yt | /ytdl | /sdl | /dl | /video `${userName}, please select your device:`; - const options = { - parse_mode: 'HTML', - reply_to_message_id: ctx.message.message_id, - disable_web_page_preview: true, - reply_markup: { - inline_keyboard: results.map(result => [{ text: result.name, callback_data: `details:${result.url}:${ctx.from.id}` }]) - } + if (deviceSelectionCache[userId]?.timeout) { + clearTimeout(deviceSelectionCache[userId].timeout); + } + deviceSelectionCache[userId] = { + results, + timeout: setTimeout(() => { delete deviceSelectionCache[userId]; }, 5 * 60 * 1000) }; - await ctx.telegram.editMessageText(ctx.chat.id, statusMsg.message_id, undefined, testUser, options); + + if (lastSelectionMessageId[userId]) { + try { + await ctx.telegram.editMessageText( + ctx.chat.id, + lastSelectionMessageId[userId], + undefined, + Strings.gsmarenaSelectDevice || "[TODO: Add gsmarenaSelectDevice to locales] Please select your device:", + { + parse_mode: 'HTML', + reply_to_message_id: ctx.message.message_id, + disable_web_page_preview: true, + reply_markup: { + inline_keyboard: results.map((result, idx) => { + const callbackData = `gsmadetails:${idx}:${ctx.from.id}`; + return [{ text: result.name, callback_data: callbackData }]; + }) + } + } + ); + } catch (e) { + const testUser = `${userName}, ${Strings.gsmarenaSelectDevice || "[TODO: Add gsmarenaSelectDevice to locales] please select your device:"}`; + const options = { + parse_mode: 'HTML', + reply_to_message_id: ctx.message.message_id, + disable_web_page_preview: true, + reply_markup: { + inline_keyboard: results.map((result, idx) => { + const callbackData = `gsmadetails:${idx}:${ctx.from.id}`; + return [{ text: result.name, callback_data: callbackData }]; + }) + } + }; + const selectionMsg = await ctx.reply(testUser, options); + lastSelectionMessageId[userId] = selectionMsg.message_id; + } + } else { + const testUser = `${userName}, ${Strings.gsmarenaSelectDevice || "[TODO: Add gsmarenaSelectDevice to locales] please select your device:"}`; + const inlineKeyboard = results.map((result, idx) => { + const callbackData = `gsmadetails:${idx}:${ctx.from.id}`; + return [{ 
text: result.name, callback_data: callbackData }]; + }); + const options = { + parse_mode: 'HTML', + reply_to_message_id: ctx.message.message_id, + disable_web_page_preview: true, + reply_markup: { + inline_keyboard: inlineKeyboard + } + }; + const selectionMsg = await ctx.reply(testUser, options); + lastSelectionMessageId[userId] = selectionMsg.message_id; + } + await ctx.telegram.deleteMessage(ctx.chat.id, statusMsg.message_id).catch(() => {}); }); - bot.action(/details:(.+):(.+)/, async (ctx) => { - const url = ctx.match[1]; + bot.action(/gsmadetails:(\d+):(\d+)/, async (ctx) => { + const idx = parseInt(ctx.match[1]); const userId = parseInt(ctx.match[2]); const userName = getUsername(ctx); + const Strings = getStrings(languageCode(ctx)); const callbackQueryUserId = ctx.update.callback_query.from.id; if (userId !== callbackQueryUserId) { - return ctx.answerCbQuery(`${userName}, you are not allowed to interact with this.`); + return ctx.answerCbQuery(`${userName}, ${Strings.gsmarenaNotAllowed || "[TODO: Add gsmarenaNotAllowed to locales] you are not allowed to interact with this."}`); } ctx.answerCbQuery(); + const cache = deviceSelectionCache[userId]; + if (!cache || !cache.results[idx]) { + return ctx.reply(Strings.gsmarenaInvalidOrExpired || "[TODO: Add gsmarenaInvalidOrExpired to locales] Whoops, invalid or expired option. Please try again.", { ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); + } + const url = cache.results[idx].url; + const phoneDetails = await checkPhoneDetails(url); if (phoneDetails.name) { const message = formatPhone(phoneDetails); - ctx.editMessageText(`${userName}, these are the details of your device:` + message, { parse_mode: 'HTML', disable_web_page_preview: false }); + ctx.editMessageText(`${userName}, ${Strings.gsmarenaDeviceDetails || "[TODO: Add gsmarenaDeviceDetails to locales] these are the details of your device:"}` + message, { parse_mode: 'HTML', disable_web_page_preview: false }); } else { - ctx.reply("Error fetching phone details.", { reply_to_message_id: ctx.message.message_id }); + ctx.reply(Strings.gsmarenaErrorFetchingDetails || "[TODO: Add gsmarenaErrorFetchingDetails to locales] Error fetching phone details.", { ...(ctx.message?.message_id ? 
{ reply_parameters: { message_id: ctx.message.message_id } } : {}) }); } }); }; diff --git a/src/commands/help.ts b/src/commands/help.ts index 3a6d3a0..f01f5e5 100644 --- a/src/commands/help.ts +++ b/src/commands/help.ts @@ -1,21 +1,38 @@ import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; -import { languageCode } from '../utils/language-code'; +import type { Context } from 'telegraf'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); +async function getUserAndStrings(ctx: Context, db?: any): Promise<{ Strings: any, languageCode: string }> { + let languageCode = 'en'; + if (!ctx.from) { + const Strings = getStrings(languageCode); + return { Strings, languageCode }; + } + const from = ctx.from; + if (db && from.id) { + const dbUser = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(from.id)), limit: 1 }); + if (dbUser.length > 0) { + languageCode = dbUser[0].languageCode; + } + } + const Strings = getStrings(languageCode); + return { Strings, languageCode }; +} + interface MessageOptions { parse_mode: string; disable_web_page_preview: boolean; reply_markup: { - inline_keyboard: { text: any; callback_data: string; }[][]; + inline_keyboard: { text: string; callback_data: string; }[][]; }; reply_to_message_id?: number; } -async function sendHelpMessage(ctx, isEditing) { - const Strings = getStrings(languageCode(ctx)); +async function sendHelpMessage(ctx, isEditing, db) { + const { Strings } = await getUserAndStrings(ctx, db); const botInfo = await ctx.telegram.getMe(); const helpText = Strings.botHelp .replace(/{botName}/g, botInfo.first_name) @@ -33,14 +50,14 @@ async function sendHelpMessage(ctx, isEditing) { [{ text: Strings.interactiveEmojis, callback_data: 'helpInteractive' }, { text: Strings.funnyCommands, callback_data: 'helpFunny' }], [{ text: Strings.lastFm.helpEntry, callback_data: 'helpLast' }, { text: Strings.animalCommands, callback_data: 'helpAnimals' }], [{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }], - [{ text: Strings.aiCmds, callback_data: 'helpAi' }] + [{ text: Strings.ai.helpEntry, callback_data: 'helpAi' }] ] } }; if (includeReplyTo) { const messageId = getMessageId(ctx); if (messageId) { - options.reply_to_message_id = messageId; + (options as any).reply_parameters = { message_id: messageId }; }; }; return options; @@ -52,78 +69,78 @@ async function sendHelpMessage(ctx, isEditing) { }; } -export default (bot) => { +export default (bot, db) => { bot.help(spamwatchMiddleware, async (ctx) => { - await sendHelpMessage(ctx, false); + await sendHelpMessage(ctx, false, db); }); bot.command("about", spamwatchMiddleware, async (ctx) => { - const Strings = getStrings(languageCode(ctx)); + const { Strings } = await getUserAndStrings(ctx, db); const aboutMsg = Strings.botAbout.replace(/{sourceLink}/g, `${process.env.botSource}`); ctx.reply(aboutMsg, { parse_mode: 'Markdown', disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? 
{ reply_parameters: { message_id: ctx.message.message_id } } : {}) }); - }) + }); - bot.on('callback_query', async (ctx) => { - const callbackData = ctx.callbackQuery.data; - const Strings = getStrings(languageCode(ctx)); - const options = { - parse_mode: 'Markdown', - disable_web_page_preview: true, - reply_markup: JSON.stringify({ - inline_keyboard: [ - [{ text: Strings.varStrings.varBack, callback_data: 'helpBack' }], - ] - }) - }; + const options = (Strings) => ({ + parse_mode: 'Markdown', + disable_web_page_preview: true, + reply_markup: JSON.stringify({ + inline_keyboard: [ + [{ text: Strings.varStrings.varBack, callback_data: 'helpBack' }], + ] + }) + }); - switch (callbackData) { - case 'helpMain': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.mainCommandsDesc, options); - break; - case 'helpUseful': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.usefulCommandsDesc, options); - break; - case 'helpInteractive': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.interactiveEmojisDesc, options); - break; - case 'helpFunny': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.funnyCommandsDesc, options); - break; - case 'helpLast': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.lastFm.helpDesc, options); - break; - case 'helpYouTube': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.ytDownload.helpDesc, options); - break; - case 'helpAnimals': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.animalCommandsDesc, options); - break; - case 'helpMLP': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.ponyApi.helpDesc, options); - break; - case 'helpAi': - await ctx.answerCbQuery(); - await ctx.editMessageText(Strings.aiCmdsDesc, options); - break; - case 'helpBack': - await ctx.answerCbQuery(); - await sendHelpMessage(ctx, true); - break; - default: - await ctx.answerCbQuery(Strings.errInvalidOption); - break; - } + bot.action('helpMain', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.mainCommandsDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpUseful', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.usefulCommandsDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpInteractive', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.interactiveEmojisDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpFunny', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.funnyCommandsDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpLast', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.lastFm.helpDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpYouTube', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.ytDownload.helpDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpAnimals', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.animalCommandsDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpMLP', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await 
ctx.editMessageText(Strings.ponyApi.helpDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpAi', async (ctx) => { + const { Strings } = await getUserAndStrings(ctx, db); + await ctx.editMessageText(Strings.ai.helpDesc, options(Strings)); + await ctx.answerCbQuery(); + }); + bot.action('helpBack', async (ctx) => { + await sendHelpMessage(ctx, true, db); + await ctx.answerCbQuery(); }); } diff --git a/src/commands/http.ts b/src/commands/http.ts index b1fe636..9ef0fdb 100644 --- a/src/commands/http.ts +++ b/src/commands/http.ts @@ -5,14 +5,37 @@ import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import axios from 'axios'; import verifyInput from '../plugins/verifyInput'; import { Context, Telegraf } from 'telegraf'; +import * as schema from '../db/schema'; import { languageCode } from '../utils/language-code'; +import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); -export default (bot: Telegraf) => { +async function getUserAndStrings(ctx: Context, db?: NodePgDatabase): Promise<{ Strings: any, languageCode: string }> { + let languageCode = 'en'; + if (!ctx.from) { + const Strings = getStrings(languageCode); + return { Strings, languageCode }; + } + const from = ctx.from; + if (db && from.id) { + const dbUser = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(from.id)), limit: 1 }); + if (dbUser.length > 0) { + languageCode = dbUser[0].languageCode; + } + } + if (from.language_code && languageCode === 'en') { + languageCode = from.language_code; + console.warn('[WARN !] Falling back to Telegram language_code for user', from.id); + } + const Strings = getStrings(languageCode); + return { Strings, languageCode }; +} + +export default (bot: Telegraf, db) => { bot.command("http", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { const reply_to_message_id = ctx.message.message_id; - const Strings = getStrings(languageCode(ctx)); + const { Strings } = await getUserAndStrings(ctx, db); const userInput = ctx.message.text.split(' ')[1]; const apiUrl = Resources.httpApi; const { invalidCode } = Strings.httpCodes @@ -34,19 +57,19 @@ export default (bot: Telegraf) => { .replace("{description}", codeInfo.description); await ctx.reply(message, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); } else { await ctx.reply(Strings.httpCodes.notFound, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; } catch (error) { - const message = Strings.httpCodes.fetchErr.replace("{error}", error); + const message = Strings.httpCodes.fetchErr.replace('{error}', error); ctx.reply(message, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; }); @@ -63,7 +86,7 @@ export default (bot: Telegraf) => { if (userInput.length !== 3) { ctx.reply(Strings.httpCodes.invalidCode, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }) return } @@ -74,12 +97,12 @@ export default (bot: Telegraf) => { await ctx.replyWithPhoto(apiUrl, { caption: `🐱 ${apiUrl}`, parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? 
{ reply_parameters: { message_id: reply_to_message_id } } : {}) }); } catch (error) { ctx.reply(Strings.catImgErr, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); } }); diff --git a/src/commands/info.ts b/src/commands/info.ts index 2bf2b2d..c9f8042 100644 --- a/src/commands/info.ts +++ b/src/commands/info.ts @@ -2,64 +2,81 @@ import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { Context, Telegraf } from 'telegraf'; +import * as schema from '../db/schema'; +import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); -async function getUserInfo(ctx: Context & { message: { text: string } }) { - const Strings = getStrings(ctx.from?.language_code || 'en'); +async function getUserAndStrings(ctx: Context, db?: NodePgDatabase): Promise<{ Strings: any, languageCode: string }> { + let languageCode = 'en'; + if (!ctx.from) { + const Strings = getStrings(languageCode); + return { Strings, languageCode }; + } + const from = ctx.from; + if (db && from.id) { + const dbUser = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(from.id)), limit: 1 }); + if (dbUser.length > 0) { + languageCode = dbUser[0].languageCode; + } + } + if (from.language_code && languageCode === 'en') { + languageCode = from.language_code; + console.warn('[WARN !] Falling back to Telegram language_code for user', from.id); + } + const Strings = getStrings(languageCode); + return { Strings, languageCode }; +} + +async function getUserInfo(ctx: Context & { message: { text: string } }, db: any) { + const { Strings } = await getUserAndStrings(ctx, db); let lastName = ctx.from?.last_name; if (lastName === undefined) { lastName = " "; } - const userInfo = Strings.userInfo .replace('{userName}', `${ctx.from?.first_name} ${lastName}` || Strings.varStrings.varUnknown) .replace('{userId}', ctx.from?.id || Strings.varStrings.varUnknown) .replace('{userHandle}', ctx.from?.username ? `@${ctx.from?.username}` : Strings.varStrings.varNone) .replace('{userPremium}', ctx.from?.is_premium ? Strings.varStrings.varYes : Strings.varStrings.varNo) .replace('{userLang}', ctx.from?.language_code || Strings.varStrings.varUnknown); - return userInfo; } -async function getChatInfo(ctx: Context & { message: { text: string } }) { - const Strings = getStrings(ctx.from?.language_code || 'en'); - if (ctx.chat?.type === 'group' || ctx.chat?.type === 'supergroup') { +async function getChatInfo(ctx: Context & { message: { text: string } }, db: any) { + const { Strings } = await getUserAndStrings(ctx, db); + if ((ctx.chat?.type === 'group' || ctx.chat?.type === 'supergroup')) { + const chat = ctx.chat as (typeof ctx.chat & { username?: string; is_forum?: boolean }); const chatInfo = Strings.chatInfo - .replace('{chatId}', ctx.chat?.id || Strings.varStrings.varUnknown) - .replace('{chatName}', ctx.chat?.title || Strings.varStrings.varUnknown) - // @ts-ignore - .replace('{chatHandle}', ctx.chat?.username ? `@${ctx.chat?.username}` : Strings.varStrings.varNone) + .replace('{chatId}', chat?.id || Strings.varStrings.varUnknown) + .replace('{chatName}', chat?.title || Strings.varStrings.varUnknown) + .replace('{chatHandle}', chat?.username ? 
`@${chat.username}` : Strings.varStrings.varNone) .replace('{chatMembersCount}', await ctx.getChatMembersCount()) - .replace('{chatType}', ctx.chat?.type || Strings.varStrings.varUnknown) - // @ts-ignore - .replace('{isForum}', ctx.chat?.is_forum ? Strings.varStrings.varYes : Strings.varStrings.varNo); - + .replace('{chatType}', chat?.type || Strings.varStrings.varUnknown) + .replace('{isForum}', chat?.is_forum ? Strings.varStrings.varYes : Strings.varStrings.varNo); return chatInfo; } else { - return Strings.groupOnly + return Strings.groupOnly; } } -export default (bot: Telegraf) => { +export default (bot: Telegraf, db) => { bot.command('chatinfo', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { - const chatInfo = await getChatInfo(ctx); + const chatInfo = await getChatInfo(ctx, db); ctx.reply( chatInfo, { parse_mode: 'Markdown', - // @ts-ignore - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) } ); }); bot.command('userinfo', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { - const userInfo = await getUserInfo(ctx); + const userInfo = await getUserInfo(ctx, db); ctx.reply( userInfo, { parse_mode: 'Markdown', - // @ts-ignore - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) } ); }); diff --git a/src/commands/lastfm.ts b/src/commands/lastfm.ts index 39d6c8d..d51ca25 100644 --- a/src/commands/lastfm.ts +++ b/src/commands/lastfm.ts @@ -72,7 +72,7 @@ export default (bot) => { return ctx.reply(Strings.lastFm.noUser, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; @@ -84,7 +84,7 @@ export default (bot) => { ctx.reply(message, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }); @@ -94,12 +94,12 @@ export default (bot) => { const lastfmUser = users[userId]; const genericImg = Resources.lastFmGenericImg; const botInfo = await ctx.telegram.getMe(); - + if (!lastfmUser) { return ctx.reply(Strings.lastFm.noUserSet, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; @@ -124,7 +124,7 @@ export default (bot) => { return ctx.reply(noRecent, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; @@ -137,8 +137,8 @@ export default (bot) => { if (albumMbid) { imageUrl = await getFromMusicBrainz(albumMbid); - } - + } + if (!imageUrl) { imageUrl = getFromLast(track); } @@ -166,7 +166,7 @@ export default (bot) => { 'User-Agent': `@${botInfo.username}-node-telegram-bot` } }); - + num_plays = response_plays.data.track.userplaycount; } catch (err) { console.log(err) @@ -176,7 +176,7 @@ export default (bot) => { ctx.reply(message, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? 
{ reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; @@ -200,13 +200,13 @@ export default (bot) => { caption: message, parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); } else { ctx.reply(message, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; } catch (err) { @@ -217,7 +217,7 @@ export default (bot) => { ctx.reply(message, { parse_mode: "Markdown", disable_web_page_preview: true, - reply_to_message_id: ctx.message.message_id + ...(ctx.message?.message_id ? { reply_parameters: { message_id: ctx.message.message_id } } : {}) }); }; }); diff --git a/src/commands/main.ts b/src/commands/main.ts index a5d581c..a6c48ba 100644 --- a/src/commands/main.ts +++ b/src/commands/main.ts @@ -3,31 +3,390 @@ import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { Context, Telegraf } from 'telegraf'; import { replyToMessageId } from '../utils/reply-to-message-id'; -import { languageCode } from '../utils/language-code'; +import * as schema from '../db/schema'; +import { eq } from 'drizzle-orm'; +import { ensureUserInDb } from '../utils/ensure-user'; +import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; +import { models } from './ai'; +import { langs } from '../locales/config'; + +type UserRow = typeof schema.usersTable.$inferSelect; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); -export default (bot: Telegraf) => { - bot.start(spamwatchMiddleware, async (ctx: Context) => { - const Strings = getStrings(languageCode(ctx)); - const botInfo = await ctx.telegram.getMe(); - const reply_to_message_id = replyToMessageId(ctx) - const startMsg = Strings.botWelcome.replace(/{botName}/g, botInfo.first_name); +async function getUserAndStrings(ctx: Context, db: NodePgDatabase): Promise<{ user: UserRow | null, Strings: any, languageCode: string }> { + let user: UserRow | null = null; + let languageCode = 'en'; + if (!ctx.from) { + const Strings = getStrings(languageCode); + return { user, Strings, languageCode }; + } + const { id, language_code } = ctx.from; + if (id) { + const dbUser = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(id)), limit: 1 }); + if (dbUser.length === 0) { + await ensureUserInDb(ctx, db); + const newUser = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(id)), limit: 1 }); + if (newUser.length > 0) { + user = newUser[0]; + languageCode = user.languageCode; + } + } else { + user = dbUser[0]; + languageCode = user.languageCode; + } + } + if (!user && language_code) { + languageCode = language_code; + console.warn('[WARN !] Falling back to Telegram language_code for user', id); + } + const Strings = getStrings(languageCode); + return { user, Strings, languageCode }; +} - ctx.reply(startMsg, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); +type SettingsMenu = { text: string, reply_markup: any }; +function getSettingsMenu(user: UserRow, Strings: any): SettingsMenu { + const langObj = langs.find(l => l.code === user.languageCode); + const langLabel = langObj ? 
langObj.label : user.languageCode; + return { + text: Strings.settings.selectSetting, + reply_markup: { + inline_keyboard: [ + [ + { text: `✨ ${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: 'settings_aiEnabled' }, + { text: `🧠 ${Strings.settings.ai.aiModel}: ${user.customAiModel}`, callback_data: 'settings_aiModel' } + ], + [ + { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: 'settings_aiTemperature' }, + { text: `🌐 ${langLabel}`, callback_data: 'settings_language' } + ] + ] + } + }; +} + +export default (bot: Telegraf, db: NodePgDatabase) => { + bot.start(spamwatchMiddleware, async (ctx: Context) => { + const { user, Strings } = await getUserAndStrings(ctx, db); + const botInfo = await ctx.telegram.getMe(); + const reply_to_message_id = replyToMessageId(ctx); + const startMsg = Strings.botWelcome.replace(/{botName}/g, botInfo.first_name); + if (!user) return; + ctx.reply( + startMsg.replace( + /{aiEnabled}/g, + user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled + ).replace( + /{aiModel}/g, + user.customAiModel + ).replace( + /{aiTemperature}/g, + user.aiTemperature.toString() + ).replace( + /{aiRequests}/g, + user.aiRequests.toString() + ).replace( + /{aiCharacters}/g, + user.aiCharacters.toString() + ).replace( + /{languageCode}/g, + user.languageCode + ), { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + } + ); }); - bot.command('privacy', spamwatchMiddleware, async (ctx: any) => { - const Strings = getStrings(ctx.from.language_code); - const message = Strings.botPrivacy.replace("{botPrivacy}", process.env.botPrivacy); + bot.command(["settings"], spamwatchMiddleware, async (ctx: Context) => { + const reply_to_message_id = replyToMessageId(ctx); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + const menu = getSettingsMenu(user, Strings); + await ctx.reply( + menu.text, + { + reply_markup: menu.reply_markup, + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + } + ); + }); + const updateSettingsKeyboard = async (ctx: Context, user: UserRow, Strings: any) => { + const menu = getSettingsMenu(user, Strings); + await ctx.editMessageReplyMarkup(menu.reply_markup); + }; + + bot.action('settings_aiEnabled', async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + await db.update(schema.usersTable) + .set({ aiEnabled: !user.aiEnabled }) + .where(eq(schema.usersTable.telegramId, String(user.telegramId))); + const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0]; + await updateSettingsKeyboard(ctx, updatedUser, Strings); + } catch (err) { + console.error('Error handling settings_aiEnabled callback:', err); + } + }); + + bot.action('settings_aiModel', async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + try { + await ctx.editMessageText( + `${Strings.settings.ai.selectSeries}`, + { + reply_markup: { + inline_keyboard: models.map(series => [ + { text: series.label, callback_data: `selectseries_${series.name}` } + ]).concat([[ + { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' } + ]]) + } + } + ); + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is 
invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('Unexpected Telegram error:', err); + } + } catch (err) { + console.error('Error handling settings_aiModel callback:', err); + } + }); + + bot.action(/^selectseries_.+$/, async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + const data = (ctx.callbackQuery as any).data; + const seriesName = data.replace('selectseries_', ''); + const series = models.find(s => s.name === seriesName); + if (!series) return; + const desc = user.languageCode === 'pt' ? series.descriptionPt : series.descriptionEn; + try { + await ctx.editMessageText( + `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label)}\n\n${Strings.settings.ai.parameterSizeExplanation}`, + { + reply_markup: { + inline_keyboard: series.models.map(m => [ + { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${series.name}_${m.name}` } + ]).concat([[ + { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_aiModel' } + ]]) + } + } + ); + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('Unexpected Telegram error:', err); + } + } catch (err) { + console.error('Error handling selectseries callback:', err); + } + }); + + bot.action(/^setmodel_.+$/, async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + const data = (ctx.callbackQuery as any).data; + const parts = data.split('_'); + const seriesName = parts[1]; + const modelName = parts.slice(2).join('_'); + const series = models.find(s => s.name === seriesName); + const model = series?.models.find(m => m.name === modelName); + if (!series || !model) return; + await db.update(schema.usersTable) + .set({ customAiModel: model.name }) + .where(eq(schema.usersTable.telegramId, String(user.telegramId))); + const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0]; + const menu = getSettingsMenu(updatedUser, Strings); + try { + if (ctx.callbackQuery.message) { + await ctx.editMessageText( + menu.text, + { + reply_markup: menu.reply_markup, + parse_mode: 'Markdown' + } + ); + } else { + await ctx.reply(menu.text, { + reply_markup: menu.reply_markup, + parse_mode: 'Markdown' + }); + } + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('[Settings] Unexpected Telegram error:', err); + } + } catch (err) { + console.error('Error handling setmodel callback:', err); + } + }); + + bot.action('settings_aiTemperature', async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + const temps = [0.2, 0.5, 0.7, 0.9, 1.2]; + try { + await ctx.editMessageReplyMarkup({ + 
inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]]) + }); + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('Unexpected Telegram error:', err); + } + } catch (err) { + console.error('Error handling settings_aiTemperature callback:', err); + } + }); + + bot.action(/^settemp_.+$/, async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + const data = (ctx.callbackQuery as any).data; + const temp = parseFloat(data.replace('settemp_', '')); + await db.update(schema.usersTable) + .set({ aiTemperature: temp }) + .where(eq(schema.usersTable.telegramId, String(user.telegramId))); + const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0]; + await updateSettingsKeyboard(ctx, updatedUser, Strings); + } catch (err) { + console.error('Error handling settemp callback:', err); + } + }); + + bot.action('settings_language', async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + try { + await ctx.editMessageReplyMarkup({ + inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]]) + }); + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('Unexpected Telegram error:', err); + } + } catch (err) { + console.error('Error handling settings_language callback:', err); + } + }); + + bot.action('settings_back', async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user, Strings } = await getUserAndStrings(ctx, db); + if (!user) return; + await updateSettingsKeyboard(ctx, user, Strings); + } catch (err) { + console.error('Error handling settings_back callback:', err); + } + }); + + bot.command('privacy', spamwatchMiddleware, async (ctx: Context) => { + const { Strings } = await getUserAndStrings(ctx, db); + if (!ctx.from || !ctx.message) return; + const message = Strings.botPrivacy.replace("{botPrivacy}", process.env.botPrivacy ?? 
""); ctx.reply(message, { parse_mode: 'Markdown', - disable_web_page_preview: true, reply_to_message_id: ctx.message.message_id - }); + } as any); + }); + + bot.action(/^setlang_.+$/, async (ctx) => { + try { + await ctx.answerCbQuery(); + const { user } = await getUserAndStrings(ctx, db); + if (!user) { + console.log('[Settings] No user found'); + return; + } + const data = (ctx.callbackQuery as any).data; + const lang = data.replace('setlang_', ''); + await db.update(schema.usersTable) + .set({ languageCode: lang }) + .where(eq(schema.usersTable.telegramId, String(user.telegramId))); + const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0]; + const updatedStrings = getStrings(updatedUser.languageCode); + const menu = getSettingsMenu(updatedUser, updatedStrings); + try { + if (ctx.callbackQuery.message) { + await ctx.editMessageText( + menu.text, + { + reply_markup: menu.reply_markup, + parse_mode: 'Markdown' + } + ); + } else { + await ctx.reply(menu.text, { + reply_markup: menu.reply_markup, + parse_mode: 'Markdown' + }); + } + } catch (err) { + if ( + !( + err.response.description?.includes('query is too old') || + err.response.description?.includes('query ID is invalid') || + err.response.description?.includes('message is not modified') || + err.response.description?.includes('message to edit not found') + ) + ) + console.error('[Settings] Unexpected Telegram error:', err); + } + } catch (err) { + console.error('[Settings] Error handling setlang callback:', err); + } }); }; \ No newline at end of file diff --git a/src/commands/modarchive.ts b/src/commands/modarchive.ts index 7d1489e..5f7333b 100644 --- a/src/commands/modarchive.ts +++ b/src/commands/modarchive.ts @@ -24,22 +24,17 @@ async function downloadModule(moduleId: string): Promise { method: 'GET', responseType: 'stream', }); - const disposition = response.headers['content-disposition']; let fileName = moduleId; - if (disposition && disposition.includes('filename=')) { fileName = disposition .split('filename=')[1] .split(';')[0] .replace(/['"]/g, ''); } - - const filePath = path.resolve(__dirname, fileName); - + const filePath = path.join(__dirname, fileName); const writer = fs.createWriteStream(filePath); response.data.pipe(writer); - return new Promise((resolve, reject) => { writer.on('finish', () => resolve({ filePath, fileName })); writer.on('error', reject); @@ -49,39 +44,41 @@ async function downloadModule(moduleId: string): Promise { } } -export default (bot: Telegraf) => { - bot.command(['modarchive', 'tma'], spamwatchMiddleware, async (ctx) => { - const Strings = getStrings(languageCode(ctx)); - const reply_to_message_id = replyToMessageId(ctx); - const moduleId = ctx.message?.text.split(' ')[1]; - - if (Number.isNaN(moduleId) || null) { - return ctx.reply(Strings.maInvalidModule, { - parse_mode: "Markdown", - ...({ reply_to_message_id }) - }); - } - const numberRegex = /^\d+$/; - const isNumber = numberRegex.test(moduleId); - if (isNumber) { - const result = await downloadModule(moduleId); - if (result) { - const { filePath, fileName } = result; - const regexExtension = /\.\w+$/i; - const hasExtension = regexExtension.test(fileName); - if (hasExtension) { - await ctx.replyWithDocument({ source: filePath }, { - caption: fileName, - ...({ reply_to_message_id }) - }); - fs.unlinkSync(filePath); - return; - } - } - } +export const modarchiveHandler = async (ctx: Context) => { + const Strings = getStrings(languageCode(ctx)); + 
const reply_to_message_id = replyToMessageId(ctx); + const moduleId = ctx.message && 'text' in ctx.message && typeof ctx.message.text === 'string' + ? ctx.message.text.split(' ')[1]?.trim() + : undefined; + if (!moduleId || !/^\d+$/.test(moduleId)) { return ctx.reply(Strings.maInvalidModule, { parse_mode: "Markdown", - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); + } + const result = await downloadModule(moduleId); + if (result) { + const { filePath, fileName } = result; + const regexExtension = /\.\w+$/i; + const hasExtension = regexExtension.test(fileName); + if (hasExtension) { + try { + await ctx.replyWithDocument({ source: filePath }, { + caption: fileName, + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) + }); + } finally { + try { fs.unlinkSync(filePath); } catch (e) { /* ignore */ } + } + return; + } + } + return ctx.reply(Strings.maInvalidModule, { + parse_mode: "Markdown", + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; + +export default (bot: Telegraf) => { + bot.command(['modarchive', 'tma'], spamwatchMiddleware, modarchiveHandler); +}; diff --git a/src/commands/ponyapi.ts b/src/commands/ponyapi.ts index daf99c7..7f6320c 100644 --- a/src/commands/ponyapi.ts +++ b/src/commands/ponyapi.ts @@ -53,34 +53,38 @@ function capitalizeFirstLetter(letter: string) { return letter.charAt(0).toUpperCase() + letter.slice(1); } +function sendReply(ctx: Context, text: string, reply_to_message_id?: number) { + return ctx.reply(text, { + parse_mode: 'Markdown', + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) + }); +} + +function sendPhoto(ctx: Context, photo: string, caption: string, reply_to_message_id?: number) { + return ctx.replyWithPhoto(photo, { + caption, + parse_mode: 'Markdown', + ...(reply_to_message_id ? 
{ reply_parameters: { message_id: reply_to_message_id } } : {}) + }); +} + export default (bot: Telegraf) => { bot.command("mlp", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { const Strings = getStrings(languageCode(ctx)); const reply_to_message_id = replyToMessageId(ctx); - - ctx.reply(Strings.ponyApi.helpDesc, { - parse_mode: 'Markdown', - ...({ reply_to_message_id, disable_web_page_preview: true }) - }); + sendReply(ctx, Strings.ponyApi.helpDesc, reply_to_message_id); }); bot.command("mlpchar", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + const { message } = ctx; const reply_to_message_id = replyToMessageId(ctx); const Strings = getStrings(languageCode(ctx) || 'en'); - const userInput = ctx.message.text.split(' ').slice(1).join(' ').replace(" ", "+"); - const { noCharName } = Strings.ponyApi + const userInput = message.text.split(' ').slice(1).join(' ').trim().replace(/\s+/g, '+'); + const { noCharName } = Strings.ponyApi; - if (verifyInput(ctx, userInput, noCharName)) { - return; - } - - // if special characters or numbers (max 30 characters) - if (/[^a-zA-Z\s]/.test(userInput) || userInput.length > 30) { - ctx.reply(Strings.mlpInvalidCharacter, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - return; + if (verifyInput(ctx, userInput, noCharName)) return; + if (!userInput || /[^a-zA-Z\s]/.test(userInput) || userInput.length > 30) { + return sendReply(ctx, Strings.mlpInvalidCharacter, reply_to_message_id); } const capitalizedInput = capitalizeFirstLetter(userInput); @@ -88,62 +92,29 @@ export default (bot: Telegraf) => { try { const response = await axios(apiUrl); - const charactersArray: Character[] = []; - - if (Array.isArray(response.data.data)) { - response.data.data.forEach(character => { - let aliases: string[] = []; - if (character.alias) { - if (typeof character.alias === 'string') { - aliases.push(character.alias); - } else if (Array.isArray(character.alias)) { - aliases = aliases.concat(character.alias); - } - } - - charactersArray.push({ - id: character.id, - name: character.name, - alias: aliases.length > 0 ? aliases.join(', ') : Strings.varStrings.varNone, - url: character.url, - sex: character.sex, - residence: character.residence ? character.residence.replace(/\n/g, ' / ') : Strings.varStrings.varNone, - occupation: character.occupation ? character.occupation.replace(/\n/g, ' / ') : Strings.varStrings.varNone, - kind: character.kind ? character.kind.join(', ') : Strings.varStrings.varNone, - image: character.image - }); - }); - }; - - if (charactersArray.length > 0) { + const data = response.data.data; + if (Array.isArray(data) && data.length > 0) { + const character = data[0]; + const aliases = Array.isArray(character.alias) + ? 
character.alias.join(', ') + : character.alias || Strings.varStrings.varNone; const result = Strings.ponyApi.charRes - .replace("{id}", charactersArray[0].id) - .replace("{name}", charactersArray[0].name) - .replace("{alias}", charactersArray[0].alias) - .replace("{url}", charactersArray[0].url) - .replace("{sex}", charactersArray[0].sex) - .replace("{residence}", charactersArray[0].residence) - .replace("{occupation}", charactersArray[0].occupation) - .replace("{kind}", charactersArray[0].kind); - - ctx.replyWithPhoto(charactersArray[0].image[0], { - caption: `${result}`, - parse_mode: 'Markdown', - ...({ reply_to_message_id, disable_web_page_preview: true }) - }); + .replace("{id}", character.id) + .replace("{name}", character.name) + .replace("{alias}", aliases) + .replace("{url}", character.url) + .replace("{sex}", character.sex) + .replace("{residence}", character.residence ? character.residence.replace(/\n/g, ' / ') : Strings.varStrings.varNone) + .replace("{occupation}", character.occupation ? character.occupation.replace(/\n/g, ' / ') : Strings.varStrings.varNone) + .replace("{kind}", Array.isArray(character.kind) ? character.kind.join(', ') : Strings.varStrings.varNone); + sendPhoto(ctx, character.image[0], result, reply_to_message_id); } else { - ctx.reply(Strings.ponyApi.noCharFound, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - }; - } catch (error) { - const message = Strings.ponyApi.apiErr.replace('{error}', error.message); - ctx.reply(message, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - }; + sendReply(ctx, Strings.ponyApi.noCharFound, reply_to_message_id); + } + } catch (error: any) { + const message = Strings.ponyApi.apiErr.replace('{error}', error.message || 'Unknown error'); + sendReply(ctx, message, reply_to_message_id); + } }); bot.command("mlpep", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { @@ -157,10 +128,10 @@ export default (bot: Telegraf) => { return; } - if (Number(userInput) > 100) { + if (Number(userInput) > 10000) { ctx.reply(Strings.mlpInvalidEpisode, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); return; } @@ -205,21 +176,19 @@ export default (bot: Telegraf) => { ctx.replyWithPhoto(episodeArray[0].image, { caption: `${result}`, parse_mode: 'Markdown', - ...({ reply_to_message_id, disable_web_page_preview: true }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); } else { ctx.reply(Strings.ponyApi.noEpisodeFound, { parse_mode: 'Markdown', - - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; } catch (error) { const message = Strings.ponyApi.apiErr.replace('{error}', error.message); ctx.reply(message, { parse_mode: 'Markdown', - - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; }); @@ -239,7 +208,7 @@ export default (bot: Telegraf) => { if (/[^a-zA-Z\s]/.test(userInput) || userInput.length > 30) { ctx.reply(Strings.mlpInvalidCharacter, { parse_mode: 'Markdown', - ...({ reply_to_message_id }) + ...(reply_to_message_id ? 
{ reply_parameters: { message_id: reply_to_message_id } } : {}) }); return; } @@ -289,21 +258,19 @@ export default (bot: Telegraf) => { ctx.replyWithPhoto(comicArray[0].image, { caption: `${result}`, parse_mode: 'Markdown', - ...({ reply_to_message_id, disable_web_page_preview: true }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); } else { ctx.reply(Strings.ponyApi.noComicFound, { parse_mode: 'Markdown', - - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; } catch (error) { const message = Strings.ponyApi.apiErr.replace('{error}', error.message); ctx.reply(message, { parse_mode: 'Markdown', - - ...({ reply_to_message_id }) + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); }; }); diff --git a/src/commands/randompony.ts b/src/commands/randompony.ts index 175f283..de24016 100644 --- a/src/commands/randompony.ts +++ b/src/commands/randompony.ts @@ -9,39 +9,40 @@ import { replyToMessageId } from '../utils/reply-to-message-id'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); -export default (bot: Telegraf) => { - // TODO: this would greatly benefit from a loading message - bot.command(["rpony", "randompony", "mlpart"], spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { - const Strings = getStrings(languageCode(ctx)); - const reply_to_message_id = replyToMessageId(ctx); - ctx.reply(Strings.ponyApi.searching, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - try { - const response = await axios(Resources.randomPonyApi); - let tags: string[] = []; - - if (response.data.pony.tags) { - if (typeof response.data.pony.tags === 'string') { - tags.push(response.data.pony.tags); - } else if (Array.isArray(response.data.pony.tags)) { - tags = tags.concat(response.data.pony.tags); - } - } - - ctx.replyWithPhoto(response.data.pony.representations.full, { - caption: `${response.data.pony.sourceURL}\n\n${tags.length > 0 ? tags.join(', ') : ''}`, - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - } catch (error) { - const message = Strings.ponyApi.apiErr.replace('{error}', error.message); - ctx.reply(message, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }); - return; - } +export const randomponyHandler = async (ctx: Context & { message: { text: string } }) => { + const Strings = getStrings(languageCode(ctx)); + const reply_to_message_id = replyToMessageId(ctx); + ctx.reply(Strings.ponyApi.searching, { + parse_mode: 'Markdown', + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) }); + try { + const response = await axios(Resources.randomPonyApi); + let tags: string[] = []; + + if (response.data.pony.tags) { + if (typeof response.data.pony.tags === 'string') { + tags.push(response.data.pony.tags); + } else if (Array.isArray(response.data.pony.tags)) { + tags = tags.concat(response.data.pony.tags); + } + } + + ctx.replyWithPhoto(response.data.pony.representations.full, { + caption: `${response.data.pony.sourceURL}\n\n${tags.length > 0 ? tags.join(', ') : ''}`, + parse_mode: 'Markdown', + ...(reply_to_message_id ? { reply_parameters: { message_id: reply_to_message_id } } : {}) + }); + } catch (error) { + const message = Strings.ponyApi.apiErr.replace('{error}', error.message); + ctx.reply(message, { + parse_mode: 'Markdown', + ...(reply_to_message_id ? 
{ reply_parameters: { message_id: reply_to_message_id } } : {}) + }); + return; + } +}; + +export default (bot: Telegraf) => { + bot.command(["rpony", "randompony", "mlpart"], spamwatchMiddleware, randomponyHandler); } \ No newline at end of file diff --git a/src/db/schema.ts b/src/db/schema.ts new file mode 100644 index 0000000..208bc56 --- /dev/null +++ b/src/db/schema.ts @@ -0,0 +1,23 @@ +import { + integer, + pgTable, + varchar, + timestamp, + boolean, + real +} from "drizzle-orm/pg-core"; + +export const usersTable = pgTable("users", { + telegramId: varchar({ length: 255 }).notNull().primaryKey(), + username: varchar({ length: 255 }).notNull(), + firstName: varchar({ length: 255 }).notNull(), + lastName: varchar({ length: 255 }).notNull(), + aiEnabled: boolean().notNull().default(false), + customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"), + aiTemperature: real().notNull().default(0.9), + aiRequests: integer().notNull().default(0), + aiCharacters: integer().notNull().default(0), + languageCode: varchar({ length: 255 }).notNull(), + createdAt: timestamp().notNull().defaultNow(), + updatedAt: timestamp().notNull().defaultNow(), +}); diff --git a/src/locales/config.ts b/src/locales/config.ts new file mode 100644 index 0000000..7da7d37 --- /dev/null +++ b/src/locales/config.ts @@ -0,0 +1,4 @@ +export const langs = [ + { code: 'en', label: 'English' }, + { code: 'pt', label: 'Português' } +]; \ No newline at end of file diff --git a/src/locales/english.json b/src/locales/english.json index fadfcd6..e1ad103 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -1,4 +1,5 @@ { + "userNotFound": "User not found.", "botWelcome": "*Hello! I'm {botName}!*\nI was made with love by some nerds who really love programming!\n\n*By using {botName}, you affirm that you have read to and agree with the privacy policy (/privacy). This helps you understand where your data goes when using this bot.*\n\nAlso, you can use /help to see the bot commands!", "botHelp": "*Hey, I'm {botName}, a simple bot made entirely from scratch in Telegraf and Node.js by some nerds who really love programming.*\n\nCheck out the source code: [Click here to go to GitHub]({sourceLink})\n\nClick on the buttons below to see which commands you can use!\n", "botPrivacy": "Check out [this link]({botPrivacy}) to read the bot's privacy policy.", @@ -53,7 +54,7 @@ "apiKeyErr": "*An API key was not set by the bot owner. Please try again later.*" }, "mainCommands": "ℹ️ Main Commands", - "mainCommandsDesc": "ℹ️ *Main Commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy", + "mainCommandsDesc": "ℹ️ *Main Commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy\n- /settings: Show your user settings", "usefulCommands": "🛠️ Useful Commands", "usefulCommandsDesc": "🛠️ *Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device ``: Search for a device on GSMArena and show its specs.\n/codename | /whatis ``: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima ``: See weather status for a specific location.\n- /modarchive | /tma ``: Download a module from The Mod Archive.\n- /http ``: Send details about a specific HTTP code. 
Example: `/http 404`",
   "funnyCommands": "😂 Funny Commands",
@@ -62,8 +63,15 @@
   "interactiveEmojisDesc": "🎲 *Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!",
   "animalCommands": "🐱 Animals",
   "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat ``: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
-  "aiCmds": "✨ AI Commands",
-  "aiCmdsDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI",
+  "ai": {
+    "helpEntry": "✨ AI Commands",
+    "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI\n- /think ``: Ask a thinking model about a question",
+    "disabled": "✨ AI features are currently disabled",
+    "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
+    "askGenerating": "✨ _{model} is working..._",
+    "askNoMessage": "Please provide a message to ask the model.",
+    "languageCode": "Language"
+  },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",
   "ytDownload": {
@@ -81,6 +89,33 @@
     "noLink": "Please provide a link to a video to download.",
     "botDetection": "My server is being rate limited by the video provider! Please try again later, or ask the bot owner to add their cookies/account."
   },
+  "settings": {
+    "helpEntry": "🔧 Settings",
+    "helpDesc": "🔧 *Settings*\n\n- /settings: Show your settings",
+    "mainSettings": "🔧 *Settings*\n\n- AI Enabled: {aiEnabled}\n- /ai Custom Model: {aiModel}\n- AI Temperature: {aiTemperature}\n- Total AI Requests: {aiRequests}\n- Total AI Characters Sent/Received: {aiCharacters}\n- Language: {languageCode}",
+    "enabled": "Enabled",
+    "disabled": "Disabled",
+    "selectSetting": "Please select a setting to modify or view.",
+    "ai": {
+      "aiEnabled": "AI Enabled",
+      "aiModel": "AI Model",
+      "aiTemperature": "AI Temperature",
+      "aiRequests": "Total AI Requests",
+      "aiCharacters": "Total AI Characters Sent/Received",
+      "languageCode": "Language",
+      "aiEnabledSetTo": "AI Enabled set to {aiEnabled}",
+      "aiModelSetTo": "AI Model set to {aiModel}",
+      "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
+      "back": "Back",
+      "selectSeries": "Please select a model series.",
+      "seriesDescription": "{seriesDescription}",
+      "selectParameterSize": "Please select a parameter size for {seriesLabel}.",
+      "parameterSizeExplanation": "Parameter size (e.g. 2B, 4B) refers to the number of parameters in the model. Larger models may be more capable but require more resources.",
+      "modelSetTo": "Model set to {aiModel} ({parameterSize})"
+    },
+    "languageCodeSetTo": "Language set to {languageCode}",
+    "unknownAction": "Unknown action."
+ }, "botUpdated": "Bot updated with success.\n\n```{result}```", "errorUpdatingBot": "Error updating bot\n\n{error}", "catImgErr": "Sorry, but I couldn't get the cat photo you wanted.", @@ -120,6 +155,13 @@ }, "chatNotFound": "Chat not found.", "noFileProvided": "Please provide a file to send.", - "askGenerating": "✨ _{model} is working..._", - "aiDisabled": "AI features are currently disabled" + "gsmarenaProvidePhoneName": "Please provide the phone name.", + "gsmarenaSearchingFor": "Searching for `{phone}`...", + "gsmarenaNoPhonesFound": "No phones found for `{phone}`.", + "gsmarenaNoPhonesFoundBoth": "No phones found for `{name}` and `{phone}`.", + "gsmarenaSelectDevice": "Please select your device:", + "gsmarenaNotAllowed": "you are not allowed to interact with this.", + "gsmarenaInvalidOrExpired": "Whoops, invalid or expired option. Please try again.", + "gsmarenaDeviceDetails": "these are the details of your device:", + "gsmarenaErrorFetchingDetails": "Error fetching phone details." } \ No newline at end of file diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json index 415eeb1..63b3a4c 100644 --- a/src/locales/portuguese.json +++ b/src/locales/portuguese.json @@ -33,8 +33,8 @@ "funEmojiResult": "*Você lançou {emoji} e obteve *`{value}`*!*\nVocê não sabe o que isso significa? Nem eu!", "gifErr": "*Algo deu errado ao enviar o GIF. Tente novamente mais tarde.*\n\n{err}", "lastFm": { - "helpEntry": "Last.fm", - "helpDesc": "*Last.fm*\n\n- /lt | /lmu | /last | /lfm: Mostra a última música do seu perfil no Last.fm + o número de reproduções.\n- /setuser ``: Define o usuário para o comando acima.", + "helpEntry": "🎵 Last.fm", + "helpDesc": "🎵 *Last.fm*\n\n- /lt | /lmu | /last | /lfm: Mostra a última música do seu perfil no Last.fm + o número de reproduções.\n- /setuser ``: Define o usuário para o comando acima.", "noUser": "*Por favor, forneça um nome de usuário do Last.fm.*\nExemplo: `/setuser `", "noUserSet": "*Você ainda não definiu seu nome de usuário do Last.fm.*\nUse o comando /setuser para definir.\n\nExemplo: `/setuser `", "noRecentTracks": "*Nenhuma faixa recente encontrada para o usuário do Last.fm* `{lastfmUser}`*.*", @@ -52,27 +52,34 @@ "apiErr": "*Ocorreu um erro ao obter o clima. Tente novamente mais tarde.*\n\n`{error}`", "apiKeyErr": "*Uma chave de API não foi definida pelo proprietário do bot. Tente novamente mais tarde.*" }, - "mainCommands": "Comandos principais", - "mainCommandsDesc": "*Comandos principais*\n\n- /help: Exibe a ajuda do bot\n- /start: Inicia o bot\n- /privacy: Leia a política de privacidade do bot", - "usefulCommands": "Comandos úteis", - "usefulCommandsDesc": "*Comandos úteis*\n\n- /chatinfo: Envia informações sobre o grupo\n- /userinfo: Envia informações sobre você\n- /d | /device ``: Pesquisa um dispositivo no GSMArena e mostra suas especificações.\n- /weather | /clima ``: Veja o status do clima para uma localização específica\n- /modarchive | /tma ``: Baixa um módulo do The Mod Archive.\n- /http ``: Envia detalhes sobre um código HTTP específico. 
Exemplo: `/http 404`", - "funnyCommands": "Comandos engraçados", + "mainCommands": "ℹ️ Comandos principais", + "mainCommandsDesc": "ℹ️ *Comandos principais*\n\n- /help: Exibe a ajuda do bot\n- /start: Inicia o bot\n- /privacy: Leia a política de privacidade do bot\n- /settings: Exibe suas configurações", + "usefulCommands": "🛠️ Comandos úteis", + "usefulCommandsDesc": "🛠️ *Comandos úteis*\n\n- /chatinfo: Envia informações sobre o grupo\n- /userinfo: Envia informações sobre você\n- /d | /device ``: Pesquisa um dispositivo no GSMArena e mostra suas especificações.\n- /weather | /clima ``: Veja o status do clima para uma localização específica\n- /modarchive | /tma ``: Baixa um módulo do The Mod Archive.\n- /http ``: Envia detalhes sobre um código HTTP específico. Exemplo: `/http 404`", + "funnyCommands": "😂 Comandos engraçados", "funnyCommandsDesc": "*Comandos engraçados*\n\n- /gay: Verifique se você é gay\n- /furry: Verifique se você é furry\n- /random: Escolhe um número aleatório entre 0-10", - "interactiveEmojis": "Emojis interativos", - "interactiveEmojisDesc": "*Emojis interativos*\n\n- /dice: Jogue um dado\n- /idice: Role infinitamente um dado colorido\n- /slot: Tente combinar as figuras!\n- /ball: Tente chutar a bola no gol!\n- /bowling: Tente derrubar os pinos!\n- /dart: Tente acertar o alvo!", - "animalCommands": "Animais", - "animalCommandsDesc": "*Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat ``: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`", - "aiCmds": "Comandos de IA", - "aiCmdsDesc": "*Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA", + "interactiveEmojis": "🎲 Emojis interativos", + "interactiveEmojisDesc": "🎲 *Emojis interativos*\n\n- /dice: Jogue um dado\n- /idice: Role infinitamente um dado colorido\n- /slot: Tente combinar as figuras!\n- /ball: Tente chutar a bola no gol!\n- /bowling: Tente derrubar os pinos!\n- /dart: Tente acertar o alvo!", + "animalCommands": "🐱 Animais", + "animalCommandsDesc": "🐱 *Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat ``: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`", + "ai": { + "helpEntry": "✨ Comandos de IA", + "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento", + "disabled": "✨ Os recursos de IA estão desativados no momento", + "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...", + "askGenerating": "✨ _{model} está funcionando..._", + "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.", + "languageCode": "Idioma" + }, "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`", "maDownloadError": "Erro ao baixar o arquivo. 
Verifique o ID do módulo e tente novamente.", "ytDownload": { - "helpEntry": "Download de vídeos", - "helpDesc": "*Download de vídeos*\n\n- /yt | /ytdl | /sdl | /dl | /video ``: Baixa um vídeo de algumas plataformas (ex: YouTube, Instagram, Facebook, etc.).\n\nConsulte [este link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) para obter mais informações e saber quais serviços são compatíveis.\n\n*Nota: O Telegram está atualmente limitando os uploads de bots a 50MB, o que significa que se o vídeo que você deseja baixar for maior que 50MB, a qualidade será reduzida para tentar carregá-lo de qualquer maneira. Estamos fazendo o possível para contornar ou corrigir esse problema.*", - "downloadingVid": "*Baixando vídeo...*", + "helpEntry": "📺 Download de vídeos", + "helpDesc": "📺 *Download de vídeos*\n\n- /yt | /ytdl | /sdl | /dl | /video ``: Baixa um vídeo de algumas plataformas (ex: YouTube, Instagram, Facebook, etc.).\n\nConsulte [este link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) para obter mais informações e saber quais serviços são compatíveis.\n\n*Nota: O Telegram está atualmente limitando os uploads de bots a 50MB, o que significa que se o vídeo que você deseja baixar for maior que 50MB, a qualidade será reduzida para tentar carregá-lo de qualquer maneira. Estamos fazendo o possível para contornar ou corrigir esse problema.*", + "downloadingVid": "⬇️ *Baixando vídeo...*", "libNotFound": "*Parece que o executável do yt-dlp não existe no nosso servidor...\n\nNesse caso, o problema está no nosso lado! Aguarde até que tenhamos notado e resolvido o problema.*", - "checkingSize": "Verificando se o vídeo excede o limite de 50 MB...", - "uploadingVid": "*Enviando vídeo...*", + "checkingSize": "🔎 *Verificando se o vídeo excede o limite de 50 MB...*", + "uploadingVid": "⬆️ *Enviando vídeo...*", "msgDesc": "{userMention}*, aqui está o seu vídeo baixado.*", "downloadErr": "*Erro durante o download do vídeo do YT:*\n\n`{err}`", "uploadErr": "Erro ao enviar o arquivo. Tente novamente mais tarde.", @@ -81,6 +88,33 @@ "noLink": "*Por favor, forneça um link de um vídeo para download.*", "botDetection": "Meu servidor está com a taxa limitada pelo provedor de vídeo! Tente novamente mais tarde ou peça ao proprietário do bot para adicionar seus cookies/conta." 
}, + "settings": { + "helpEntry": "🔧 Configurações", + "helpDesc": "🔧 *Configurações*\n\n- /settings: Mostrar suas configurações", + "mainSettings": "🔧 *Configurações*\n\n- Inteligência Artificial Ativado: {aiEnabled}\n- /ai Modelo personalizado: {aiModel}\n- Inteligência Artificial Temperatura: {aiTemperature}\n- Total de Requests: {aiRequests}\n- Total de Caracteres Enviados/Recebidos: {aiCharacters}\n- Idioma: {languageCode}", + "enabled": "Ativado", + "disabled": "Desativado", + "selectSetting": "Por favor, selecione uma configuração para modificar ou visualizar.", + "ai": { + "aiEnabled": "IA", + "aiModel": "Modelo", + "aiTemperature": "Temperatura", + "aiRequests": "Total de Requests", + "aiCharacters": "Total de Caracteres Enviados/Recebidos", + "languageCode": "Idioma", + "aiEnabledSetTo": "Inteligência Artificial definido para {aiEnabled}", + "aiModelSetTo": "Modelo personalizado definido para {aiModel}", + "aiTemperatureSetTo": "Temperatura definida para {aiTemperature}", + "selectSeries": "Por favor, selecione uma série de modelos.", + "seriesDescription": "{seriesDescription}", + "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.", + "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.", + "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})", + "back": "Voltar" + }, + "languageCodeSetTo": "Idioma definido para {languageCode}", + "unknownAction": "Ação desconhecida." + }, "botUpdated": "Bot atualizado com sucesso.\n\n```{result}```", "errorUpdatingBot": "Erro ao atualizar o bot\n\n{error}", "catImgErr": "Desculpe, mas não consegui obter a foto do gato que você queria.", @@ -97,8 +131,8 @@ "resultMsg": "*Código HTTP*: `{code}`\n*Nome*: `{message}`\n*Descrição*: `{description}`" }, "ponyApi": { - "helpEntry": "My Little Pony", - "helpDesc": "*My Little Pony*\n\n- /mlp: Exibe esta mensagem de ajuda.\n- /mlpchar ``: Mostra informações específicas sobre um personagem de My Little Pony em inglês. Exemplo: `/mlpchar twilight`\n- /mlpep: Mostra informações específicas sobre um episódio de My Little Pony em inglês. Exemplo: `/mlpep 136`\n- /mlpcomic ``: Mostra informações específicas sobre uma comic de My Little Pony em inglês. Exemplo: `/mlpcomic Nightmare Rarity`\n- /rpony | /randompony | /mlpart: Envia uma arte aleatória feita pela comunidade de My Little Pony.", + "helpEntry": "🐴 My Little Pony", + "helpDesc": "🐴 *My Little Pony*\n\n- /mlp: Exibe esta mensagem de ajuda.\n- /mlpchar ``: Mostra informações específicas sobre um personagem de My Little Pony em inglês. Exemplo: `/mlpchar twilight`\n- /mlpep: Mostra informações específicas sobre um episódio de My Little Pony em inglês. Exemplo: `/mlpep 136`\n- /mlpcomic ``: Mostra informações específicas sobre uma comic de My Little Pony em inglês. 
Exemplo: `/mlpcomic Nightmare Rarity`\n- /rpony | /randompony | /mlpart: Envia uma arte aleatória feita pela comunidade de My Little Pony.", "charRes": "*{name} (ID: {id})*\n\n*Apelido:* `{alias}`\n*Sexo:* `{sex}`\n*Residência:* `{residence}`\n*Ocupação:* `{occupation}`\n*Tipo:* `{kind}`\n\n*URL no Fandom:*\n[{url}]({url})", "epRes": "*{name} (ID: {id})*\n\n*Temporada:* `{season}`\n*Episódio:* `{episode}`\n*Número do Episódio:* `{overall}`\n*Data de lançamento:* `{airdate}`\n*História por:* `{storyby}`\n*Escrito por:* `{writtenby}`\n*Storyboard:* `{storyboard}`\n\n*URL no Fandom:*\n[{url}]({url})", "comicRes": "*{name} (ID: {id})*\n\n*Série:* `{series}`\n*Roteirista:* `{writer}`\n*Artista:* `{artist}`\n*Colorista:* `{colorist}`\n*Letrista:* `{letterer}`\n*Editor:* `{editor}`\n\n*URL no Fandom:*\n[{url}]({url})", @@ -119,6 +153,14 @@ "apiErr": "Ocorreu um erro ao buscar os dados da API.\n\n`{err}`" }, "noFileProvided": "Por favor, forneça um arquivo para envio.", - "askGenerating": "✨ _{modelo} está funcionando..._", - "aiDisabled": "Os recursos de IA estão desativados no momento" + "gsmarenaProvidePhoneName": "Por favor, forneça o nome do celular.", + "gsmarenaSearchingFor": "Procurando por `{phone}`...", + "gsmarenaNoPhonesFound": "Nenhum celular encontrado para `{phone}`.", + "gsmarenaNoPhonesFoundBoth": "Nenhum celular encontrado para `{name}` e `{phone}`.", + "gsmarenaSelectDevice": "Por favor, selecione seu dispositivo:", + "gsmarenaNotAllowed": "você não tem permissão para interagir com isso.", + "gsmarenaInvalidOrExpired": "Ops! Opção inválida ou expirada. Por favor, tente novamente.", + "gsmarenaDeviceDetails": "estes são os detalhes do seu dispositivo:", + "gsmarenaErrorFetchingDetails": "Erro ao buscar detalhes do celular.", + "userNotFound": "Usuário não encontrado." } diff --git a/src/utils/ensure-user.ts b/src/utils/ensure-user.ts new file mode 100644 index 0000000..0654476 --- /dev/null +++ b/src/utils/ensure-user.ts @@ -0,0 +1,64 @@ +// ENSURE-USER.TS +// by ihatenodejs/Aidan +// +// ----------------------------------------------------------------------- +// +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +import { usersTable } from '../db/schema'; + +export async function ensureUserInDb(ctx, db) { + if (!ctx.from) return; + const telegramId = String(ctx.from.id); + const username = ctx.from.username || ''; + const firstName = ctx.from.first_name || ' '; + const lastName = ctx.from.last_name || ' '; + const languageCode = ctx.from.language_code || 'en'; + + const existing = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, telegramId), limit: 1 }); + if (existing.length === 0) { + const userToInsert = { + telegramId, + username, + firstName, + lastName, + languageCode, + aiEnabled: false, + customAiModel: "deepseek-r1:1.5b", + aiTemperature: 0.9, + aiRequests: 0, + aiCharacters: 0, + }; + console.log('[💽 DB] Inserting user with values:', userToInsert); + try { + await db.insert(usersTable).values(userToInsert); + console.log(`[💽 DB] Added new user: ${username || firstName} (${telegramId})`); + } catch (err) { + console.error('[💽 DB] Error inserting user:', err); + throw err; + } + } +} diff --git a/src/utils/log.ts b/src/utils/log.ts index 67019a8..2f7e9ac 100644 --- a/src/utils/log.ts +++ b/src/utils/log.ts @@ -63,19 +63,24 @@ class Logger { console.log(`[✨ AI | PROMPT] ${prompt.length} chars input`) } - logError(error: any): void { - if (error.response?.error_code === 429) { - const retryAfter = error.response.parameters?.retry_after || 1 - console.error(`[✨ AI | RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`) - } else if (error.response?.error_code === 400 && error.response?.description?.includes("can't parse entities")) { - console.error("[✨ AI | PARSE_ERROR] Markdown parsing failed, retrying with plain text") - } else { - const errorDetails = { - code: error.response?.error_code, - description: error.response?.description, - method: error.on?.method + logError(error: unknown): void { + if (typeof error === 'object' && error !== null && 'response' in error) { + const err = error as { response?: { error_code?: number, parameters?: { retry_after?: number }, description?: string }, on?: { method?: string } }; + if (err.response?.error_code === 429) { + const retryAfter = err.response.parameters?.retry_after || 1; + console.error(`[✨ AI | RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`); + } else if (err.response?.error_code === 400 && err.response?.description?.includes("can't parse entities")) { + console.error("[✨ AI | PARSE_ERROR] Markdown parsing failed, retrying with plain text"); + } else { + const errorDetails = { + code: err.response?.error_code, + description: err.response?.description, + method: err.on?.method + }; + console.error("[✨ AI | ERROR]", JSON.stringify(errorDetails, null, 2)); } - console.error("[✨ AI | ERROR]", JSON.stringify(errorDetails, null, 2)) + } else { + console.error("[✨ AI | ERROR]", error); } } } diff --git a/src/utils/rate-limiter.ts b/src/utils/rate-limiter.ts index 777bb4f..b65ebb2 100644 --- a/src/utils/rate-limiter.ts +++ b/src/utils/rate-limiter.ts @@ -90,7 +90,14 @@ class RateLimiter { return chunks } - private handleTelegramError(error: unknown, messageKey: string, options: any, ctx: Context, chatId: number, messageId: number): boolean { + private handleTelegramError( + error: unknown, + messageKey: string, + options: Record, + ctx: Context, + chatId: number, + messageId: number + ): boolean { if (!isTelegramError(error)) return false if (error.response.error_code === 429) { const retryAfter = error.response.parameters?.retry_after || 1 @@ 
-130,7 +137,7 @@ class RateLimiter { ctx: Context, chatId: number, messageId: number, - options: any + options: Record ): Promise { const messageKey = this.getMessageKey(chatId, messageId) const latestText = this.pendingUpdates.get(messageKey) @@ -184,7 +191,7 @@ class RateLimiter { const newMessage = await ctx.telegram.sendMessage(chatId, chunk, { ...options, reply_to_message_id: messageId - }) + } as any) logger.logChunk(chatId, newMessage.message_id, chunk, true) this.overflowMessages.set(messageKey, newMessage.message_id) } @@ -226,7 +233,7 @@ class RateLimiter { chatId: number, messageId: number, text: string, - options: any + options: Record ): Promise { const messageKey = this.getMessageKey(chatId, messageId) this.pendingUpdates.set(messageKey, text) From 04271f87b1bf7f602d986871dcff4b7fb32a0a48 Mon Sep 17 00:00:00 2001 From: Aidan Date: Mon, 30 Jun 2025 02:24:18 -0400 Subject: [PATCH 07/21] remove log --- src/utils/ensure-user.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/utils/ensure-user.ts b/src/utils/ensure-user.ts index 0654476..7726ffb 100644 --- a/src/utils/ensure-user.ts +++ b/src/utils/ensure-user.ts @@ -52,7 +52,6 @@ export async function ensureUserInDb(ctx, db) { aiRequests: 0, aiCharacters: 0, }; - console.log('[💽 DB] Inserting user with values:', userToInsert); try { await db.insert(usersTable).values(userToInsert); console.log(`[💽 DB] Added new user: ${username || firstName} (${telegramId})`); From 5270d2cae5a839cbd2889167fa8c875eb9277a87 Mon Sep 17 00:00:00 2001 From: Aidan Date: Mon, 30 Jun 2025 11:24:51 -0400 Subject: [PATCH 08/21] cleanup, bug fixes, better markdown parsing, better model display --- src/commands/ai.ts | 109 ++++++++++++++++++++++-------------- src/commands/main.ts | 6 +- src/locales/english.json | 4 +- src/locales/portuguese.json | 4 +- 4 files changed, 77 insertions(+), 46 deletions(-) diff --git a/src/commands/ai.ts b/src/commands/ai.ts index 4431f56..97a8c48 100644 --- a/src/commands/ai.ts +++ b/src/commands/ai.ts @@ -220,6 +220,10 @@ function extractAxiosErrorMessage(error: unknown): string { return 'An unexpected error occurred.'; } +function escapeMarkdown(text: string): string { + return text.replace(/([*_])/g, '\\$1'); +} + async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> { const Strings = getStrings(languageCode(ctx)); if (!ctx.chat) { @@ -228,6 +232,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me error: Strings.unexpectedErr.replace("{error}", "No chat found"), }; } + const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`; try { const aiResponse = await axios.post( `${process.env.ollamaApi}/api/generate`, @@ -246,6 +251,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me let fullResponse = ""; let thoughts = ""; let lastUpdate = Date.now(); + let sentHeader = false; const stream: NodeJS.ReadableStream = aiResponse.data as any; for await (const chunk of stream) { const lines = chunk.toString().split('\n'); @@ -275,23 +281,24 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me if (model === thinking_model) { let patchedThoughts = ln.response; const thinkTagRx = /([\s\S]*?)<\/think>/g; - patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? 
'`Thinking...`' + p1 + '`Finished thinking`' : '');
-              patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`');
-              patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`');
+              patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`' + Strings.ai.thinking + '`' + p1 + '`' + Strings.ai.finishedThinking + '`' : '');
+              patchedThoughts = patchedThoughts.replace(/<think>/g, '`' + Strings.ai.thinking + '`');
+              patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`' + Strings.ai.finishedThinking + '`');
               thoughts += patchedThoughts;
               fullResponse += patchedThoughts;
             } else {
               fullResponse += ln.response;
             }
-            if (now - lastUpdate >= 1000) {
+            if (now - lastUpdate >= 1000 || !sentHeader) {
               await rateLimiter.editMessageWithRetry(
                 ctx,
                 ctx.chat.id,
                 replyGenerating.message_id,
-                thoughts,
+                modelHeader + fullResponse,
                 { parse_mode: 'Markdown' }
               );
               lastUpdate = now;
+              sentHeader = true;
             }
           }
         }
@@ -315,7 +322,7 @@
           Strings.ai.pulling.replace("{model}", model),
           { parse_mode: 'Markdown' }
         );
-        console.log(`[✨ AI | i] Pulling ${model} from ollama...`);
+        console.log(`[✨ AI] Pulling ${model} from ollama...`);
        try {
          await axios.post(
            `${process.env.ollamaApi}/api/pull`,
@@ -330,13 +337,13 @@
          console.error("[✨ AI | !] Pull error:", pullMsg);
          return {
            success: false,
-            error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
+            error: `❌ Something went wrong while pulling ${escapeMarkdown(model)}: ${escapeMarkdown(pullMsg)}`,
          };
        }
-        console.log(`[✨ AI | i] ${model} pulled successfully`);
+        console.log(`[✨ AI] ${model} pulled successfully`);
        return {
          success: true,
-          response: `✅ Pulled ${model} successfully, please retry the command.`,
+          response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`,
        };
      }
    }
@@ -347,13 +354,13 @@
   }
 }
 
-async function handleAiReply(ctx: TextContext, db: NodePgDatabase, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
   const Strings = getStrings(languageCode(ctx));
   const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature);
   if (!aiResponse) return;
   if (!ctx.chat) return;
+  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
   if (aiResponse.success && aiResponse.response) {
-    const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
@@ -387,6 +394,14 @@ async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase m.name === name);
+    if (found) return found.label;
+  }
+  return name;
+}
+
 export default (bot: Telegraf, db: NodePgDatabase) => {
   const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? 
`${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" @@ -427,44 +442,56 @@ export default (bot: Telegraf, db: NodePgDatabase) => { logger.logPrompt(fixedMsg) const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, db, model, prompt, replyGenerating, aiTemperature) + await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature) }) bot.command(["ai"], spamwatchMiddleware, async (ctx) => { - if (!ctx.message || !('text' in ctx.message)) return - const textCtx = ctx as TextContext - const reply_to_message_id = replyToMessageId(textCtx) - const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) - const message = textCtx.message.text - const author = ("@" + ctx.from?.username) || ctx.from?.first_name + try { + if (!ctx.message || !("text" in ctx.message)) return + const textCtx = ctx as TextContext + const reply_to_message_id = replyToMessageId(textCtx) + const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) + const message = textCtx.message.text + const author = ("@" + ctx.from?.username) || ctx.from?.first_name - logger.logCmdStart(author, "ask") + logger.logCmdStart(author, "ask") - if (!process.env.ollamaApi) { - await ctx.reply(Strings.ai.disabled, { + if (!process.env.ollamaApi) { + await ctx.reply(Strings.ai.disabled, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + + const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim() + if (fixedMsg.length < 1) { + await ctx.reply(Strings.ai.askNoMessage, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + + const modelLabel = getModelLabelByName(customAiModel) + const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), { parse_mode: 'Markdown', ...({ reply_to_message_id }) }) - return + + logger.logPrompt(fixedMsg) + + const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) + await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature) + } catch (err) { + const Strings = getStrings(languageCode(ctx)); + if (ctx && ctx.reply) { + try { + await ctx.reply(Strings.unexpectedErr.replace("{error}", (err && err.message ? err.message : String(err))), { parse_mode: 'Markdown' }) + } catch (e) { + console.error("[✨ AI | !] 
Failed to send error reply:", e) + } + } } - - const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim() - if (fixedMsg.length < 1) { - await ctx.reply(Strings.ai.askNoMessage, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return - } - - const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", customAiModel), { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - - logger.logPrompt(fixedMsg) - - const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, db, customAiModel, prompt, replyGenerating, aiTemperature) }) } \ No newline at end of file diff --git a/src/commands/main.ts b/src/commands/main.ts index a6c48ba..fe76037 100644 --- a/src/commands/main.ts +++ b/src/commands/main.ts @@ -7,7 +7,7 @@ import * as schema from '../db/schema'; import { eq } from 'drizzle-orm'; import { ensureUserInDb } from '../utils/ensure-user'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; -import { models } from './ai'; +import { models, getModelLabelByName } from './ai'; import { langs } from '../locales/config'; type UserRow = typeof schema.usersTable.$inferSelect; @@ -54,7 +54,7 @@ function getSettingsMenu(user: UserRow, Strings: any): SettingsMenu { inline_keyboard: [ [ { text: `✨ ${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: 'settings_aiEnabled' }, - { text: `🧠 ${Strings.settings.ai.aiModel}: ${user.customAiModel}`, callback_data: 'settings_aiModel' } + { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: 'settings_aiModel' } ], [ { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: 'settings_aiTemperature' }, @@ -78,7 +78,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled ).replace( /{aiModel}/g, - user.customAiModel + getModelLabelByName(user.customAiModel) ).replace( /{aiTemperature}/g, user.aiTemperature.toString() diff --git a/src/locales/english.json b/src/locales/english.json index e1ad103..cd83da6 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -70,7 +70,9 @@ "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...", "askGenerating": "✨ _{model} is working..._", "askNoMessage": "Please provide a message to ask the model.", - "languageCode": "Language" + "languageCode": "Language", + "thinking": "Thinking...", + "finishedThinking": "Finished thinking" }, "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`", "maDownloadError": "Error downloading the file. Check the module ID and try again.", diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json index 63b3a4c..2d04a8f 100644 --- a/src/locales/portuguese.json +++ b/src/locales/portuguese.json @@ -69,7 +69,9 @@ "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...", "askGenerating": "✨ _{model} está funcionando..._", "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.", - "languageCode": "Idioma" + "languageCode": "Idioma", + "thinking": "Pensando...", + "finishedThinking": "Pensamento finalizado" }, "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`", "maDownloadError": "Erro ao baixar o arquivo. 
Verifique o ID do módulo e tente novamente.", From df49bc4157dcfed668e8ecb3f8a85661cdd32387 Mon Sep 17 00:00:00 2001 From: Aidan Date: Mon, 30 Jun 2025 20:24:42 -0400 Subject: [PATCH 09/21] KOW-27 commands respect aiEnabled now, message sending fix, show warning model cant use links, add phi, deepseek 7b, clean --- src/commands/ai.ts | 62 +++++++++++++++++++++++++++++-------- src/locales/english.json | 4 ++- src/locales/portuguese.json | 4 ++- src/utils/rate-limiter.ts | 7 +++-- 4 files changed, 59 insertions(+), 18 deletions(-) diff --git a/src/commands/ai.ts b/src/commands/ai.ts index 97a8c48..de69903 100644 --- a/src/commands/ai.ts +++ b/src/commands/ai.ts @@ -69,7 +69,7 @@ interface OllamaResponse { export const models: ModelInfo[] = [ { name: 'gemma3n', - label: 'Gemma3n', + label: 'gemma3n', descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.', descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.', models: [ @@ -79,7 +79,7 @@ export const models: ModelInfo[] = [ }, { name: 'gemma3-abliterated', - label: 'Gemma3 Uncensored', + label: 'gemma3 Uncensored', descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.', descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.', models: [ @@ -103,7 +103,18 @@ export const models: ModelInfo[] = [ descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.', models: [ { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' }, + { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' }, { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' }, + { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' }, + ] + }, + { + name: 'phi3', + label: 'Phi3', + descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.', + descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.', + models: [ + { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' }, ] } ]; @@ -224,7 +235,11 @@ function escapeMarkdown(text: string): string { return text.replace(/([*_])/g, '\\$1'); } -async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> { +function containsUrls(text: string): boolean { + return text.includes('http://') || text.includes('https://'); +} + +async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string): Promise<{ success: boolean; response?: string; error?: string }> { const Strings = getStrings(languageCode(ctx)); if (!ctx.chat) { return { @@ -233,6 +248,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me }; } const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`; + const urlWarning = containsUrls(originalMessage) ? 
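// Annotation (editor's note, not part of the original patch): containsUrls() above only checks for
// literal "http://" / "https://" substrings, so the urlWarning string is prepended to every streamed
// edit whenever the user's prompt appears to contain a link the model cannot actually open.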
Strings.ai.urlWarning : ''; + try { const aiResponse = await axios.post( `${process.env.ollamaApi}/api/generate`, @@ -289,12 +306,12 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me } else { fullResponse += ln.response; } - if (now - lastUpdate >= 1000 || !sentHeader) { + if (now - lastUpdate >= 5000 || !sentHeader) { await rateLimiter.editMessageWithRetry( ctx, ctx.chat.id, replyGenerating.message_id, - modelHeader + fullResponse, + modelHeader + urlWarning + fullResponse, { parse_mode: 'Markdown' } ); lastUpdate = now; @@ -354,18 +371,21 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me } } -async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) { +async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string) { const Strings = getStrings(languageCode(ctx)); - const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature); + const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage); if (!aiResponse) return; if (!ctx.chat) return; const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`; + + const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : ''; + if (aiResponse.success && aiResponse.response) { await rateLimiter.editMessageWithRetry( ctx, ctx.chat.id, replyGenerating.message_id, - modelHeader + aiResponse.response, + modelHeader + urlWarning + aiResponse.response, { parse_mode: 'Markdown' } ); return; @@ -411,7 +431,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { const model = isAsk ? flash_model : thinking_model const textCtx = ctx as TextContext const reply_to_message_id = replyToMessageId(textCtx) - const { Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) + const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) const message = textCtx.message.text const author = ("@" + ctx.from?.username) || ctx.from?.first_name @@ -425,6 +445,14 @@ export default (bot: Telegraf, db: NodePgDatabase) => { return } + if (!user.aiEnabled) { + await ctx.reply(Strings.ai.disabledForUser, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim() if (fixedMsg.length < 1) { await ctx.reply(Strings.ai.askNoMessage, { @@ -442,7 +470,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { logger.logPrompt(fixedMsg) const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature) + await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg) }) bot.command(["ai"], spamwatchMiddleware, async (ctx) => { @@ -450,7 +478,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { if (!ctx.message || !("text" in ctx.message)) return const textCtx = ctx as TextContext const reply_to_message_id = replyToMessageId(textCtx) - const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) + const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) const message = textCtx.message.text const author = ("@" + ctx.from?.username) || ctx.from?.first_name @@ -464,6 +492,14 @@ export default (bot: Telegraf, db: NodePgDatabase) => { return } + if 
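// Annotation (editor's note, not part of the original patch): per-user opt-out — getUserWithStringsAndModel
// now also returns the user row, and the command bails out early with Strings.ai.disabledForUser
// when the account has aiEnabled switched off in /settings.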
(!user.aiEnabled) { + await ctx.reply(Strings.ai.disabledForUser, { + parse_mode: 'Markdown', + ...({ reply_to_message_id }) + }) + return + } + const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim() if (fixedMsg.length < 1) { await ctx.reply(Strings.ai.askNoMessage, { @@ -482,7 +518,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { logger.logPrompt(fixedMsg) const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature) + await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg) } catch (err) { const Strings = getStrings(languageCode(ctx)); if (ctx && ctx.reply) { @@ -494,4 +530,4 @@ export default (bot: Telegraf, db: NodePgDatabase) => { } } }) -} \ No newline at end of file +} diff --git a/src/locales/english.json b/src/locales/english.json index cd83da6..74aa29e 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -67,12 +67,14 @@ "helpEntry": "✨ AI Commands", "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI\n- /think ``: Ask a thinking model about a question", "disabled": "✨ AI features are currently disabled", + "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings", "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...", "askGenerating": "✨ _{model} is working..._", "askNoMessage": "Please provide a message to ask the model.", "languageCode": "Language", "thinking": "Thinking...", - "finishedThinking": "Finished thinking" + "finishedThinking": "Finished thinking", + "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n" }, "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`", "maDownloadError": "Error downloading the file. Check the module ID and try again.", diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json index 2d04a8f..81959af 100644 --- a/src/locales/portuguese.json +++ b/src/locales/portuguese.json @@ -66,12 +66,14 @@ "helpEntry": "✨ Comandos de IA", "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento", "disabled": "✨ Os recursos de IA estão desativados no momento", + "disabledForUser": "✨ Os recursos de IA estão desativados para sua conta. Você pode ativá-los em /settings", "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...", "askGenerating": "✨ _{model} está funcionando..._", "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.", "languageCode": "Idioma", "thinking": "Pensando...", - "finishedThinking": "Pensamento finalizado" + "finishedThinking": "Pensamento finalizado", + "urlWarning": "⚠️ *Aviso: Não posso acessar ou abrir links. Por favor, forneça o conteúdo diretamente se precisar que eu analise algo de um site.*\n\n" }, "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`", "maDownloadError": "Erro ao baixar o arquivo. 
Verifique o ID do módulo e tente novamente.", diff --git a/src/utils/rate-limiter.ts b/src/utils/rate-limiter.ts index b65ebb2..10673d9 100644 --- a/src/utils/rate-limiter.ts +++ b/src/utils/rate-limiter.ts @@ -32,7 +32,7 @@ import { Context } from 'telegraf' import { logger } from './log' class RateLimiter { - private lastEditTime: number = 0 + private lastEditTimes: Map = new Map() private readonly minInterval: number = 5000 private pendingUpdates: Map = new Map() private updateQueue: Map = new Map() @@ -144,7 +144,8 @@ class RateLimiter { if (!latestText) return const now = Date.now() - const timeSinceLastEdit = now - this.lastEditTime + const lastEditTime = this.lastEditTimes.get(messageKey) || 0 + const timeSinceLastEdit = now - lastEditTime await this.waitForRateLimit(chatId, messageId) if (timeSinceLastEdit < this.minInterval) { @@ -217,7 +218,7 @@ class RateLimiter { } this.pendingUpdates.delete(messageKey) } - this.lastEditTime = Date.now() + this.lastEditTimes.set(messageKey, Date.now()) this.updateQueue.delete(messageKey) } catch (error: unknown) { if (!this.handleTelegramError(error, messageKey, options, ctx, chatId, messageId)) { From 23ebd021f38b531039d8ea7678b7f26ac15123e2 Mon Sep 17 00:00:00 2001 From: Aidan Date: Mon, 30 Jun 2025 23:43:30 -0400 Subject: [PATCH 10/21] ai queue, better markdown parsing, refactor, better feedback --- README.md | 1 + src/commands/ai.ts | 319 ++++++++++++++++++++++-------------- src/commands/main.ts | 8 +- src/locales/english.json | 45 +++-- src/locales/portuguese.json | 45 +++-- src/plugins/verifyInput.ts | 28 ++-- 6 files changed, 273 insertions(+), 173 deletions(-) diff --git a/README.md b/README.md index e035285..ba6ecef 100644 --- a/README.md +++ b/README.md @@ -117,6 +117,7 @@ If you prefer to use Docker directly, you can use these instructions instead. - **handlerTimeout** (optional): How long handlers will wait before timing out. Set this high if using large AI models. - **flashModel** (optional): Which model will be used for /ask - **thinkingModel** (optional): Which model will be used for /think +- **updateEveryChars** (optional): The amount of chars until message update triggers (for streaming response) - **databaseUrl**: Database server configuration (see `.env.example`) - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group. - **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc. diff --git a/src/commands/ai.ts b/src/commands/ai.ts index de69903..3781e9f 100644 --- a/src/commands/ai.ts +++ b/src/commands/ai.ts @@ -119,31 +119,17 @@ export const models: ModelInfo[] = [ } ]; -const enSystemPrompt = `You are a plaintext-only, helpful assistant called {botName}. -Current Date/Time (UTC): {date} - ---- - -Respond to the user's message: -{message}` - -const ptSystemPrompt = `Você é um assistente de texto puro e útil chamado {botName}. 
-Data/Hora atual (UTC): {date} - ---- - -Responda à mensagem do usuário: -{message}` - -async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase, botName: string): Promise { +async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase, botName: string, message: string): Promise { const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); if (user.length === 0) await ensureUserInDb(ctx, db); const userData = user[0]; const lang = userData?.languageCode || "en"; + const Strings = getStrings(lang); const utcDate = new Date().toISOString(); - const prompt = lang === "pt" - ? ptSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text) - : enSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text); + const prompt = Strings.ai.systemPrompt + .replace("{botName}", botName) + .replace("{date}", utcDate) + .replace("{message}", message); return prompt; } @@ -156,6 +142,51 @@ export function sanitizeForJson(text: string): string { .replace(/\t/g, '\\t') } +function sanitizeMarkdownForTelegram(text: string): string { + let sanitizedText = text; + + const replacements: string[] = []; + const addReplacement = (match: string): string => { + replacements.push(match); + return `___PLACEHOLDER_${replacements.length - 1}___`; + }; + + sanitizedText = sanitizedText.replace(/```([\s\S]*?)```/g, addReplacement); + sanitizedText = sanitizedText.replace(/`([^`]+)`/g, addReplacement); + sanitizedText = sanitizedText.replace(/\[([^\]]+)\]\(([^)]+)\)/g, addReplacement); + + const parts = sanitizedText.split(/(___PLACEHOLDER_\d+___)/g); + const processedParts = parts.map(part => { + if (part.match(/___PLACEHOLDER_\d+___/)) { + return part; + } else { + let processedPart = part; + processedPart = processedPart.replace(/^(#{1,6})\s+(.+)/gm, '*$2*'); + processedPart = processedPart.replace(/^(\s*)[-*]\s+/gm, '$1- '); + processedPart = processedPart.replace(/\*\*(.*?)\*\*/g, '*$1*'); + processedPart = processedPart.replace(/__(.*?)__/g, '*$1*'); + processedPart = processedPart.replace(/(^|\s)\*(?!\*)([^*]+?)\*(?!\*)/g, '$1_$2_'); + processedPart = processedPart.replace(/(^|\s)_(?!_)([^_]+?)_(?!_)/g, '$1_$2_'); + processedPart = processedPart.replace(/~~(.*?)~~/g, '~$1~'); + processedPart = processedPart.replace(/^\s*┃/gm, '>'); + processedPart = processedPart.replace(/^>\s?/gm, '> '); + + return processedPart; + } + }); + + sanitizedText = processedParts.join(''); + + sanitizedText = sanitizedText.replace(/___PLACEHOLDER_(\d+)___/g, (_, idx) => replacements[Number(idx)]); + + const codeBlockCount = (sanitizedText.match(/```/g) || []).length; + if (codeBlockCount % 2 !== 0) { + sanitizedText += '\n```'; + } + + return sanitizedText; +} + export async function preChecks() { const envs = [ "ollamaApi", @@ -232,7 +263,7 @@ function extractAxiosErrorMessage(error: unknown): string { } function escapeMarkdown(text: string): string { - return text.replace(/([*_])/g, '\\$1'); + return text.replace(/([_*\[\]()`>#\+\-=|{}.!~])/g, '\\$1'); } function containsUrls(text: string): boolean { @@ -244,10 +275,14 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me if (!ctx.chat) { return { success: false, - error: Strings.unexpectedErr.replace("{error}", "No chat found"), + error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound), }; } - const modelHeader = `🤖 *${model}* | 🌡️ 
*${aiTemperature}*\n\n`; + let status = Strings.ai.statusWaitingRender; + let modelHeader = Strings.ai.modelHeader + .replace("{model}", model) + .replace("{temperature}", aiTemperature) + .replace("{status}", status) + "\n\n"; const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : ''; try { @@ -267,8 +302,9 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me ); let fullResponse = ""; let thoughts = ""; - let lastUpdate = Date.now(); + let lastUpdateCharCount = 0; let sentHeader = false; + let firstChunk = true; const stream: NodeJS.ReadableStream = aiResponse.data as any; for await (const chunk of stream) { const lines = chunk.toString().split('\n'); @@ -293,7 +329,6 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me logger.logThinking(ctx.chat.id, replyGenerating.message_id, false); } } - const now = Date.now(); if (ln.response) { if (model === thinking_model) { let patchedThoughts = ln.response; @@ -306,20 +341,51 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me } else { fullResponse += ln.response; } - if (now - lastUpdate >= 5000 || !sentHeader) { + if (firstChunk) { + status = Strings.ai.statusWaitingRender; + modelHeader = Strings.ai.modelHeader + .replace("{model}", model) + .replace("{temperature}", aiTemperature) + .replace("{status}", status) + "\n\n"; await rateLimiter.editMessageWithRetry( ctx, ctx.chat.id, replyGenerating.message_id, - modelHeader + urlWarning + fullResponse, + modelHeader + urlWarning + escapeMarkdown(fullResponse), { parse_mode: 'Markdown' } ); - lastUpdate = now; + lastUpdateCharCount = fullResponse.length; + sentHeader = true; + firstChunk = false; + continue; + } + const updateEveryChars = Number(process.env.updateEveryChars) || 100; + if (fullResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + urlWarning + escapeMarkdown(fullResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = fullResponse.length; sentHeader = true; } } } } + status = Strings.ai.statusRendering; + modelHeader = Strings.ai.modelHeader + .replace("{model}", model) + .replace("{temperature}", aiTemperature) + .replace("{status}", status) + "\n\n"; + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + urlWarning + escapeMarkdown(fullResponse), + { parse_mode: 'Markdown' } + ); return { success: true, response: fullResponse, @@ -360,7 +426,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me console.log(`[✨ AI] ${model} pulled successfully`); return { success: true, - response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`, + response: Strings.ai.pulled.replace("{model}", escapeMarkdown(model)), }; } } @@ -376,16 +442,18 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage); if (!aiResponse) return; if (!ctx.chat) return; - const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`; - - const urlWarning = containsUrls(originalMessage) ? 
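// Annotation (editor's note, not part of the original patch): on success the header is rebuilt with
// Strings.ai.statusComplete and the full reply is re-rendered through sanitizeMarkdownForTelegram(),
// whereas the streaming edits above only pass the partial text through escapeMarkdown(); the same
// urlWarning prefix is kept so the finished message matches the streamed one.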
Strings.ai.urlWarning : ''; - if (aiResponse.success && aiResponse.response) { + const status = Strings.ai.statusComplete; + const modelHeader = Strings.ai.modelHeader + .replace("{model}", model) + .replace("{temperature}", aiTemperature) + .replace("{status}", status) + "\n\n"; + const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : ''; await rateLimiter.editMessageWithRetry( ctx, ctx.chat.id, replyGenerating.message_id, - modelHeader + urlWarning + aiResponse.response, + modelHeader + urlWarning + sanitizeMarkdownForTelegram(aiResponse.response), { parse_mode: 'Markdown' } ); return; @@ -425,109 +493,112 @@ export function getModelLabelByName(name: string): string { export default (bot: Telegraf, db: NodePgDatabase) => { const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" - bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => { - if (!ctx.message || !('text' in ctx.message)) return - const isAsk = ctx.message.text.startsWith("/ask") - const model = isAsk ? flash_model : thinking_model - const textCtx = ctx as TextContext - const reply_to_message_id = replyToMessageId(textCtx) - const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) - const message = textCtx.message.text - const author = ("@" + ctx.from?.username) || ctx.from?.first_name + interface AiRequest { + task: () => Promise; + ctx: TextContext; + wasQueued: boolean; + } - logger.logCmdStart(author, model === flash_model ? "ask" : "think") + const requestQueue: AiRequest[] = []; + let isProcessing = false; + + async function processQueue() { + if (isProcessing || requestQueue.length === 0) { + return; + } + + isProcessing = true; + const { task, ctx, wasQueued } = requestQueue.shift()!; + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + + try { + if (wasQueued) { + await ctx.reply(Strings.ai.startingProcessing, { + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), + parse_mode: 'Markdown' + }); + } + await task(); + } catch (error) { + console.error("[✨ AI | !] Error processing task:", error); + const errorMessage = error instanceof Error ? error.message : String(error); + await ctx.reply(Strings.unexpectedErr.replace("{error}", errorMessage), { + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), + parse_mode: 'Markdown' + }); + } finally { + isProcessing = false; + processQueue(); + } + } + + async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') { + const reply_to_message_id = replyToMessageId(ctx); + const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(ctx, db); + const message = ctx.message.text; + const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown"; + + let model: string; + let fixedMsg: string; + + if (command === 'ai') { + model = customAiModel || flash_model; + fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim(); + logger.logCmdStart(author, "ask"); + } else { + model = command === 'ask' ? 
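// Annotation (editor's note, not part of the original patch): model routing in the unified handler —
// /ai uses the user's customAiModel (falling back to flash_model), while /ask maps to flash_model and
// /think to thinking_model; each branch also strips its own command prefix from the prompt text.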
flash_model : thinking_model; + fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim(); + logger.logCmdStart(author, command); + } if (!process.env.ollamaApi) { - await ctx.reply(Strings.ai.disabled, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return + await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; } if (!user.aiEnabled) { - await ctx.reply(Strings.ai.disabledForUser, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return + await ctx.reply(Strings.ai.disabledForUser, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; } - const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim() if (fixedMsg.length < 1) { - await ctx.reply(Strings.ai.askNoMessage, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return + await ctx.reply(Strings.ai.askNoMessage, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; } - const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", model), { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - - logger.logPrompt(fixedMsg) - - const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg) - }) - - bot.command(["ai"], spamwatchMiddleware, async (ctx) => { - try { - if (!ctx.message || !("text" in ctx.message)) return - const textCtx = ctx as TextContext - const reply_to_message_id = replyToMessageId(textCtx) - const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db) - const message = textCtx.message.text - const author = ("@" + ctx.from?.username) || ctx.from?.first_name - - logger.logCmdStart(author, "ask") - - if (!process.env.ollamaApi) { - await ctx.reply(Strings.ai.disabled, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return - } - - if (!user.aiEnabled) { - await ctx.reply(Strings.ai.disabledForUser, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return - } - - const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim() - if (fixedMsg.length < 1) { - await ctx.reply(Strings.ai.askNoMessage, { - parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) - return - } - - const modelLabel = getModelLabelByName(customAiModel) + const task = async () => { + const modelLabel = getModelLabelByName(model); const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), { parse_mode: 'Markdown', - ...({ reply_to_message_id }) - }) + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + logger.logPrompt(fixedMsg); + const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg)); + await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg); + }; - logger.logPrompt(fixedMsg) - - const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName)) - await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg) - } catch (err) { - const Strings = getStrings(languageCode(ctx)); - if (ctx && ctx.reply) { - try { - await ctx.reply(Strings.unexpectedErr.replace("{error}", (err && err.message ? 
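// Annotation (editor's note, not part of the original patch): this inline try/catch is removed because
// requests are now pushed onto requestQueue and executed one at a time by processQueue(), which owns
// the error reply (Strings.unexpectedErr) as well as the queue-position and "starting" feedback messages.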
err.message : String(err))), { parse_mode: 'Markdown' }) - } catch (e) { - console.error("[✨ AI | !] Failed to send error reply:", e) - } - } + if (isProcessing) { + requestQueue.push({ task, ctx, wasQueued: true }); + const position = requestQueue.length; + await ctx.reply(Strings.ai.inQueue.replace("{position}", String(position)), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } else { + requestQueue.push({ task, ctx, wasQueued: false }); + processQueue(); } - }) + } + + bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => { + if (!ctx.message || !('text' in ctx.message)) return; + const command = ctx.message.text.startsWith('/ask') ? 'ask' : 'think'; + await aiCommandHandler(ctx as TextContext, command); + }); + + bot.command(["ai"], spamwatchMiddleware, async (ctx) => { + if (!ctx.message || !('text' in ctx.message)) return; + await aiCommandHandler(ctx as TextContext, 'ai'); + }); } diff --git a/src/commands/main.ts b/src/commands/main.ts index fe76037..55ccc00 100644 --- a/src/commands/main.ts +++ b/src/commands/main.ts @@ -146,7 +146,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { inline_keyboard: models.map(series => [ { text: series.label, callback_data: `selectseries_${series.name}` } ]).concat([[ - { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' } + { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' } ]]) } } @@ -185,7 +185,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { inline_keyboard: series.models.map(m => [ { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${series.name}_${m.name}` } ]).concat([[ - { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_aiModel' } + { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_aiModel' } ]]) } } @@ -262,7 +262,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { const temps = [0.2, 0.5, 0.7, 0.9, 1.2]; try { await ctx.editMessageReplyMarkup({ - inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]]) + inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]]) }); } catch (err) { if ( @@ -304,7 +304,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => { if (!user) return; try { await ctx.editMessageReplyMarkup({ - inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]]) + inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]]) }); } catch (err) { if ( diff --git a/src/locales/english.json b/src/locales/english.json index 74aa29e..3acb49b 100644 --- a/src/locales/english.json +++ b/src/locales/english.json @@ -13,9 +13,9 @@ "varWas": "was", "varNone": "None", "varUnknown": "Unknown", - "varBack": "Back" + "varBack": "⬅️ Back" }, - "unexpectedErr": "Some unexpected error occurred during a bot action. Please report it to the developers.\n\n{error}", + "unexpectedErr": "An unexpected error occurred: {error}", "errInvalidOption": "Whoops! 
Invalid option!", "kickingMyself": "*Since you don't need me, I'll leave.*", "kickingMyselfErr": "Error leaving the chat.", @@ -65,22 +65,31 @@ "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat ``: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`", "ai": { "helpEntry": "✨ AI Commands", - "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI\n- /think ``: Ask a thinking model about a question", - "disabled": "✨ AI features are currently disabled", - "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings", - "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...", - "askGenerating": "✨ _{model} is working..._", - "askNoMessage": "Please provide a message to ask the model.", + "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI model\n- /think ``: Ask a thinking model about a question\n- /ai ``: Ask your custom-set AI model a question", + "disabled": "✨ AI features are currently disabled globally.", + "disabledForUser": "✨ AI features are disabled for your account.", + "pulling": "🔄 Model {model} not found locally, pulling...", + "askGenerating": "✨ Generating response with {model}...", + "askNoMessage": "✨ You need to ask me a question!", "languageCode": "Language", "thinking": "Thinking...", - "finishedThinking": "Finished thinking", - "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n" + "finishedThinking": "Done.", + "urlWarning": "\n\n⚠️ The user provided one or more URLs in their message. Please do not visit any suspicious URLs.", + "inQueue": "ℹ️ You are {position} in the queue.", + "startingProcessing": "✨ Starting to process your request...", + "systemPrompt": "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}", + "statusWaitingRender": "⏳ Waiting to Render...", + "statusRendering": "🖼️ Rendering...", + "statusComplete": "✅ Complete!", + "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}", + "noChatFound": "No chat found", + "pulled": "✅ Pulled {model} successfully, please retry the command." }, "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`", "maDownloadError": "Error downloading the file. Check the module ID and try again.", "ytDownload": { - "helpEntry": "📺 YouTube Download", - "helpDesc": "📺 *YouTube Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `