[FEATURE] Add AI-based /ask command (complementing #54) (#56)

* docs: add ai documentation

* docker: update docker files for ai/regular versions, lint

* feat: add initial /ask command

* Delete docker-compose.yml

* docker: ignore ollama folder in builds

* fix: add emojis to help commands, capitalize, add ai commands to help menu

* feat: add better logging, thought handling improvements

* bug fixes, better logging and separation of ai, update docs for ai

* clean, remove prompt and user info from logs, more docs edits

* system prompt change (plaintext only), parse out /think

* clean up, axios tweaks

* cleanup, logging of ratelimit

---------

Co-authored-by: Aidan <aidan@p0ntus.com>
Lucas Gabriel, 2025-06-28 16:22:15 -03:00, committed by GitHub (parent 0c364a1814, commit 81294f5721)
13 changed files with 733 additions and 35 deletions


@ -5,3 +5,4 @@ npm-debug.log
.env
*.md
!README.md
ollama/


@ -5,6 +5,11 @@ botSource = "https://github.com/ABOCN/TelegramBot"
# insert token here
botToken = ""
# ai features
ollamaEnabled = false
# ollamaApi = "http://ollama:11434"
# handlerTimeout = "600_000" # set higher if you expect to download larger models
# misc (botAdmins isn't an array here!)
maxRetries = 9999
botAdmins = 00000000, 00000000, 00000000

.gitignore (vendored) — 6 changes

@ -145,3 +145,9 @@ ffmpeg
# Bun
bun.lock*
# Ollama
ollama/
# Docker
docker-compose.yml


@ -10,12 +10,6 @@ Kowalski is a simple Telegram bot made in Node.js.
- You can find Kowalski at [@KowalskiNodeBot](https://t.me/KowalskiNodeBot) on Telegram.
## Translations
<a href="https://weblate.librecloud.cc/engage/kowalski/">
<img src="https://weblate.librecloud.cc/widget/kowalski/multi-auto.svg" alt="Translation status" />
</a>
## Self-host requirements
> [!IMPORTANT]
@ -26,6 +20,11 @@ Kowalski is a simple Telegram bot made in Node.js.
- FFmpeg (only for the `/yt` command)
- Docker and Docker Compose (only required for Docker setup)
### AI Requirements
- High-end CPU *or* GPU (~6 GB VRAM)
- If using a CPU, enough RAM to load the models (~6 GB with the default models)
## Running locally (non-Docker setup)
First, clone the repo with Git:
@ -55,9 +54,28 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make
### Using Docker Compose
1. **Make sure to set up your `.env` file first!**
1. **Copy compose file**
2. **Run the container**
_Without AI (Ollama)_
```bash
mv docker-compose.yml.example docker-compose.yml
```
_With AI (Ollama)_
```bash
mv docker-compose.yml.ai.example docker-compose.yml
```
2. **Make sure to set up your `.env` file first!**
> [!TIP]
> If you intend to set up AI, the defaults for Docker are already included (just uncomment them) and don't need to be changed.
>
> Further setup may be needed for GPUs. See the Ollama documentation for more.
3. **Run the container**
```bash
docker compose up -d
@ -81,6 +99,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
docker run -d --name kowalski --restart unless-stopped -v $(pwd)/.env:/usr/src/app/.env:ro kowalski
```
> [!NOTE]
> You must set up Ollama yourself if you would like to use AI features.
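
If you are self-hosting Ollama, a quick reachability probe helps before flipping `ollamaEnabled` on. Below is a minimal sketch (editor's illustration, not part of this commit; the endpoint mirrors the commented `ollamaApi` default, and `/api/tags` is Ollama's model-listing route):

```typescript
// probe-ollama.ts — hypothetical helper to confirm the Ollama endpoint is up
import axios from "axios"

const ollamaApi = process.env.ollamaApi || "http://ollama:11434"

async function probe(): Promise<void> {
  try {
    // GET /api/tags lists locally pulled models; any 200 means Ollama is reachable
    const res = await axios.get(`${ollamaApi}/api/tags`, { timeout: 5_000 })
    console.log(`Ollama reachable, ${res.data.models?.length ?? 0} model(s) pulled`)
  } catch {
    console.error(`Ollama not reachable at ${ollamaApi}`)
  }
}

probe()
```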
## .env Functions
> [!IMPORTANT]
@ -90,6 +111,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
- **botPrivacy**: Put the link to your bot privacy policy.
- **maxRetries**: Maximum number of retries for a failing command on Kowalski. Defaults to 5; the bot will crash once this limit is exceeded.
- **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather).
- **ollamaEnabled** (optional): Enables/disables AI features
- **ollamaApi** (optional): Ollama API endpoint for the AI features; they will be disabled if this is not set
- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models.
- **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group.
- **lastKey**: Last.fm API key, used by `lastfm.js` functions, such as seeing who is listening to which song.
- **weatherKey**: Weather.com API key, used for the `/weather` command.
@ -106,6 +130,12 @@ If you prefer to use Docker directly, you can use these instructions instead.
chmod +x src/plugins/yt-dlp/yt-dlp
```
### AI
**Q:** How can I disable AI features?
**A:** AI features are disabled by default; they are only enabled when `ollamaEnabled` is set to `true` in your `.env` file. Set it back to `false` to disable them.
## Contributors
<a href="https://github.com/abocn/TelegramBot/graphs/contributors">


@ -0,0 +1,15 @@
services:
  kowalski:
    build: .
    container_name: kowalski
    restart: unless-stopped
    volumes:
      - ./.env:/usr/src/app/.env:ro
    environment:
      - NODE_ENV=production
  ollama:
    image: ollama/ollama
    container_name: kowalski-ollama
    restart: unless-stopped
    volumes:
      - ./ollama:/root/.ollama


@ -4,6 +4,7 @@ import fs from 'fs';
import { isOnSpamWatch } from './spamwatch/spamwatch';
import '@dotenvx/dotenvx';
import './plugins/ytDlpWrapper';
import { preChecks } from './commands/ai';
// Ensures bot token is set, and not default value
if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') {
@ -11,7 +12,17 @@ if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere')
process.exit(1)
}
const bot = new Telegraf(process.env.botToken);
// Detect AI and run pre-checks
if (process.env.ollamaEnabled === "true") {
if (!(await preChecks())) {
process.exit(1)
}
}
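// handlerTimeout must outlast long generations and first-time model pulls (defaults to 10 minutes)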
const bot = new Telegraf(
process.env.botToken,
{ handlerTimeout: Number(process.env.handlerTimeout) || 600_000 }
);
const maxRetries = process.env.maxRetries || 5;
let restartCount = 0;

src/commands/ai.ts (new file, 287 lines)

@ -0,0 +1,287 @@
// AI.TS
// by ihatenodejs/Aidan
//
// -----------------------------------------------------------------------
//
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <https://unlicense.org/>
import { isOnSpamWatch } from "../spamwatch/spamwatch"
import spamwatchMiddlewareModule from "../spamwatch/Middleware"
import { Telegraf, Context } from "telegraf"
import type { Message } from "telegraf/types"
import { replyToMessageId } from "../utils/reply-to-message-id"
import { getStrings } from "../plugins/checklang"
import { languageCode } from "../utils/language-code"
import axios from "axios"
import { rateLimiter } from "../utils/rate-limiter"
import { logger } from "../utils/log"
const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
export const flash_model = "gemma3:4b"
export const thinking_model = "deepseek-r1:1.5b"
type TextContext = Context & { message: Message.TextMessage }
export function sanitizeForJson(text: string): string {
return text
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t')
}
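// Editor's note (not part of this commit): sanitizeForJson escapes characters
// that would otherwise break the hand-assembled JSON prompt, e.g.
//   sanitizeForJson('line1\nline2 "quoted"') === 'line1\\nline2 \\"quoted\\"'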
export async function preChecks() {
const envs = [
"ollamaApi",
]
for (const env of envs) {
if (!process.env[env]) {
console.error(`[✨ AI | !] ❌ ${env} not set!`)
return false
}
}
console.log("[✨ AI] Pre-checks passed\n")
return true
}
async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) {
const Strings = getStrings(languageCode(ctx))
if (!ctx.chat) {
return {
success: false,
error: Strings.unexpectedErr.replace("{error}", "No chat found"),
}
}
try {
const aiResponse = await axios.post(
`${process.env.ollamaApi}/api/generate`,
{
model,
prompt,
stream: true,
},
{
responseType: "stream",
}
)
let fullResponse = ""
let thoughts = ""
let lastUpdate = Date.now()
const stream = aiResponse.data
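// Ollama streams newline-delimited JSON; a JSON object split across network chunks will fail to parse and is skipped below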
for await (const chunk of stream) {
const lines = chunk.toString().split('\n')
for (const line of lines) {
if (!line.trim()) continue
let ln
try {
ln = JSON.parse(line)
} catch (e) {
console.error("[✨ AI | !] Error parsing chunk:", e)
continue
}
if (model === thinking_model) {
if (ln.response.includes('<think>')) {
const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
if (thinkMatch && thinkMatch[1].trim().length > 0) {
logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
} else if (!thinkMatch) {
logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
}
} else if (ln.response.includes('</think>')) {
logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
}
}
const now = Date.now()
if (ln.response) {
if (model === thinking_model) {
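// rewrite <think>...</think> spans into inline `Thinking...` markers shown to the user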
let patchedThoughts = ln.response
const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '')
patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
thoughts += patchedThoughts
fullResponse += patchedThoughts
} else {
fullResponse += ln.response
}
if (now - lastUpdate >= 1000) {
await rateLimiter.editMessageWithRetry(
ctx,
ctx.chat.id,
replyGenerating.message_id,
fullResponse, // use the accumulated response; `thoughts` stays empty for the flash model
{ parse_mode: 'Markdown' }
)
lastUpdate = now
}
}
}
}
return {
success: true,
response: fullResponse,
}
} catch (error: any) {
let shouldPullModel = false
if (error.response) {
const errData = error.response.data?.error
const errStatus = error.response.status
if (errData && (errData.includes(`model '${model}' not found`) || errStatus === 404)) {
shouldPullModel = true
} else {
console.error("[✨ AI | !] Error zone 1:", errData)
return { success: false, error: errData }
}
} else if (error.request) {
console.error("[✨ AI | !] No response received:", error.request)
return { success: false, error: "No response received from server" }
} else {
console.error("[✨ AI | !] Error zone 3:", error.message)
return { success: false, error: error.message }
}
if (shouldPullModel) {
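// the model is not available locally; pull it through Ollama, then ask the user to retry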
await ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`)
console.log(`[✨ AI | i] Pulling ${model} from ollama...`)
try {
await axios.post(
`${process.env.ollamaApi}/api/pull`,
{
model,
stream: false,
},
{
// timeout is an axios request option, not part of the pull payload
timeout: Number(process.env.ollamaApiTimeout) || 10000,
}
)
} catch (e: any) {
if (e.response) {
console.error("[✨ AI | !] Something went wrong:", e.response.data?.error)
return {
success: false,
error: `❌ Something went wrong while pulling ${model}, please try your command again!`,
}
} else if (e.request) {
console.error("[✨ AI | !] No response received while pulling:", e.request)
return {
success: false,
error: `❌ No response received while pulling ${model}, please try again!`,
}
} else {
console.error("[✨ AI | !] Error while pulling:", e.message)
return {
success: false,
error: `❌ Error while pulling ${model}: ${e.message}`,
}
}
}
console.log(`[✨ AI | i] ${model} pulled successfully`)
return {
success: true,
response: `✅ Pulled ${model} successfully, please retry the command.`,
}
}
}
}
export default (bot: Telegraf<Context>) => {
const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
if (!ctx.message || !('text' in ctx.message)) return
const isAsk = ctx.message.text.startsWith("/ask")
const model = isAsk ? flash_model : thinking_model
const textCtx = ctx as TextContext
const reply_to_message_id = replyToMessageId(textCtx)
const Strings = getStrings(languageCode(textCtx))
const message = textCtx.message.text
const author = ctx.from?.username ? `@${ctx.from.username}` : ctx.from?.first_name // avoid "@undefined" when username is missing
logger.logCmdStart(author, model === flash_model ? "ask" : "think")
if (!process.env.ollamaApi) {
await ctx.reply(Strings.aiDisabled, {
parse_mode: 'Markdown',
...({ reply_to_message_id })
})
return
}
const replyGenerating = await ctx.reply(Strings.askGenerating.replace("{model}", model), {
parse_mode: 'Markdown',
...({ reply_to_message_id })
})
const fixedMsg = message.replace(/\/(ask|think) /, "")
if (fixedMsg.length < 1) {
await ctx.reply(Strings.askNoMessage, {
parse_mode: 'Markdown',
...({ reply_to_message_id })
})
return
}
logger.logPrompt(fixedMsg)
const prompt = sanitizeForJson(
`You are a plaintext-only, helpful assistant called ${botName}.
Current Date/Time (UTC): ${new Date().toUTCString()}
---
Respond to the user's message:
${fixedMsg}`)
const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model)
if (!aiResponse) return
if (!ctx.chat) return
if (aiResponse.success && aiResponse.response) {
await rateLimiter.editMessageWithRetry(
ctx,
ctx.chat.id,
replyGenerating.message_id,
aiResponse.response,
{ parse_mode: 'Markdown' }
)
return
}
const error = Strings.unexpectedErr.replace("{error}", aiResponse.error)
await rateLimiter.editMessageWithRetry(
ctx,
ctx.chat.id,
replyGenerating.message_id,
error,
{ parse_mode: 'Markdown' }
)
})
}
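
For reference, the module's default export receives the Telegraf instance and registers both commands on it. A minimal wiring sketch follows (editor's illustration; the import path and manual registration are assumptions, since the repo's command loader is not shown in this diff):

```typescript
// hypothetical manual registration of the AI command module
import { Telegraf } from "telegraf"
import registerAi from "./commands/ai"

const bot = new Telegraf(process.env.botToken!, {
  handlerTimeout: Number(process.env.handlerTimeout) || 600_000, // as in src/index.ts
})

registerAi(bot) // adds /ask (flash model) and /think (thinking model)
bot.launch()
```

Keeping registration behind a default export keeps each command module self-contained, which is also why `preChecks` can be imported separately in `src/index.ts`.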


@ -32,7 +32,8 @@ async function sendHelpMessage(ctx, isEditing) {
[{ text: Strings.mainCommands, callback_data: 'helpMain' }, { text: Strings.usefulCommands, callback_data: 'helpUseful' }],
[{ text: Strings.interactiveEmojis, callback_data: 'helpInteractive' }, { text: Strings.funnyCommands, callback_data: 'helpFunny' }],
[{ text: Strings.lastFm.helpEntry, callback_data: 'helpLast' }, { text: Strings.animalCommands, callback_data: 'helpAnimals' }],
[{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }]
[{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }],
[{ text: Strings.aiCmds, callback_data: 'helpAi' }]
]
}
};
@ -112,6 +113,10 @@ export default (bot) => {
await ctx.answerCbQuery();
await ctx.editMessageText(Strings.ponyApi.helpDesc, options);
break;
case 'helpAi':
await ctx.answerCbQuery();
await ctx.editMessageText(Strings.aiCmdsDesc, options);
break;
case 'helpBack':
await ctx.answerCbQuery();
await sendHelpMessage(ctx, true);


@ -33,8 +33,8 @@
"funEmojiResult": "*You rolled {emoji} and got* `{value}`*!*\nYou don't know what that means? Me neither!",
"gifErr": "*Something went wrong while sending the GIF. Please try again later.*\n\n{err}",
"lastFm": {
"helpEntry": "Last.fm",
"helpDesc": "*Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser `<user>`: Sets the user for the command above.",
"helpEntry": "🎵 Last.fm",
"helpDesc": "🎵 *Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser `<user>`: Sets the user for the command above.",
"noUser": "*Please provide a Last.fm username.*\nExample: `/setuser <username>`",
"noUserSet": "*You haven't set your Last.fm username yet.*\nUse the command /setuser to set.\n\nExample: `/setuser <username>`",
"noRecentTracks": "*No recent tracks found for Last.fm user* `{lastfmUser}`*.*",
@ -52,25 +52,27 @@
"apiErr": "*An error occurred while retrieving the weather. Please try again later.*\n\n`{error}`",
"apiKeyErr": "*An API key was not set by the bot owner. Please try again later.*"
},
"mainCommands": "Main commands",
"mainCommandsDesc": "*Main commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy",
"usefulCommands": "Useful commands",
"usefulCommandsDesc": "*Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device `<model>`: Search for a device on GSMArena and show its specs.\n/codename | /whatis `<device codename>`: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima `<city>`: See weather status for a specific location.\n- /modarchive | /tma `<module id>`: Download a module from The Mod Archive.\n- /http `<HTTP code>`: Send details about a specific HTTP code. Example: `/http 404`",
"funnyCommands": "Funny commands",
"funnyCommandsDesc": "*Funny commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10",
"interactiveEmojis": "Interactive emojis",
"interactiveEmojisDesc": "*Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!",
"animalCommands": "Animals",
"animalCommandsDesc": "*Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
"mainCommands": " Main Commands",
"mainCommandsDesc": " *Main Commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy",
"usefulCommands": "🛠️ Useful Commands",
"usefulCommandsDesc": "🛠️ *Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device `<model>`: Search for a device on GSMArena and show its specs.\n/codename | /whatis `<device codename>`: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima `<city>`: See weather status for a specific location.\n- /modarchive | /tma `<module id>`: Download a module from The Mod Archive.\n- /http `<HTTP code>`: Send details about a specific HTTP code. Example: `/http 404`",
"funnyCommands": "😂 Funny Commands",
"funnyCommandsDesc": "😂 *Funny Commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10",
"interactiveEmojis": "🎲 Interactive Emojis",
"interactiveEmojisDesc": "🎲 *Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!",
"animalCommands": "🐱 Animals",
"animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
"aiCmds": "✨ AI Commands",
"aiCmdsDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI",
"maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
"maDownloadError": "Error downloading the file. Check the module ID and try again.",
"ytDownload": {
"helpEntry": "Video download",
"helpDesc": "*Video download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
"downloadingVid": "*Downloading video...*",
"helpEntry": "📺 YouTube Download",
"helpDesc": "📺 *YouTube Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
"downloadingVid": "⬇️ *Downloading video...*",
"libNotFound": "*It seems that the yt-dlp executable does not exist on our server...\n\nIn that case, the problem is on our end! Please wait until we have noticed and solved the problem.*",
"checkingSize": "*Checking if the video exceeds the 50MB limit...*",
"uploadingVid": "*Uploading video...*",
"checkingSize": "🔎 *Checking if the video exceeds the 50MB limit...*",
"uploadingVid": "⬆️ *Uploading video...*",
"msgDesc": "{userMention}*, there is your downloaded video.*",
"downloadErr": "*Error during YT video download:*\n\n`{err}`",
"uploadErr": "Error uploading file. Please try again later.",
@ -95,8 +97,8 @@
"resultMsg": "*HTTP Code*: {code}\n*Name*: `{message}`\n*Description*: {description}"
},
"ponyApi": {
"helpEntry": "My Little Pony",
"helpDesc": "*My Little Pony*\n\n- /mlp: Displays this help message.\n- /mlpchar `<character name>`: Shows specific information about a My Little Pony character. Example: `/mlpchar Twilight Sparkle`\n- /mlpep: Shows specific information about a My Little Pony episode. Example: `/mlpep 136`\n- /mlpcomic `<comic name>`: Shows specific information about a My Little Pony comic. Example: `/mlpcomic Nightmare Rarity`\n- /rpony | /randompony | /mlpart: Sends a random artwork made by the My Little Pony community.",
"helpEntry": "🐴 My Little Pony",
"helpDesc": "🐴 *My Little Pony*\n\n- /mlp: Displays this help message.\n- /mlpchar `<character name>`: Shows specific information about a My Little Pony character. Example: `/mlpchar Twilight Sparkle`\n- /mlpep: Shows specific information about a My Little Pony episode. Example: `/mlpep 136`\n- /mlpcomic `<comic name>`: Shows specific information about a My Little Pony comic. Example: `/mlpcomic Nightmare Rarity`\n- /rpony | /randompony | /mlpart: Sends a random artwork made by the My Little Pony community.",
"charRes": "*{name} (ID: {id})*\n\n*Alias:* `{alias}`\n*Sex:* `{sex}`\n*Residence:* `{residence}`\n*Occupation:* `{occupation}`\n*Kind:* `{kind}`\n\n*Fandom URL:*\n[{url}]({url})",
"epRes": "*{name} (ID: {id})*\n\n*Season:* `{season}`\n*Episode:* `{episode}`\n*Overall Ep.:* `{overall}`\n*Release date:* `{airdate}`\n*Story by:* `{storyby}`\n*Written by:* `{writtenby}`\n*Storyboard:* `{storyboard}`\n\n*Fandom URL:*\n[{url}]({url})",
"comicRes": "*{name} (ID: {id})*\n\n*Series:* `{series}`\n*Writer:* `{writer}`\n*Artist:* `{artist}`\n*Colorist:* `{colorist}`\n*Letterer:* `{letterer}`\n*Editor:* `{editor}`\n\n*Fandom URL:*\n[{url}]({url})",
@ -117,5 +119,7 @@
"apiErr": "An error occurred while fetching data from the API.\n\n`{err}`"
},
"chatNotFound": "Chat not found.",
"noFileProvided": "Please provide a file to send."
"noFileProvided": "Please provide a file to send.",
"askGenerating": "✨ _{model} is working..._",
"aiDisabled": "AI features are currently disabled"
}


@ -62,6 +62,8 @@
"interactiveEmojisDesc": "*Emojis interativos*\n\n- /dice: Jogue um dado\n- /idice: Role infinitamente um dado colorido\n- /slot: Tente combinar as figuras!\n- /ball: Tente chutar a bola no gol!\n- /bowling: Tente derrubar os pinos!\n- /dart: Tente acertar o alvo!",
"animalCommands": "Animais",
"animalCommandsDesc": "*Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat `<código http>`: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`",
"aiCmds": "Comandos de IA",
"aiCmdsDesc": "*Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA",
"maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
"maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",
"ytDownload": {
@ -115,5 +117,8 @@
"notFound": "Celular não encontrado.",
"resultMsg": "*Nome:* `{name}`\n*Marca:* `{brand}`\n*Modelo:* `{model}`\n*Codinome:* `{codename}`",
"apiErr": "Ocorreu um erro ao buscar os dados da API.\n\n`{err}`"
}
},
"noFileProvided": "Por favor, forneça um arquivo para envio.",
"askGenerating": "✨ _{modelo} está funcionando..._",
"aiDisabled": "Os recursos de IA estão desativados no momento"
}

src/utils/log.ts (new file, 83 lines)

@ -0,0 +1,83 @@
// LOG.TS
// by ihatenodejs/Aidan
//
// -----------------------------------------------------------------------
//
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <https://unlicense.org/>
import { flash_model, thinking_model } from "../commands/ai"
class Logger {
private static instance: Logger
private constructor() {}
static getInstance(): Logger {
if (!Logger.instance) {
Logger.instance = new Logger()
}
return Logger.instance
}
logCmdStart(user: string, type: "ask" | "think"): void {
console.log(`\n[✨ AI | START] Received /${type} for model ${type === "ask" ? flash_model : thinking_model}`)
}
logThinking(chatId: number, messageId: number, thinking: boolean): void {
if (thinking) {
console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model started thinking`)
} else {
console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model stopped thinking`)
}
}
logChunk(chatId: number, messageId: number, text: string, isOverflow: boolean = false): void {
const prefix = isOverflow ? "[✨ AI | OVERFLOW]" : "[✨ AI | CHUNK]"
console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars pushed to Telegram`)
}
logPrompt(prompt: string): void {
console.log(`[✨ AI | PROMPT] ${prompt.length} chars input`)
}
logError(error: any): void {
if (error.response?.error_code === 429) {
const retryAfter = error.response.parameters?.retry_after || 1
console.error(`[✨ AI | RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
} else if (error.response?.error_code === 400 && error.response?.description?.includes("can't parse entities")) {
console.error("[✨ AI | PARSE_ERROR] Markdown parsing failed, retrying with plain text")
} else {
const errorDetails = {
code: error.response?.error_code,
description: error.response?.description,
method: error.on?.method
}
console.error("[✨ AI | ERROR]", JSON.stringify(errorDetails, null, 2))
}
}
}
export const logger = Logger.getInstance()

src/utils/rate-limiter.ts (new file, 246 lines)

@ -0,0 +1,246 @@
// RATE-LIMITER.TS
// by ihatenodejs/Aidan
//
// -----------------------------------------------------------------------
//
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <https://unlicense.org/>
import { Context } from 'telegraf'
import { logger } from './log'
class RateLimiter {
private lastEditTime: number = 0
private readonly minInterval: number = 5000
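// 5s between streamed edits; conservative spacing to stay clear of Telegram flood limits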
private pendingUpdates: Map<string, string> = new Map()
private updateQueue: Map<string, NodeJS.Timeout> = new Map()
private readonly max_msg_length: number = 3500
private overflowMessages: Map<string, number> = new Map()
private isRateLimited: boolean = false
private rateLimitEndTime: number = 0
private getMessageKey(chatId: number, messageId: number): string {
return `${chatId}:${messageId}`
}
private async waitForRateLimit(chatId: number, messageId: number): Promise<void> {
if (!this.isRateLimited) return
console.log(`[✨ AI | RATELIMIT] [${chatId}:${messageId}] Rate limited; waiting ${this.rateLimitEndTime - Date.now()}ms`)
const now = Date.now()
if (now < this.rateLimitEndTime) {
await new Promise(resolve => setTimeout(resolve, this.rateLimitEndTime - now))
}
this.isRateLimited = false
}
private chunkText(text: string): string[] {
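// split on newlines into chunks of at most max_msg_length chars, hard-splitting any single line longer than the limit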
const chunks: string[] = []
let currentChunk = ''
let currentLength = 0
const lines = text.split('\n')
for (const line of lines) {
if (currentLength + line.length + 1 > this.max_msg_length) {
if (currentChunk) {
chunks.push(currentChunk)
currentChunk = ''
currentLength = 0
}
if (line.length > this.max_msg_length) {
for (let i = 0; i < line.length; i += this.max_msg_length) {
chunks.push(line.substring(i, i + this.max_msg_length))
}
} else {
currentChunk = line
currentLength = line.length
}
} else {
if (currentChunk) {
currentChunk += '\n'
currentLength++
}
currentChunk += line
currentLength += line.length
}
}
if (currentChunk) {
chunks.push(currentChunk)
}
return chunks
}
private handleTelegramError(error: unknown, messageKey: string, options: any, ctx: Context, chatId: number, messageId: number): boolean {
if (!isTelegramError(error)) return false
if (error.response.error_code === 429) {
const retryAfter = error.response.parameters?.retry_after || 1
this.isRateLimited = true
this.rateLimitEndTime = Date.now() + (retryAfter * 1000)
const existingTimeout = this.updateQueue.get(messageKey)
if (existingTimeout) clearTimeout(existingTimeout)
const timeout = setTimeout(() => {
this.processUpdate(ctx, chatId, messageId, options)
}, retryAfter * 1000)
this.updateQueue.set(messageKey, timeout)
return true
}
if (error.response.error_code === 400) {
if (error.response.description?.includes("can't parse entities") || error.response.description?.includes("MESSAGE_TOO_LONG")) {
const plainOptions = { ...options, parse_mode: undefined }
this.processUpdate(ctx, chatId, messageId, plainOptions)
return true
}
if (error.response.description?.includes("message is not modified")) {
this.pendingUpdates.delete(messageKey)
this.updateQueue.delete(messageKey)
return true
}
logger.logError(error)
this.pendingUpdates.delete(messageKey)
this.updateQueue.delete(messageKey)
return true
}
logger.logError(error)
this.pendingUpdates.delete(messageKey)
this.updateQueue.delete(messageKey)
return true
}
private async processUpdate(
ctx: Context,
chatId: number,
messageId: number,
options: any
): Promise<void> {
const messageKey = this.getMessageKey(chatId, messageId)
const latestText = this.pendingUpdates.get(messageKey)
if (!latestText) return
const now = Date.now()
const timeSinceLastEdit = now - this.lastEditTime
await this.waitForRateLimit(chatId, messageId)
if (timeSinceLastEdit < this.minInterval) {
const existingTimeout = this.updateQueue.get(messageKey)
if (existingTimeout) clearTimeout(existingTimeout)
const timeout = setTimeout(() => {
this.processUpdate(ctx, chatId, messageId, options)
}, this.minInterval - timeSinceLastEdit)
this.updateQueue.set(messageKey, timeout)
return
}
try {
if (latestText.length > this.max_msg_length) {
const chunks = this.chunkText(latestText)
const firstChunk = chunks[0]
logger.logChunk(chatId, messageId, firstChunk)
try {
await ctx.telegram.editMessageText(chatId, messageId, undefined, firstChunk, options)
} catch (error: unknown) {
if (
isTelegramError(error) &&
!error.response.description?.includes("message is not modified")
) {
throw error
}
}
for (let i = 1; i < chunks.length; i++) {
const chunk = chunks[i]
const overflowMessageId = this.overflowMessages.get(messageKey)
if (overflowMessageId) {
try {
await ctx.telegram.editMessageText(chatId, overflowMessageId, undefined, chunk, options)
logger.logChunk(chatId, overflowMessageId, chunk, true)
} catch (error: unknown) {
if (
isTelegramError(error) &&
!error.response.description?.includes("message is not modified")
) {
throw error
}
}
} else {
const newMessage = await ctx.telegram.sendMessage(chatId, chunk, {
...options,
reply_to_message_id: messageId
})
logger.logChunk(chatId, newMessage.message_id, chunk, true)
this.overflowMessages.set(messageKey, newMessage.message_id)
}
}
this.pendingUpdates.set(messageKey, firstChunk)
if (chunks.length > 1) {
this.pendingUpdates.set(
this.getMessageKey(chatId, this.overflowMessages.get(messageKey)!),
chunks[chunks.length - 1]
)
}
} else {
logger.logChunk(chatId, messageId, latestText)
try {
await ctx.telegram.editMessageText(chatId, messageId, undefined, latestText, options)
} catch (error: unknown) {
if (
isTelegramError(error) &&
!error.response.description?.includes("message is not modified")
) {
throw error
}
}
this.pendingUpdates.delete(messageKey)
}
this.lastEditTime = Date.now()
this.updateQueue.delete(messageKey)
} catch (error: unknown) {
if (!this.handleTelegramError(error, messageKey, options, ctx, chatId, messageId)) {
logger.logError(error)
this.pendingUpdates.delete(messageKey)
this.updateQueue.delete(messageKey)
}
}
}
async editMessageWithRetry(
ctx: Context,
chatId: number,
messageId: number,
text: string,
options: any
): Promise<void> {
const messageKey = this.getMessageKey(chatId, messageId)
this.pendingUpdates.set(messageKey, text)
await this.processUpdate(ctx, chatId, messageId, options)
}
}
export const rateLimiter = new RateLimiter()
function isTelegramError(error: unknown): error is { response: { description?: string, error_code?: number, parameters?: { retry_after?: number } } } {
return (
typeof error === "object" &&
error !== null &&
"response" in error &&
typeof (error as any).response === "object"
)
}