diff --git a/.env.example b/.env.example
index 452211b..997fbc5 100644
--- a/.env.example
+++ b/.env.example
@@ -5,6 +5,9 @@ botSource = "https://github.com/ABOCN/TelegramBot"
# insert token here
botToken = ""
+# ai features
+# ollamaApi = "http://ollama:11434"
+
# misc (botAdmins isnt a array here!)
maxRetries = 9999
botAdmins = 00000000, 00000000, 00000000
diff --git a/.gitignore b/.gitignore
index 6b42f1f..278fef8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -144,4 +144,10 @@ yt-dlp
ffmpeg
# Bun
-bun.lock*
\ No newline at end of file
+bun.lock*
+
+# Ollama
+ollama/
+
+# Docker
+docker-compose.yml
\ No newline at end of file
diff --git a/src/commands/ai.ts b/src/commands/ai.ts
new file mode 100644
index 0000000..0ac6e53
--- /dev/null
+++ b/src/commands/ai.ts
@@ -0,0 +1,248 @@
+// AI.TS
+// by ihatenodejs/Aidan
+//
+// -----------------------------------------------------------------------
+//
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <https://unlicense.org>
+
+import { isOnSpamWatch } from "../spamwatch/spamwatch"
+import spamwatchMiddlewareModule from "../spamwatch/Middleware"
+import { Telegraf, Context } from "telegraf"
+import type { Message } from "telegraf/types"
+import { replyToMessageId } from "../utils/reply-to-message-id"
+import { getStrings } from "../plugins/checklang"
+import { languageCode } from "../utils/language-code"
+import axios from "axios"
+import { rateLimiter } from "../utils/rate-limiter"
+
+const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
+//const model = "qwen3:0.6b"
+const model = "deepseek-r1:1.5b"
+
+type TextContext = Context & { message: Message.TextMessage }
+
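+// Escapes backslashes, quotes and control characters so user text can be
+// embedded safely inside the JSON body sent to the Ollama API.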
+export function sanitizeForJson(text: string): string {
+ return text
+ .replace(/\\/g, '\\\\')
+ .replace(/"/g, '\\"')
+ .replace(/\n/g, '\\n')
+ .replace(/\r/g, '\\r')
+ .replace(/\t/g, '\\t')
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message) {
+ const Strings = getStrings(languageCode(ctx))
+
+ if (!ctx.chat) return {
+ "success": false,
+ "error": Strings.unexpectedErr.replace("{error}", "No chat found"),
+ }
+
+ try {
+ const aiResponse = await axios.post(`${process.env.ollamaApi}/api/generate`, {
+ model: model,
+ prompt: prompt,
+ stream: true,
+ }, {
+ responseType: "stream",
+ })
+
+ let fullResponse = ""
+ let thoughts = ""
+ let thinking = false
+ let lastUpdate = Date.now()
+
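+    // Ollama streams newline-delimited JSON: each line is an object with a
+    // partial "response" field. <think>/</think> markers emitted by reasoning
+    // models are intercepted so the final reply excludes the thought stream.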
+ for await (const chunk of aiResponse.data) {
+ const lines = chunk.toString().split('\n')
+ for (const line of lines) {
+ if (!line.trim()) continue
+ if (line.includes("\u003cthink\u003e")) {
+ // intercept thoughts
+ console.log("thinking started")
+ thinking = true
+ thoughts += line
+ continue
+ } else if (line.includes("\u003c/think\u003e")) {
+ // thinking finished
+ thinking = false
+ console.log("thinking finished")
+ continue
+ }
+
+ try {
+ const now = Date.now()
+ let data = JSON.parse(line)
+
+ if (data.response && !thinking) {
+ fullResponse += data.response
+ if (now - lastUpdate >= 1000) {
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ fullResponse,
+ { parse_mode: 'Markdown' }
+ )
+ lastUpdate = now
+ }
+ } else if (data.response && thinking) {
+ if (now - lastUpdate >= 1000) {
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ thoughts,
+ { parse_mode: 'Markdown' }
+ )
+ lastUpdate = now
+ }
+ }
+ } catch (e) {
+ console.error("Error parsing chunk:", e)
+ }
+ }
+ }
+
+ return {
+ "success": true,
+ "response": fullResponse,
+ }
+ } catch (error: any) {
+ let shouldPullModel = false
+
+ if (error.response?.data?.error) {
+ if (error.response.data.error.includes(`model '${model}' not found`) || error.status === 404) {
+ shouldPullModel = true
+ } else {
+ console.error("[!] 1", error.response.data.error)
+ return {
+ "success": false,
+ "error": error.response.data.error,
+ }
+ }
+ } else if (error.status === 404) {
+ shouldPullModel = true
+ }
+
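+    // A 404 or "model not found" error means the model is not present locally,
+    // so ask Ollama to pull it (stream: false waits for the pull to finish);
+    // the user is then asked to retry the command once the pull succeeds.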
+ if (shouldPullModel) {
+      await ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...`)
+ console.log(`[i] Pulling ${model} from ollama...`)
+
+ const pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
+ model: model,
+ stream: false,
+ })
+
+    if (pullModelStream.data.status !== "success") {
+ console.error("[!] Something went wrong:", pullModelStream.data)
+ return {
+ "success": false,
+ "error": `❌ Something went wrong while pulling ${model}, please try your command again!`,
+ }
+ }
+
+ console.log("[i] Model pulled successfully")
+ return {
+ "success": true,
+ "response": `✅ Pulled ${model} successfully, please retry the command.`,
+ }
+ }
+
+ if (error.response) {
+ console.error("[!] 2", error.response)
+ return {
+ "success": false,
+ "error": error.response,
+ }
+ }
+
+ if (error.statusText) {
+ console.error("[!] 3", error.statusText)
+ return {
+ "success": false,
+ "error": error.statusText,
+ }
+ }
+
+ return {
+ "success": false,
+ "error": "An unexpected error occurred",
+ }
+ }
+}
+
+export default (bot: Telegraf) => {
+ bot.command("ask", spamwatchMiddleware, async (ctx) => {
+ if (!ctx.message || !('text' in ctx.message)) return;
+ const textCtx = ctx as TextContext;
+ const reply_to_message_id = replyToMessageId(textCtx)
+ const Strings = getStrings(languageCode(textCtx))
+ const message = textCtx.message.text
+
+ if (!process.env.ollamaApi) {
+ await ctx.reply(Strings.aiDisabled, {
+ parse_mode: 'Markdown',
+ ...({ reply_to_message_id })
+ })
+ return
+ }
+
+ const replyGenerating = await ctx.reply(Strings.askGenerating.replace("{model}", model), {
+ parse_mode: 'Markdown',
+ ...({ reply_to_message_id })
+ })
+
+ const prompt = sanitizeForJson(
+`You are a helpful assistant named Kowalski, who has been given a message from a user.
+
+The message is:
+
+${message}`)
+ const aiResponse = await getResponse(prompt, textCtx, replyGenerating)
+ if (!aiResponse) return
+
+ if (aiResponse.success && aiResponse.response) {
+ if (!ctx.chat) return
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ aiResponse.response,
+ { parse_mode: 'Markdown' }
+ )
+ } else {
+ if (!ctx.chat) return
+ const error = Strings.unexpectedErr.replace("{error}", aiResponse.error)
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ error,
+ { parse_mode: 'Markdown' }
+ )
+ }
+ })
+}
\ No newline at end of file
diff --git a/src/locales/english.json b/src/locales/english.json
index f8bb0ec..53c33fb 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -114,5 +114,7 @@
"apiErr": "An error occurred while fetching data from the API.\n\n`{err}`"
},
"chatNotFound": "Chat not found.",
- "noFileProvided": "Please provide a file to send."
+ "noFileProvided": "Please provide a file to send.",
+ "askGenerating": "✨ _{model} is working..._",
+ "aiDisabled": "AI features are currently disabled"
}
\ No newline at end of file
diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json
index 2e485f7..9c437fb 100644
--- a/src/locales/portuguese.json
+++ b/src/locales/portuguese.json
@@ -112,5 +112,8 @@
"notFound": "Celular não encontrado.",
"resultMsg": "*Nome:* `{name}`\n*Marca:* `{brand}`\n*Modelo:* `{model}`\n*Codinome:* `{codename}`",
"apiErr": "Ocorreu um erro ao buscar os dados da API.\n\n`{err}`"
- }
+ },
+ "noFileProvided": "Por favor, forneça um arquivo para envio.",
+ "askGenerating": "✨ _{modelo} está funcionando..._",
+ "aiDisabled": "Os recursos de IA estão desativados no momento"
}
diff --git a/src/utils/log.ts b/src/utils/log.ts
new file mode 100644
index 0000000..ceae23f
--- /dev/null
+++ b/src/utils/log.ts
@@ -0,0 +1,84 @@
+// LOG.TS
+// by ihatenodejs/Aidan
+//
+// -----------------------------------------------------------------------
+//
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <https://unlicense.org>
+
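+// Lightweight console logging helpers for streaming chunks, prompts, model
+// thinking markers and Telegram API errors; exported as a singleton instance.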
+class Logger {
+ private static instance: Logger
+ private thinking: boolean = false
+
+ private constructor() {}
+
+ static getInstance(): Logger {
+ if (!Logger.instance) {
+ Logger.instance = new Logger()
+ }
+ return Logger.instance
+ }
+
+ logChunk(chatId: number, messageId: number, text: string, isOverflow: boolean = false): void {
+ const prefix = isOverflow ? '[OVERFLOW]' : '[CHUNK]'
+ console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars: ${text.substring(0, 50)}${text.length > 50 ? '...' : ''}`)
+ }
+
+ logPrompt(prompt: string): void {
+ console.log(`[PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? '...' : ''}`)
+ }
+
+ logThinkingStart(): void {
+ if (!this.thinking) {
+ console.log('[THINKING] started')
+ this.thinking = true
+ }
+ }
+
+ logThinkingEnd(): void {
+ if (this.thinking) {
+ console.log('[THINKING] ended')
+ this.thinking = false
+ }
+ }
+
+ logError(error: any): void {
+ if (error.response?.error_code === 429) {
+ const retryAfter = error.response.parameters?.retry_after || 1
+ console.error(`[RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
+ } else if (error.response?.error_code === 400 && error.response?.description?.includes("can't parse entities")) {
+ console.error('[PARSE_ERROR] Markdown parsing failed, retrying with plain text')
+ } else {
+ const errorDetails = {
+ code: error.response?.error_code,
+ description: error.response?.description,
+ method: error.on?.method
+ }
+ console.error('[ERROR]', JSON.stringify(errorDetails, null, 2))
+ }
+ }
+}
+
+export const logger = Logger.getInstance()
\ No newline at end of file
diff --git a/src/utils/rate-limiter.ts b/src/utils/rate-limiter.ts
new file mode 100644
index 0000000..3c1d537
--- /dev/null
+++ b/src/utils/rate-limiter.ts
@@ -0,0 +1,235 @@
+// RATE-LIMITER.TS
+// by ihatenodejs/Aidan
+//
+// -----------------------------------------------------------------------
+//
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <https://unlicense.org>
+
+import { Context } from 'telegraf'
+import { logger } from './log'
+
+class RateLimiter {
+ private lastEditTime: number = 0
+ private readonly minInterval: number = 5000
+  private pendingUpdates: Map<string, string> = new Map()
+  private updateQueue: Map<string, NodeJS.Timeout> = new Map()
+  // kept below Telegram's 4096-character message limit to leave headroom
+  private readonly max_msg_length: number = 3500
+  private overflowMessages: Map<string, number> = new Map()
+ private isRateLimited: boolean = false
+ private rateLimitEndTime: number = 0
+
+ private getMessageKey(chatId: number, messageId: number): string {
+ return `${chatId}:${messageId}`
+ }
+
+  private async waitForRateLimit(): Promise<void> {
+ if (this.isRateLimited) {
+ const now = Date.now()
+ if (now < this.rateLimitEndTime) {
+ const waitTime = this.rateLimitEndTime - now
+ await new Promise(resolve => setTimeout(resolve, waitTime))
+ }
+ this.isRateLimited = false
+ }
+ }
+
+ private async processUpdate(
+ ctx: Context,
+ chatId: number,
+ messageId: number,
+ options: any
+  ): Promise<void> {
+ const messageKey = this.getMessageKey(chatId, messageId)
+ const latestText = this.pendingUpdates.get(messageKey)
+ if (!latestText) return
+
+ const now = Date.now()
+ const timeSinceLastEdit = now - this.lastEditTime
+
+ await this.waitForRateLimit()
+
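+    // Debounce edits: if the previous edit was less than minInterval ago,
+    // queue a single delayed retry that will pick up the newest pending text.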
+ if (timeSinceLastEdit < this.minInterval) {
+ const existingTimeout = this.updateQueue.get(messageKey)
+ if (existingTimeout) {
+ clearTimeout(existingTimeout)
+ }
+
+ const timeout = setTimeout(() => {
+ this.processUpdate(ctx, chatId, messageId, options)
+ }, this.minInterval - timeSinceLastEdit)
+
+ this.updateQueue.set(messageKey, timeout)
+ return
+ }
+
+ try {
+ if (latestText.length > this.max_msg_length) {
+ const chunks: string[] = []
+ let currentChunk = ''
+ let currentLength = 0
+
+ // Split text into chunks while preserving markdown formatting
+ const lines = latestText.split('\n')
+ for (const line of lines) {
+ if (currentLength + line.length + 1 > this.max_msg_length) {
+ if (currentChunk) {
+ chunks.push(currentChunk)
+ currentChunk = ''
+ currentLength = 0
+ }
+ // if a single line is too long, split
+ if (line.length > this.max_msg_length) {
+ for (let i = 0; i < line.length; i += this.max_msg_length) {
+ chunks.push(line.substring(i, i + this.max_msg_length))
+ }
+ } else {
+ currentChunk = line
+ currentLength = line.length
+ }
+ } else {
+ if (currentChunk) {
+ currentChunk += '\n'
+ currentLength++
+ }
+ currentChunk += line
+ currentLength += line.length
+ }
+ }
+ if (currentChunk) {
+ chunks.push(currentChunk)
+ }
+
+ const firstChunk = chunks[0]
+ logger.logChunk(chatId, messageId, firstChunk)
+
+ try {
+ await ctx.telegram.editMessageText(chatId, messageId, undefined, firstChunk, options)
+ } catch (error: any) {
+ if (!error.response?.description?.includes("message is not modified")) {
+ throw error
+ }
+ }
+
+ for (let i = 1; i < chunks.length; i++) {
+ const chunk = chunks[i]
+ const overflowMessageId = this.overflowMessages.get(messageKey)
+
+ if (overflowMessageId) {
+ logger.logChunk(chatId, overflowMessageId, chunk, true)
+ try {
+ await ctx.telegram.editMessageText(chatId, overflowMessageId, undefined, chunk, options)
+ } catch (error: any) {
+ if (!error.response?.description?.includes("message is not modified")) {
+ throw error
+ }
+ }
+ } else {
+ const newMessage = await ctx.telegram.sendMessage(chatId, chunk, {
+ ...options,
+ reply_to_message_id: messageId
+ })
+ logger.logChunk(chatId, newMessage.message_id, chunk, true)
+ this.overflowMessages.set(messageKey, newMessage.message_id)
+ }
+ }
+
+ this.pendingUpdates.set(messageKey, firstChunk)
+ if (chunks.length > 1) {
+ this.pendingUpdates.set(
+ this.getMessageKey(chatId, this.overflowMessages.get(messageKey)!),
+ chunks[chunks.length - 1]
+ )
+ }
+ } else {
+ const sanitizedText = latestText
+ logger.logChunk(chatId, messageId, sanitizedText)
+
+ try {
+ await ctx.telegram.editMessageText(chatId, messageId, undefined, sanitizedText, options)
+ } catch (error: any) {
+ if (!error.response?.description?.includes("message is not modified")) {
+ throw error
+ }
+ }
+ this.pendingUpdates.delete(messageKey)
+ }
+
+ this.lastEditTime = Date.now()
+ this.updateQueue.delete(messageKey)
+ } catch (error: any) {
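+      // Telegram answers 429 with parameters.retry_after (seconds); pause all
+      // edits until that window has passed, then retry this update.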
+ if (error.response?.error_code === 429) {
+ const retryAfter = error.response.parameters?.retry_after || 1
+ this.isRateLimited = true
+ this.rateLimitEndTime = Date.now() + (retryAfter * 1000)
+
+ const existingTimeout = this.updateQueue.get(messageKey)
+ if (existingTimeout) {
+ clearTimeout(existingTimeout)
+ }
+
+ const timeout = setTimeout(() => {
+ this.processUpdate(ctx, chatId, messageId, options)
+ }, retryAfter * 1000)
+
+ this.updateQueue.set(messageKey, timeout)
+ } else if (error.response?.error_code === 400) {
+ if (error.response?.description?.includes("can't parse entities")) {
+ // try again with plain text
+ const plainOptions = { ...options, parse_mode: undefined }
+ await this.processUpdate(ctx, chatId, messageId, plainOptions)
+ } else if (error.response?.description?.includes("MESSAGE_TOO_LONG")) {
+ const plainOptions = { ...options, parse_mode: undefined }
+ await this.processUpdate(ctx, chatId, messageId, plainOptions)
+ } else if (error.response?.description?.includes("message is not modified")) {
+ this.pendingUpdates.delete(messageKey)
+ this.updateQueue.delete(messageKey)
+ } else {
+ logger.logError(error)
+ this.pendingUpdates.delete(messageKey)
+ this.updateQueue.delete(messageKey)
+ }
+ } else {
+ logger.logError(error)
+ this.pendingUpdates.delete(messageKey)
+ this.updateQueue.delete(messageKey)
+ }
+ }
+ }
+
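+  // Public entry point: remember the latest text for this message and let
+  // processUpdate coalesce rapid successive edits into rate-limited updates.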
+ async editMessageWithRetry(
+ ctx: Context,
+ chatId: number,
+ messageId: number,
+ text: string,
+ options: any
+  ): Promise<void> {
+ const messageKey = this.getMessageKey(chatId, messageId)
+ this.pendingUpdates.set(messageKey, text)
+ await this.processUpdate(ctx, chatId, messageId, options)
+ }
+}
+
+export const rateLimiter = new RateLimiter()
\ No newline at end of file