diff --git a/.env.example b/.env.example
index de7d379..8e9cb3f 100644
--- a/.env.example
+++ b/.env.example
@@ -6,7 +6,9 @@ botSource = "https://github.com/ABOCN/TelegramBot"
botToken = ""
# ai features
+ollamaEnabled = false
# ollamaApi = "http://ollama:11434"
+# handlerTimeout = "600000" # set higher if you expect to download larger models
# misc (botAdmins isn't an array here!)
maxRetries = 9999
diff --git a/README.md b/README.md
index a0e4aab..f1b8d2b 100644
--- a/README.md
+++ b/README.md
@@ -10,12 +10,6 @@ Kowalski is a a simple Telegram bot made in Node.js.
- You can find Kowalski at [@KowalskiNodeBot](https://t.me/KowalskiNodeBot) on Telegram.
-## Translations
-
-
-
-
-
## Self-host requirements
> [!IMPORTANT]
@@ -26,7 +20,10 @@ Kowalski is a a simple Telegram bot made in Node.js.
- FFmpeg (only for the `/yt` command)
- Docker and Docker Compose (only required for Docker setup)
-_AI features require a higher-end system with a CPU/GPU_
+### AI Requirements
+
+- A high-end CPU *or* a GPU (~6 GB VRAM)
+- If running on the CPU, enough RAM to load the models (~6 GB with the default models)
## Running locally (non-Docker setup)
@@ -60,7 +57,7 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make
1. **Copy compose file**
_Without AI (Ollama)_
-
+
```bash
mv docker-compose.yml.example docker-compose.yml
```
@@ -70,7 +67,7 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make
```bash
mv docker-compose.yml.ai.example docker-compose.yml
```
-
+
2. **Make sure to setup your `.env` file first!**
3. **Run the container**
@@ -109,7 +106,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
- **botPrivacy**: Put the link to your bot privacy policy.
- **maxRetries**: Maximum number of retries for a failing command on Kowalski. Default is 5. If the limit is hit, the bot will crash past this number.
- **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather).
+- **ollamaEnabled** (optional): Set to `true` to enable AI features. Requires **ollamaApi** to be set.
- **ollamaApi** (optional): Ollama API endpoint for various AI features, will be disabled if not set
+- **handlerTimeout** (default: `600_000`): How long (in milliseconds) handlers will wait before timing out. Set this higher if you expect to pull large AI models.
- **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group.
- **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc.
- **weatherKey**: Weather.com API key, used for the `/weather` command.
diff --git a/src/bot.ts b/src/bot.ts
index 3422e56..04d2c97 100644
--- a/src/bot.ts
+++ b/src/bot.ts
@@ -4,6 +4,7 @@ import fs from 'fs';
import { isOnSpamWatch } from './spamwatch/spamwatch';
import '@dotenvx/dotenvx';
import './plugins/ytDlpWrapper';
+import { preChecks } from './commands/ai';
// Ensures bot token is set, and not default value
if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') {
@@ -11,7 +12,17 @@ if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere')
process.exit(1)
}
-const bot = new Telegraf(process.env.botToken);
+// Detect AI and run pre-checks
+if (process.env.ollamaEnabled === "true") {
+ if (!(await preChecks())) {
+ process.exit(1)
+ }
+}
+
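+// handlerTimeout is in milliseconds; 600_000 (10 minutes) leaves room for long generations and model pulls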
+const bot = new Telegraf(
+ process.env.botToken,
+ { handlerTimeout: Number(process.env.handlerTimeout) || 600_000 }
+);
const maxRetries = process.env.maxRetries || 5;
let restartCount = 0;
@@ -21,7 +32,7 @@ const loadCommands = () => {
try {
const files = fs.readdirSync(commandsPath)
.filter(file => file.endsWith('.ts') || file.endsWith('.js'));
-
+
files.forEach((file) => {
try {
const commandPath = path.join(commandsPath, file);
diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index e62489c..33330e8 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -40,8 +40,8 @@ import { rateLimiter } from "../utils/rate-limiter"
import { logger } from "../utils/log"
const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
-//const model = "qwen3:0.6b"
-const model = "deepseek-r1:1.5b"
+export const flash_model = "gemma3:4b"
+export const thinking_model = "deepseek-r1:1.5b"
type TextContext = Context & { message: Message.TextMessage }
@@ -54,7 +54,22 @@ export function sanitizeForJson(text: string): string {
.replace(/\t/g, '\\t')
}
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message) {
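+// Pre-check: make sure the environment variables required for AI features are set before the bot starts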
+export async function preChecks() {
+ const envs = [
+ "ollamaApi",
+ ]
+
+ for (const env of envs) {
+ if (!process.env[env]) {
+ console.error(`[✨ AI | !] ❌ ${env} not set!`)
+ return false
+ }
+ }
+ console.log("[✨ AI] Pre-checks passed\n")
+ return true
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) {
const Strings = getStrings(languageCode(ctx))
if (!ctx.chat) return {
@@ -74,23 +89,50 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
let fullResponse = ""
let thoughts = ""
let lastUpdate = Date.now()
-
+
for await (const chunk of aiResponse.data) {
const lines = chunk.toString().split('\n')
for (const line of lines) {
if (!line.trim()) continue
let ln = JSON.parse(line)
-
- if (ln.response.includes("<think>")) { logger.logThinking(true) } else if (ln.response.includes("</think>")) { logger.logThinking(false) }
+
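+ // Watch the stream for <think> open/close tags so the start and end of thinking can be logged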
+ if (model === thinking_model && ln.response.includes('<think>')) {
+ const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
+ if (thinkMatch) {
+ const innerContent = thinkMatch[1]
+ if (innerContent.trim().length > 0) {
+ logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+ }
+ } else {
+ logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+ }
+ } else if (model === thinking_model && ln.response.includes('</think>')) {
+ logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
+ }
try {
const now = Date.now()
-
- if (ln.response) {
- const patchedThoughts = ln.response.replace("<think>", "`Thinking...`").replace("</think>", "`Finished thinking`")
- thoughts += patchedThoughts
- fullResponse += patchedThoughts
+ if (ln.response) {
+ if (model === thinking_model) {
+ let patchedThoughts = ln.response
+ // TODO: hide blank thinking chunks
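+ // Replace <think>...</think> with Markdown "Thinking..." / "Finished thinking" markers, dropping think blocks that are empty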
+ const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
+ patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => {
+ if (p1.trim().length > 0) {
+ console.log(p1)
+ return '`Thinking...`' + p1 + '`Finished thinking`'
+ } else {
+ return ''
+ }
+ })
+ patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
+ patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
+ thoughts += patchedThoughts
+ fullResponse += patchedThoughts
+ } else {
+ fullResponse += ln.response
+ }
if (now - lastUpdate >= 1000) {
await rateLimiter.editMessageWithRetry(
ctx,
@@ -103,7 +145,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
}
}
} catch (e) {
- console.error("Error parsing chunk:", e)
+ console.error("[✨ AI | !] Error parsing chunk:", e)
}
}
}
@@ -119,7 +161,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
if (error.response.data.error.includes(`model '${model}' not found`) || error.status === 404) {
shouldPullModel = true
} else {
- console.error("[!] 1", error.response.data.error)
+ console.error("[✨ AI | !] Error zone 1:", error.response.data.error)
return {
"success": false,
"error": error.response.data.error,
@@ -130,23 +172,25 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
}
if (shouldPullModel) {
- ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...`)
- console.log(`[i] Pulling ${model} from ollama...`)
+ ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`)
+ console.log(`[✨ AI | i] Pulling ${model} from ollama...`)
+ let pullModelStream: any
- const pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
- model: model,
- stream: false,
- })
-
- if (pullModelStream.data.status !== ("success")) {
- console.error("[!] Something went wrong:", pullModelStream.data)
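+ // Pull the missing model via the Ollama API; with stream: false the request only resolves once the pull completes or the timeout is hit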
+ try {
+ pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
+ model: model,
+ stream: false,
+ timeout: Number(process.env.ollamaApiTimeout) || 10000,
+ })
+ } catch (e: any) {
+ console.error("[✨ AI | !] Something went wrong:", e.response?.data?.error || e.message)
return {
"success": false,
"error": `❌ Something went wrong while pulling ${model}, please try your command again!`,
}
}
- console.log("[i] Model pulled successfully")
+ console.log(`[✨ AI | i] ${model} pulled successfully`)
return {
"success": true,
"response": `✅ Pulled ${model} successfully, please retry the command.`,
@@ -154,7 +198,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
}
if (error.response) {
- console.error("[!] 2", error.response)
+ console.error("[✨ AI | !] Error zone 2:", error.response)
return {
"success": false,
"error": error.response,
@@ -162,7 +206,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
}
if (error.statusText) {
- console.error("[!] 3", error.statusText)
+ console.error("[✨ AI | !] Error zone 3:", error.statusText)
return {
"success": false,
"error": error.statusText,
@@ -177,15 +221,24 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
}
export default (bot: Telegraf) => {
- bot.command("ask", spamwatchMiddleware, async (ctx) => {
+ const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
+
+ bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
if (!ctx.message || !('text' in ctx.message)) return;
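+ // /ask uses the flash model, /think uses the thinking (reasoning) model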
+ const isAsk = ctx.message.text.startsWith("/ask")
+ const model = isAsk ? flash_model : thinking_model
+ console.log(model)
+ console.log(ctx.message.text)
const textCtx = ctx as TextContext;
const reply_to_message_id = replyToMessageId(textCtx)
const Strings = getStrings(languageCode(textCtx))
const message = textCtx.message.text
const author = ("@" + ctx.from?.username) || ctx.from?.first_name
- logger.logCmdStart(author)
+ logger.logCmdStart(
+ author,
+ model === flash_model ? "ask" : "think"
+ )
if (!process.env.ollamaApi) {
await ctx.reply(Strings.aiDisabled, {
@@ -212,12 +265,14 @@ export default (bot: Telegraf) => {
logger.logPrompt(fixedMsg)
const prompt = sanitizeForJson(
-`You are a helpful assistant named Kowalski, who has been given a message from a user.
+`You are a helpful assistant called ${botName}.
+Current Date/Time (UTC): ${new Date().toUTCString()}
-The message is:
+---
+Respond to the user's message:
${fixedMsg}`)
- const aiResponse = await getResponse(prompt, textCtx, replyGenerating)
+ const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model)
if (!aiResponse) return
if (aiResponse.success && aiResponse.response) {
@@ -239,7 +294,6 @@ ${fixedMsg}`)
error,
{ parse_mode: 'Markdown' }
)
- console.error("[!] Error sending response:", aiResponse.error)
}
})
}
\ No newline at end of file
diff --git a/src/utils/log.ts b/src/utils/log.ts
index b70e5f3..63c046a 100644
--- a/src/utils/log.ts
+++ b/src/utils/log.ts
@@ -28,6 +28,8 @@
//
// For more information, please refer to
+import { flash_model, thinking_model } from "../commands/ai"
+
class Logger {
private static instance: Logger
private thinking: boolean = false
@@ -41,42 +43,42 @@ class Logger {
return Logger.instance
}
- logCmdStart(user: string): void {
- console.log(`[START] Received /ask from ${user}`)
+ logCmdStart(user: string, type: "ask" | "think"): void {
+ console.log(`\n[✨ AI | START] Received /${type} for model ${type === "ask" ? flash_model : thinking_model} from ${user}`)
}
- logThinking(thinking: boolean): void {
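+ // chatId and messageId identify which reply's generation started or stopped thinking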
+ logThinking(chatId: number, messageId: number, thinking: boolean): void {
if (thinking) {
- console.log("[THINKING] Started")
+ console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model started thinking`)
} else {
- console.log("[THINKING] Ended")
+ console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model stopped thinking`)
}
}
logChunk(chatId: number, messageId: number, text: string, isOverflow: boolean = false): void {
- const prefix = isOverflow ? "[OVERFLOW]" : "[CHUNK]"
- console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars`)
+ const prefix = isOverflow ? "[✨ AI | OVERFLOW]" : "[✨ AI | CHUNK]"
+ console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars pushed to Telegram`)
}
logPrompt(prompt: string): void {
- console.log(`[PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}`)
+ console.log(`[✨ AI | PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}`)
}
logError(error: any): void {
if (error.response?.error_code === 429) {
const retryAfter = error.response.parameters?.retry_after || 1
- console.error(`[RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
+ console.error(`[✨ AI | RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
} else if (error.response?.error_code === 400 && error.response?.description?.includes("can't parse entities")) {
- console.error("[PARSE_ERROR] Markdown parsing failed, retrying with plain text")
+ console.error("[✨ AI | PARSE_ERROR] Markdown parsing failed, retrying with plain text")
} else {
const errorDetails = {
code: error.response?.error_code,
description: error.response?.description,
method: error.on?.method
}
- console.error("[ERROR]", JSON.stringify(errorDetails, null, 2))
+ console.error("[✨ AI | ERROR]", JSON.stringify(errorDetails, null, 2))
}
}
}
-export const logger = Logger.getInstance()
\ No newline at end of file
+export const logger = Logger.getInstance()
\ No newline at end of file
diff --git a/src/utils/rate-limiter.ts b/src/utils/rate-limiter.ts
index 3c1d537..4b4a324 100644
--- a/src/utils/rate-limiter.ts
+++ b/src/utils/rate-limiter.ts
@@ -80,7 +80,7 @@ class RateLimiter {
const timeout = setTimeout(() => {
this.processUpdate(ctx, chatId, messageId, options)
}, this.minInterval - timeSinceLastEdit)
-
+
this.updateQueue.set(messageKey, timeout)
return
}
@@ -124,7 +124,7 @@ class RateLimiter {
const firstChunk = chunks[0]
logger.logChunk(chatId, messageId, firstChunk)
-
+
try {
await ctx.telegram.editMessageText(chatId, messageId, undefined, firstChunk, options)
} catch (error: any) {
@@ -136,11 +136,11 @@ class RateLimiter {
for (let i = 1; i < chunks.length; i++) {
const chunk = chunks[i]
const overflowMessageId = this.overflowMessages.get(messageKey)
-
+
if (overflowMessageId) {
- logger.logChunk(chatId, overflowMessageId, chunk, true)
try {
await ctx.telegram.editMessageText(chatId, overflowMessageId, undefined, chunk, options)
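+ // Log the overflow chunk only after the edit succeeds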
+ logger.logChunk(chatId, overflowMessageId, chunk, true)
} catch (error: any) {
if (!error.response?.description?.includes("message is not modified")) {
throw error
@@ -166,7 +166,7 @@ class RateLimiter {
} else {
const sanitizedText = latestText
logger.logChunk(chatId, messageId, sanitizedText)
-
+
try {
await ctx.telegram.editMessageText(chatId, messageId, undefined, sanitizedText, options)
} catch (error: any) {
@@ -184,7 +184,7 @@ class RateLimiter {
const retryAfter = error.response.parameters?.retry_after || 1
this.isRateLimited = true
this.rateLimitEndTime = Date.now() + (retryAfter * 1000)
-
+
const existingTimeout = this.updateQueue.get(messageKey)
if (existingTimeout) {
clearTimeout(existingTimeout)
@@ -193,7 +193,7 @@ class RateLimiter {
const timeout = setTimeout(() => {
this.processUpdate(ctx, chatId, messageId, options)
}, retryAfter * 1000)
-
+
this.updateQueue.set(messageKey, timeout)
} else if (error.response?.error_code === 400) {
if (error.response?.description?.includes("can't parse entities")) {