bug fixes, better logging and separation of ai, update docs for ai

Aidan 2025-06-27 03:03:17 -04:00
parent 995f61b0b9
commit f43fcd470f
6 changed files with 129 additions and 61 deletions


@@ -6,7 +6,9 @@ botSource = "https://github.com/ABOCN/TelegramBot"
 botToken = ""
 
 # ai features
+ollamaEnabled = false
 # ollamaApi = "http://ollama:11434"
+# handlerTimeout = "600_000" # set higher if you expect to download larger models
 
 # misc (botAdmins isn't an array here!)
 maxRetries = 9999


@@ -10,12 +10,6 @@ Kowalski is a simple Telegram bot made in Node.js.
 - You can find Kowalski at [@KowalskiNodeBot](https://t.me/KowalskiNodeBot) on Telegram.
 
-## Translations
-
-<a href="https://weblate.librecloud.cc/engage/kowalski/">
-  <img src="https://weblate.librecloud.cc/widget/kowalski/multi-auto.svg" alt="Translation status" />
-</a>
-
 ## Self-host requirements
 
 > [!IMPORTANT]
@@ -26,7 +20,10 @@ Kowalski is a simple Telegram bot made in Node.js.
 - FFmpeg (only for the `/yt` command)
 - Docker and Docker Compose (only required for Docker setup)
 
-_AI features require a higher-end system with a CPU/GPU_
+### AI Requirements
+
+- High-end CPU *or* GPU (~6GB VRAM)
+- If using CPU, enough RAM to load the models (~6GB with defaults)
 
 ## Running locally (non-Docker setup)
@@ -109,7 +106,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
 - **botPrivacy**: Put the link to your bot privacy policy.
 - **maxRetries**: Maximum number of retries for a failing command on Kowalski. Default is 5. If this limit is reached, the bot will crash.
 - **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather).
+- **ollamaEnabled** (optional): Enables/disables AI features
 - **ollamaApi** (optional): Ollama API endpoint for various AI features, will be disabled if not set
+- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models.
 - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative and exclusive commands in any group.
 - **lastKey**: Last.fm API key, used by `lastfm.js` functions, e.g. to see who is listening to which song.
 - **weatherKey**: Weather.com API key, used for the `/weather` command.
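Putting the AI options together, a minimal AI-enabled configuration might look like the sketch below. The token and endpoint are placeholders, and `handlerTimeout` is written without the underscore separator so that `Number()` can parse it (see the note after the bot setup diff below):

```
botToken = "1234567890:REPLACE_ME"
ollamaEnabled = true
ollamaApi = "http://ollama:11434"
handlerTimeout = "900000" # 15 minutes, generous headroom for large model downloads
```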


@@ -4,6 +4,7 @@ import fs from 'fs';
 import { isOnSpamWatch } from './spamwatch/spamwatch';
 import '@dotenvx/dotenvx';
 import './plugins/ytDlpWrapper';
+import { preChecks } from './commands/ai';
 
 // Ensures bot token is set, and not default value
 if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') {
@@ -11,7 +12,17 @@ if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere')
   process.exit(1)
 }
 
-const bot = new Telegraf(process.env.botToken);
+// Detect AI and run pre-checks
+if (process.env.ollamaEnabled === "true") {
+  if (!(await preChecks())) {
+    process.exit(1)
+  }
+}
+
+const bot = new Telegraf(
+  process.env.botToken,
+  { handlerTimeout: Number(process.env.handlerTimeout) || 600_000 }
+);
 const maxRetries = process.env.maxRetries || 5;
 let restartCount = 0;
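One caveat with this fallback, as an observation rather than something the commit addresses: JavaScript's `_` digit separator is only valid in numeric literals, not in strings passed to `Number()`. A `handlerTimeout` value copied verbatim from the commented config example therefore silently falls back to the default:

```ts
Number("600_000")            // NaN: "_" is not parsed in strings
Number("600_000") || 600_000 // 600000, i.e. the default, not an override
Number("900000")             // 900000: plain digits actually take effect
```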


@@ -40,8 +40,8 @@ import { rateLimiter } from "../utils/rate-limiter"
 import { logger } from "../utils/log"
 
 const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
-//const model = "qwen3:0.6b"
-const model = "deepseek-r1:1.5b"
+export const flash_model = "gemma3:4b"
+export const thinking_model = "deepseek-r1:1.5b"
 
 type TextContext = Context & { message: Message.TextMessage }
@@ -54,7 +54,22 @@ export function sanitizeForJson(text: string): string {
     .replace(/\t/g, '\\t')
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message) {
+export async function preChecks() {
+  const envs = [
+    "ollamaApi",
+  ]
+
+  for (const env of envs) {
+    if (!process.env[env]) {
+      console.error(`[✨ AI | !] ❌ ${env} not set!`)
+      return false
+    }
+  }
+  console.log("[✨ AI] Pre-checks passed\n")
+  return true
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) {
   const Strings = getStrings(languageCode(ctx))
 
   if (!ctx.chat) return {
@@ -81,16 +96,43 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       if (!line.trim()) continue
       let ln = JSON.parse(line)
 
-      if (ln.response.includes("<think>")) { logger.logThinking(true) } else if (ln.response.includes("</think>")) { logger.logThinking(false) }
+      if (model === thinking_model && ln.response.includes('<think>')) {
+        const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
+        if (thinkMatch) {
+          const innerContent = thinkMatch[1]
+          if (innerContent.trim().length > 0) {
+            logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+          }
+        } else {
+          logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+        }
+      } else if (model === thinking_model && ln.response.includes('</think>')) {
+        logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
+      }
 
       try {
         const now = Date.now()
 
         if (ln.response) {
-          const patchedThoughts = ln.response.replace("<think>", "`Thinking...`").replace("</think>", "`Finished thinking`")
-          thoughts += patchedThoughts
-          fullResponse += patchedThoughts
+          if (model === thinking_model) {
+            let patchedThoughts = ln.response
+            // TODO: hide blank thinking chunks
+            const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
+            patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => {
+              if (p1.trim().length > 0) {
+                console.log(p1)
+                return '`Thinking...`' + p1 + '`Finished thinking`'
+              } else {
+                return ''
+              }
+            })
+            patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
+            patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
+            thoughts += patchedThoughts
+            fullResponse += patchedThoughts
+          } else {
+            fullResponse += ln.response
+          }
 
           if (now - lastUpdate >= 1000) {
             await rateLimiter.editMessageWithRetry(
               ctx,
@@ -103,7 +145,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       }
     }
   } catch (e) {
-    console.error("Error parsing chunk:", e)
+    console.error("[✨ AI | !] Error parsing chunk:", e)
   }
 }
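To illustrate what the new patching does, here is a standalone sketch of the same replacement applied to a sample chunk (the sample text is invented; the regex mirrors the one above):

```ts
const chunk = "<think>weighing options</think>The answer is 4."
const patched = chunk
  // closed tag pair with non-blank content: wrap the thought in markers
  .replace(/<think>([\s\S]*?)<\/think>/g, (_m, p1) =>
    p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '')
  // stray opening tag (the closing tag arrives in a later chunk)
  .replace(/<think>/g, '`Thinking...`')
  // stray closing tag (the opening tag arrived in an earlier chunk)
  .replace(/<\/think>/g, '`Finished thinking`')
// patched === "`Thinking...`weighing options`Finished thinking`The answer is 4."
```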
@@ -119,7 +161,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     if (error.response.data.error.includes(`model '${model}' not found`) || error.status === 404) {
       shouldPullModel = true
     } else {
-      console.error("[!] 1", error.response.data.error)
+      console.error("[✨ AI | !] Error zone 1:", error.response.data.error)
       return {
         "success": false,
         "error": error.response.data.error,
@@ -130,23 +172,25 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   }
 
   if (shouldPullModel) {
-    ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...`)
-    console.log(`[i] Pulling ${model} from ollama...`)
+    ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`)
+    console.log(`[✨ AI | i] Pulling ${model} from ollama...`)
+    let pullModelStream: any
 
-    const pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
-      model: model,
-      stream: false,
-    })
-
-    if (pullModelStream.data.status !== ("success")) {
-      console.error("[!] Something went wrong:", pullModelStream.data)
+    try {
+      pullModelStream = await axios.post(`${process.env.ollamaApi}/api/pull`, {
+        model: model,
+        stream: false,
+        timeout: process.env.ollamaApiTimeout || 10000,
+      })
+    } catch (e: any) {
+      console.error("[✨ AI | !] Something went wrong:", e.response.data.error)
       return {
         "success": false,
        "error": `❌ Something went wrong while pulling ${model}, please try your command again!`,
      }
    }
 
-    console.log("[i] Model pulled successfully")
+    console.log(`[✨ AI | i] ${model} pulled successfully`)
     return {
       "success": true,
       "response": `✅ Pulled ${model} successfully, please retry the command.`,
@@ -154,7 +198,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   }
 
   if (error.response) {
-    console.error("[!] 2", error.response)
+    console.error("[✨ AI | !] Error zone 2:", error.response)
     return {
       "success": false,
       "error": error.response,
@@ -162,7 +206,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   }
 
   if (error.statusText) {
-    console.error("[!] 3", error.statusText)
+    console.error("[✨ AI | !] Error zone 3:", error.statusText)
     return {
       "success": false,
       "error": error.statusText,
@@ -177,15 +221,24 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
 }
 
 export default (bot: Telegraf<Context>) => {
-  bot.command("ask", spamwatchMiddleware, async (ctx) => {
+  const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
+
+  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
     if (!ctx.message || !('text' in ctx.message)) return;
+    const isAsk = ctx.message.text.startsWith("/ask")
+    const model = isAsk ? flash_model : thinking_model
+    console.log(model)
+    console.log(ctx.message.text)
     const textCtx = ctx as TextContext;
     const reply_to_message_id = replyToMessageId(textCtx)
     const Strings = getStrings(languageCode(textCtx))
     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
 
-    logger.logCmdStart(author)
+    logger.logCmdStart(
+      author,
+      model === flash_model ? "ask" : "think"
+    )
 
     if (!process.env.ollamaApi) {
       await ctx.reply(Strings.aiDisabled, {
@@ -212,12 +265,14 @@ export default (bot: Telegraf<Context>) => {
     logger.logPrompt(fixedMsg)
 
     const prompt = sanitizeForJson(
-`You are a helpful assistant named Kowalski, who has been given a message from a user.
+`You are a helpful assistant called ${botName}.
+Current Date/Time (UTC): ${new Date().toLocaleString()}
 
-The message is:
+---
+Respond to the user's message:
 ${fixedMsg}`)
-    const aiResponse = await getResponse(prompt, textCtx, replyGenerating)
+    const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model)
 
     if (!aiResponse) return
 
     if (aiResponse.success && aiResponse.response) {
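For reference, the rendered prompt sent to Ollama would look roughly like this (message and timestamp invented; note that `toLocaleString()` formats in the host timezone, so the "UTC" label is only accurate when the server runs in UTC):

```
You are a helpful assistant called Kowalski.
Current Date/Time (UTC): 6/27/2025, 3:03:17 AM

---
Respond to the user's message:
what is the speed of light?
```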
@@ -239,7 +294,6 @@ ${fixedMsg}`)
         error,
         { parse_mode: 'Markdown' }
       )
-      console.error("[!] Error sending response:", aiResponse.error)
     }
   })
 }


@@ -28,6 +28,8 @@
 //
 // For more information, please refer to <https://unlicense.org/>
 
+import { flash_model, thinking_model } from "../commands/ai"
+
 class Logger {
   private static instance: Logger
   private thinking: boolean = false
@@ -41,40 +43,40 @@ class Logger {
     return Logger.instance
   }
 
-  logCmdStart(user: string): void {
-    console.log(`[START] Received /ask from ${user}`)
+  logCmdStart(user: string, type: "ask" | "think"): void {
+    console.log(`\n[✨ AI | START] Received /${type} for model ${type === "ask" ? flash_model : thinking_model} from ${user}`)
   }
 
-  logThinking(thinking: boolean): void {
+  logThinking(chatId: number, messageId: number, thinking: boolean): void {
     if (thinking) {
-      console.log("[THINKING] Started")
+      console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model started thinking`)
     } else {
-      console.log("[THINKING] Ended")
+      console.log(`[✨ AI | THINKING | ${chatId}:${messageId}] Model stopped thinking`)
     }
   }
 
   logChunk(chatId: number, messageId: number, text: string, isOverflow: boolean = false): void {
-    const prefix = isOverflow ? "[OVERFLOW]" : "[CHUNK]"
-    console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars`)
+    const prefix = isOverflow ? "[✨ AI | OVERFLOW]" : "[✨ AI | CHUNK]"
+    console.log(`${prefix} [${chatId}:${messageId}] ${text.length} chars pushed to Telegram`)
   }
 
   logPrompt(prompt: string): void {
-    console.log(`[PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}`)
+    console.log(`[✨ AI | PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}`)
   }
 
   logError(error: any): void {
     if (error.response?.error_code === 429) {
       const retryAfter = error.response.parameters?.retry_after || 1
-      console.error(`[RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
+      console.error(`[✨ AI | RATE_LIMIT] Too Many Requests - retry after ${retryAfter}s`)
     } else if (error.response?.error_code === 400 && error.response?.description?.includes("can't parse entities")) {
-      console.error("[PARSE_ERROR] Markdown parsing failed, retrying with plain text")
+      console.error("[✨ AI | PARSE_ERROR] Markdown parsing failed, retrying with plain text")
     } else {
       const errorDetails = {
         code: error.response?.error_code,
         description: error.response?.description,
         method: error.on?.method
       }
-      console.error("[ERROR]", JSON.stringify(errorDetails, null, 2))
+      console.error("[✨ AI | ERROR]", JSON.stringify(errorDetails, null, 2))
     }
   }
 }
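A quick sketch of how the updated signatures are called, with the output they should produce given the code above (username and IDs invented):

```ts
logger.logCmdStart("@alice", "think")
// prints: [✨ AI | START] Received /think for model deepseek-r1:1.5b from @alice

logger.logThinking(123456789, 42, true)
// prints: [✨ AI | THINKING | 123456789:42] Model started thinking

logger.logChunk(123456789, 42, "partial response…")
// prints: [✨ AI | CHUNK] [123456789:42] 17 chars pushed to Telegram
```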


@@ -138,9 +138,9 @@ class RateLimiter {
     const overflowMessageId = this.overflowMessages.get(messageKey)
 
     if (overflowMessageId) {
-      logger.logChunk(chatId, overflowMessageId, chunk, true)
       try {
         await ctx.telegram.editMessageText(chatId, overflowMessageId, undefined, chunk, options)
+        logger.logChunk(chatId, overflowMessageId, chunk, true)
       } catch (error: any) {
         if (!error.response?.description?.includes("message is not modified")) {
           throw error