diff --git a/.env.example b/.env.example index 8e9cb3f..f59b14a 100644 --- a/.env.example +++ b/.env.example @@ -9,6 +9,8 @@ botToken = "" ollamaEnabled = false # ollamaApi = "http://ollama:11434" # handlerTimeout = "600_000" # set higher if you expect to download larger models +# flashModel = "gemma3:4b" +# thinkingModel = "qwen3:4b" # misc (botAdmins isnt a array here!) maxRetries = 9999 diff --git a/README.md b/README.md index 3cc7c99..7912e3d 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,9 @@ If you prefer to use Docker directly, you can use these instructions instead. - **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather). - **ollamaEnabled** (optional): Enables/disables AI features - **ollamaApi** (optional): Ollama API endpoint for various AI features, will be disabled if not set -- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models. +- **handlerTimeout** (optional, default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models. +- **flashModel** (optional): Which model will be used for /ask +- **thinkingModel** (optional): Which model will be used for /think - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group. - **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc. - **weatherKey**: Weather.com API key, used for the `/weather` command. 
diff --git a/src/commands/ai.ts b/src/commands/ai.ts index a58933d..dfec202 100644 --- a/src/commands/ai.ts +++ b/src/commands/ai.ts @@ -40,8 +40,8 @@ import { rateLimiter } from "../utils/rate-limiter" import { logger } from "../utils/log" const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch) -export const flash_model = "gemma3:4b" -export const thinking_model = "deepseek-r1:1.5b" +export const flash_model = process.env.flashModel || "gemma3:4b" +export const thinking_model = process.env.thinkingModel || "qwen3:4b" type TextContext = Context & { message: Message.TextMessage } @@ -57,15 +57,17 @@ export function sanitizeForJson(text: string): string { export async function preChecks() { const envs = [ "ollamaApi", ] + let checked = 0; for (const env of envs) { if (!process.env[env]) { console.error(`[✨ AI | !] ❌ ${env} not set!`) return false } + checked++; } - console.log("[✨ AI] Pre-checks passed\n") + console.log(`[✨ AI] Pre-checks passed [${checked}/${envs.length}]\n`) return true }