allow changing models from .env, update docs
This commit is contained in:
parent
61d5cc75b6
commit
765b1144fa
3 changed files with 12 additions and 4 deletions
|
@ -9,6 +9,8 @@ botToken = ""
|
|||
ollamaEnabled = false
|
||||
# ollamaApi = "http://ollama:11434"
|
||||
# handlerTimeout = "600_000" # set higher if you expect to download larger models
|
||||
# flashModel = "gemma3:4b"
|
||||
# thinkingModel = "qwen3:4b"
|
||||
|
||||
# misc (botAdmins isn't an array here!)
|
||||
maxRetries = 9999
|
||||
|
|
|
@ -113,7 +113,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
|
|||
- **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather).
|
||||
- **ollamaEnabled** (optional): Enables/disables AI features
|
||||
- **ollamaApi** (optional): Ollama API endpoint for various AI features, will be disabled if not set
|
||||
- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out. Set this high if using large AI models.
|
||||
- **handlerTimeout** (optional): How long handlers will wait before timing out. Set this high if using large AI models.
|
||||
- **flashModel** (optional): Which model will be used for /ask
|
||||
- **thinkingModel** (optional): Which model will be used for /think
|
||||
- **botAdmins**: Put the IDs of the people responsible for managing the bot. They can use some administrative and exclusive commands in any group.
|
||||
- **lastKey**: Last.fm API key, used by the `lastfm.js` functions, such as seeing who is listening to which song.
|
||||
- **weatherKey**: Weather.com API key, used for the `/weather` command.
|
||||
|
|
|
@ -40,8 +40,8 @@ import { rateLimiter } from "../utils/rate-limiter"
|
|||
import { logger } from "../utils/log"
|
||||
|
||||
// Middleware instance that screens incoming updates against SpamWatch bans
// before any AI handler runs. NOTE(review): spamwatchMiddlewareModule and
// isOnSpamWatch are imported elsewhere in this file — confirm their contract there.
const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
|
||||
export const flash_model = "gemma3:4b"
|
||||
export const thinking_model = "deepseek-r1:1.5b"
|
||||
export const flash_model = process.env.flashModel || "gemma3:4b"
|
||||
export const thinking_model = process.env.thinkingModel || "qwen3:4b"
|
||||
|
||||
// Telegraf context narrowed to updates that carry a plain text message,
// so handlers can read ctx.message.text without further narrowing.
type TextContext = Context & { message: Message.TextMessage }
|
||||
|
||||
|
@ -57,15 +57,19 @@ export function sanitizeForJson(text: string): string {
|
|||
export async function preChecks() {
|
||||
const envs = [
|
||||
"ollamaApi",
|
||||
"flashModel",
|
||||
"thinkingModel",
|
||||
]
|
||||
|
||||
let checked = 0;
|
||||
for (const env of envs) {
|
||||
if (!process.env[env]) {
|
||||
console.error(`[✨ AI | !] ❌ ${env} not set!`)
|
||||
return false
|
||||
}
|
||||
checked++;
|
||||
}
|
||||
console.log("[✨ AI] Pre-checks passed\n")
|
||||
console.log(`[✨ AI] Pre-checks passed [${checked}/${envs.length}]\n`)
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue