diff --git a/README.md b/README.md index f1b8d2b..3cc7c99 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,11 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make 2. **Make sure to setup your `.env` file first!** + > [!TIP] + > If you intend to set up AI, the defaults for Docker are already included (just uncomment) and don't need to be changed. + > + > Further setup may be needed for GPUs. See the Ollama documentation for more. + 3. **Run the container** ```bash @@ -129,7 +134,7 @@ chmod +x src/plugins/yt-dlp/yt-dlp **Q:** How can I disable AI features? -**A:** AI features are disabled by default, unless you have set `ollamaApi` in your `.env` file. Please remove or comment out this line to disable all AI functionality. +**A:** AI features are disabled by default, unless you have set `ollamaEnabled` to `true` in your `.env` file. Set it back to `false` to disable. ## Contributors diff --git a/src/commands/ai.ts b/src/commands/ai.ts index 33330e8..495a36f 100644 --- a/src/commands/ai.ts +++ b/src/commands/ai.ts @@ -227,8 +227,6 @@ export default (bot: Telegraf) => { if (!ctx.message || !('text' in ctx.message)) return; const isAsk = ctx.message.text.startsWith("/ask") const model = isAsk ? flash_model : thinking_model - console.log(model) - console.log(ctx.message.text) const textCtx = ctx as TextContext; const reply_to_message_id = replyToMessageId(textCtx) const Strings = getStrings(languageCode(textCtx)) diff --git a/src/utils/log.ts b/src/utils/log.ts index 63c046a..67019a8 100644 --- a/src/utils/log.ts +++ b/src/utils/log.ts @@ -32,7 +32,6 @@ import { flash_model, thinking_model } from "../commands/ai" class Logger { private static instance: Logger - private thinking: boolean = false private constructor() {} @@ -44,7 +43,7 @@ class Logger { } logCmdStart(user: string, type: "ask" | "think"): void { - console.log(`\n[✨ AI | START] Received /${type} for model ${type === "ask" ? 
flash_model : thinking_model} from ${user}`) + console.log(`\n[✨ AI | START] Received /${type} for model ${type === "ask" ? flash_model : thinking_model}`) } logThinking(chatId: number, messageId: number, thinking: boolean): void { @@ -61,7 +60,7 @@ class Logger { } logPrompt(prompt: string): void { - console.log(`[✨ AI | PROMPT] ${prompt.length} chars: ${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}`) + console.log(`[✨ AI | PROMPT] ${prompt.length} chars input`) } logError(error: any): void {