diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index 97a8c48..de69903 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -69,7 +69,7 @@ interface OllamaResponse {
 export const models: ModelInfo[] = [
   {
     name: 'gemma3n',
-    label: 'Gemma3n',
+    label: 'gemma3n',
     descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
     descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
     models: [
@@ -79,7 +79,7 @@ export const models: ModelInfo[] = [
   },
   {
     name: 'gemma3-abliterated',
-    label: 'Gemma3 Uncensored',
+    label: 'gemma3 Uncensored',
     descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
     descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
     models: [
@@ -103,7 +103,18 @@ export const models: ModelInfo[] = [
     descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
     models: [
       { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
       { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'phi3',
+    label: 'Phi3',
+    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
+    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
+    models: [
+      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
     ]
   }
 ];
@@ -224,7 +235,11 @@ function escapeMarkdown(text: string): string {
   return text.replace(/([*_])/g, '\\$1');
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> {
+function containsUrls(text: string): boolean {
+  return text.includes('http://') || text.includes('https://');
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string): Promise<{ success: boolean; response?: string; error?: string }> {
   const Strings = getStrings(languageCode(ctx));
   if (!ctx.chat) {
     return {
@@ -233,6 +248,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     };
   }
   const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
+
   try {
     const aiResponse = await axios.post(
       `${process.env.ollamaApi}/api/generate`,
@@ -289,12 +306,12 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       } else {
         fullResponse += ln.response;
       }
-      if (now - lastUpdate >= 1000 || !sentHeader) {
+      if (now - lastUpdate >= 5000 || !sentHeader) {
         await rateLimiter.editMessageWithRetry(
           ctx,
           ctx.chat.id,
           replyGenerating.message_id,
-          modelHeader + fullResponse,
+          modelHeader + urlWarning + fullResponse,
           { parse_mode: 'Markdown' }
         );
         lastUpdate = now;
@@ -354,18 +371,21 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   }
 }
 
-async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string) {
   const Strings = getStrings(languageCode(ctx));
-  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature);
+  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage);
   if (!aiResponse) return;
   if (!ctx.chat) return;
   const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+
+  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
+
   if (aiResponse.success && aiResponse.response) {
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + aiResponse.response,
+      modelHeader + urlWarning + aiResponse.response,
       { parse_mode: 'Markdown' }
     );
     return;
@@ -411,7 +431,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
     const model = isAsk ? flash_model : thinking_model
     const textCtx = ctx as TextContext
     const reply_to_message_id = replyToMessageId(textCtx)
-    const { Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+    const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
 
@@ -425,6 +445,14 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
       return
     }
 
+    if (!user.aiEnabled) {
+      await ctx.reply(Strings.ai.disabledForUser, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
     const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim()
     if (fixedMsg.length < 1) {
       await ctx.reply(Strings.ai.askNoMessage, {
@@ -442,7 +470,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
     logger.logPrompt(fixedMsg)
 
     const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature)
+    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg)
   })
 
   bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
@@ -450,7 +478,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
     if (!ctx.message || !("text" in ctx.message)) return
     const textCtx = ctx as TextContext
     const reply_to_message_id = replyToMessageId(textCtx)
-    const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
 
@@ -464,6 +492,14 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
       return
     }
 
+    if (!user.aiEnabled) {
+      await ctx.reply(Strings.ai.disabledForUser, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
     const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
     if (fixedMsg.length < 1) {
       await ctx.reply(Strings.ai.askNoMessage, {
@@ -482,7 +518,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
       logger.logPrompt(fixedMsg)
 
       const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature)
+      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg)
     } catch (err) {
       const Strings = getStrings(languageCode(ctx));
       if (ctx && ctx.reply) {
@@ -494,4 +530,4 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
       }
     }
   })
-}
\ No newline at end of file
+}
diff --git a/src/locales/english.json b/src/locales/english.json
index cd83da6..74aa29e 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -67,12 +67,14 @@
     "helpEntry": "✨ AI Commands",
     "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI\n- /think ``: Ask a thinking model about a question",
     "disabled": "✨ AI features are currently disabled",
+    "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings",
     "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
     "askGenerating": "✨ _{model} is working..._",
     "askNoMessage": "Please provide a message to ask the model.",
     "languageCode": "Language",
     "thinking": "Thinking...",
-    "finishedThinking": "Finished thinking"
+    "finishedThinking": "Finished thinking",
+    "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n"
   },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",
diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json
index 2d04a8f..81959af 100644
--- a/src/locales/portuguese.json
+++ b/src/locales/portuguese.json
@@ -66,12 +66,14 @@
     "helpEntry": "✨ Comandos de IA",
     "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento",
     "disabled": "✨ Os recursos de IA estão desativados no momento",
+    "disabledForUser": "✨ Os recursos de IA estão desativados para sua conta. Você pode ativá-los em /settings",
     "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
     "askGenerating": "✨ _{model} está funcionando..._",
     "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
     "languageCode": "Idioma",
     "thinking": "Pensando...",
-    "finishedThinking": "Pensamento finalizado"
+    "finishedThinking": "Pensamento finalizado",
+    "urlWarning": "⚠️ *Aviso: Não posso acessar ou abrir links. Por favor, forneça o conteúdo diretamente se precisar que eu analise algo de um site.*\n\n"
   },
   "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
   "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",
diff --git a/src/utils/rate-limiter.ts b/src/utils/rate-limiter.ts
index b65ebb2..10673d9 100644
--- a/src/utils/rate-limiter.ts
+++ b/src/utils/rate-limiter.ts
@@ -32,7 +32,7 @@ import { Context } from 'telegraf'
 import { logger } from './log'
 
 class RateLimiter {
-  private lastEditTime: number = 0
+  private lastEditTimes: Map<string, number> = new Map()
   private readonly minInterval: number = 5000
   private pendingUpdates: Map = new Map()
   private updateQueue: Map = new Map()
@@ -144,7 +144,8 @@ class RateLimiter {
     if (!latestText) return
 
     const now = Date.now()
-    const timeSinceLastEdit = now - this.lastEditTime
+    const lastEditTime = this.lastEditTimes.get(messageKey) || 0
+    const timeSinceLastEdit = now - lastEditTime
     await this.waitForRateLimit(chatId, messageId)
 
     if (timeSinceLastEdit < this.minInterval) {
@@ -217,7 +218,7 @@ class RateLimiter {
       }
       this.pendingUpdates.delete(messageKey)
     }
-    this.lastEditTime = Date.now()
+    this.lastEditTimes.set(messageKey, Date.now())
     this.updateQueue.delete(messageKey)
   } catch (error: unknown) {
     if (!this.handleTelegramError(error, messageKey, options, ctx, chatId, messageId)) {
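
Two notes on the patch, each with a small TypeScript sketch. First, the URL warning: containsUrls() is a plain substring test, and the warning string is prepended between the model header and the streamed response. The sketch below only reuses names that appear in the diff (containsUrls, Strings.ai.urlWarning, the model header template); buildAiMessage itself is a hypothetical helper, not part of the patch.

// Hypothetical helper illustrating the message layout the patch produces:
// header, optional URL warning, then the model response.
function buildAiMessage(
  strings: { ai: { urlWarning: string } },
  model: string,
  aiTemperature: number,
  userMessage: string,
  response: string
): string {
  // Same check the patch adds: a substring test for http:// or https://
  const containsUrls = (text: string): boolean =>
    text.includes('http://') || text.includes('https://')
  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`
  // Both locale strings already end in "\n\n", so no extra separator is needed
  const urlWarning = containsUrls(userMessage) ? strings.ai.urlWarning : ''
  return modelHeader + urlWarning + response
}

Because it is a substring test, the check fires on a link anywhere in the prompt, but it will not catch bare domains such as example.com.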
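
Second, the rate-limiter change replaces a single global lastEditTime with a per-message map, so streaming edits to one message no longer throttle edits to unrelated messages. A minimal sketch of that keying follows, assuming the messageKey is built as `${chatId}:${messageId}` (the diff only shows that a string key exists); EditThrottle and its method names are illustrative, not the class in src/utils/rate-limiter.ts.

class EditThrottle {
  // One timestamp per edited message instead of one for the whole bot.
  private lastEditTimes: Map<string, number> = new Map()
  private readonly minInterval: number = 5000 // same 5000 ms the patch uses

  // Assumed key format: one entry per (chat, message) pair.
  private key(chatId: number, messageId: number): string {
    return `${chatId}:${messageId}`
  }

  canEdit(chatId: number, messageId: number, now: number = Date.now()): boolean {
    const last = this.lastEditTimes.get(this.key(chatId, messageId)) ?? 0
    return now - last >= this.minInterval
  }

  markEdited(chatId: number, messageId: number, now: number = Date.now()): void {
    this.lastEditTimes.set(this.key(chatId, messageId), now)
  }
}

Note that the streaming loop in ai.ts was also raised from 1000 ms to 5000 ms between edits, matching minInterval here, so the stream no longer requests edits faster than the limiter is willing to send them.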