KOW-27: commands respect aiEnabled now; fix message sending; warn that the model can't open links; add phi3 and deepseek 7B; cleanup

commit df49bc4157
parent 5270d2cae5
Author: Aidan
Date: 2025-06-30 20:24:42 -04:00

4 changed files with 59 additions and 18 deletions

@@ -69,7 +69,7 @@ interface OllamaResponse {
 export const models: ModelInfo[] = [
   {
     name: 'gemma3n',
-    label: 'Gemma3n',
+    label: 'gemma3n',
     descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
     descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
     models: [
@@ -79,7 +79,7 @@ export const models: ModelInfo[] = [
   },
   {
     name: 'gemma3-abliterated',
-    label: 'Gemma3 Uncensored',
+    label: 'gemma3 Uncensored',
     descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
     descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
     models: [
@@ -103,7 +103,18 @@ export const models: ModelInfo[] = [
     descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
     models: [
       { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
       { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'phi3',
+    label: 'Phi3',
+    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
+    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
+    models: [
+      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
+    ]
+  }
 ]
   }
 ];
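
Note: the new deepseek and phi3 entries above follow the same ModelInfo shape as the existing families. A minimal sketch of that shape, inferred only from the fields visible in this diff (the real interface is declared earlier in this file and may carry more fields):

// Sketch inferred from the entries in this hunk; the actual ModelInfo
// interface lives earlier in the file and may differ.
interface ModelVariant {
  name: string           // Ollama tag, e.g. 'phi3:3.8b'
  label: string          // label shown to users, e.g. 'Phi3 3.8B'
  parameterSize: string  // e.g. '3.8B'
}

interface ModelInfo {
  name: string           // family key, e.g. 'phi3'
  label: string
  descriptionEn: string  // English description
  descriptionPt: string  // Portuguese description
  models: ModelVariant[] // concrete pullable variants
}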
@@ -224,7 +235,11 @@ function escapeMarkdown(text: string): string {
   return text.replace(/([*_])/g, '\\$1');
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number): Promise<{ success: boolean; response?: string; error?: string }> {
+function containsUrls(text: string): boolean {
+  return text.includes('http://') || text.includes('https://');
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string): Promise<{ success: boolean; response?: string; error?: string }> {
   const Strings = getStrings(languageCode(ctx));
   if (!ctx.chat) {
     return {
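
A caveat on the new containsUrls helper above: it only matches explicit http:// and https:// prefixes, so bare domains such as example.com or www.example.com will not trigger the link warning. A stricter variant (hypothetical, not part of this commit) could test a pattern instead:

// Hypothetical stricter check; not part of this commit.
// Also catches www-prefixed hosts and a few common bare TLDs.
function containsUrlsStrict(text: string): boolean {
  return /\bhttps?:\/\/\S+|\bwww\.\S+|\b[\w-]+\.(com|org|net|io|dev)\b/i.test(text);
}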
@@ -233,6 +248,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     };
   }
   const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
+
   try {
     const aiResponse = await axios.post<unknown>(
       `${process.env.ollamaApi}/api/generate`,
@@ -289,12 +306,12 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
         } else {
           fullResponse += ln.response;
         }
-        if (now - lastUpdate >= 1000 || !sentHeader) {
+        if (now - lastUpdate >= 5000 || !sentHeader) {
           await rateLimiter.editMessageWithRetry(
             ctx,
             ctx.chat.id,
             replyGenerating.message_id,
-            modelHeader + fullResponse,
+            modelHeader + urlWarning + fullResponse,
             { parse_mode: 'Markdown' }
           );
           lastUpdate = now;
@@ -354,18 +371,21 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   }
 }
 
-async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number) {
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string) {
   const Strings = getStrings(languageCode(ctx));
-  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature);
+  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage);
   if (!aiResponse) return;
   if (!ctx.chat) return;
   const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
   if (aiResponse.success && aiResponse.response) {
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + aiResponse.response,
+      modelHeader + urlWarning + aiResponse.response,
       { parse_mode: 'Markdown' }
     );
     return;
@@ -411,7 +431,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     const model = isAsk ? flash_model : thinking_model
     const textCtx = ctx as TextContext
     const reply_to_message_id = replyToMessageId(textCtx)
-    const { Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+    const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)

     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
@@ -425,6 +445,14 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
       return
     }

+    if (!user.aiEnabled) {
+      await ctx.reply(Strings.ai.disabledForUser, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
     const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim()
     if (fixedMsg.length < 1) {
       await ctx.reply(Strings.ai.askNoMessage, {
@@ -442,7 +470,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {

     logger.logPrompt(fixedMsg)
     const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature)
+    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg)
   })

   bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
@@ -450,7 +478,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     if (!ctx.message || !("text" in ctx.message)) return
     const textCtx = ctx as TextContext
     const reply_to_message_id = replyToMessageId(textCtx)
-    const { Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
+    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)

     const message = textCtx.message.text
     const author = ("@" + ctx.from?.username) || ctx.from?.first_name
@@ -464,6 +492,14 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
       return
     }

+    if (!user.aiEnabled) {
+      await ctx.reply(Strings.ai.disabledForUser, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
     const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
     if (fixedMsg.length < 1) {
       await ctx.reply(Strings.ai.askNoMessage, {
@@ -482,7 +518,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {

       logger.logPrompt(fixedMsg)
       const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature)
+      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg)
     } catch (err) {
       const Strings = getStrings(languageCode(ctx));
       if (ctx && ctx.reply) {
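
The aiEnabled guard added above is inlined identically in both the /ask and /think handler and the /ai handler. Purely to illustrate the shared logic, the duplicated block could be factored out; this helper is hypothetical and not part of the commit:

// Hypothetical helper; this commit inlines the block in both command handlers.
async function replyIfAiDisabled(
  ctx: TextContext,
  user: { aiEnabled: boolean },
  Strings: { ai: { disabledForUser: string } },
  reply_to_message_id?: number
): Promise<boolean> {
  if (user.aiEnabled) return false
  await ctx.reply(Strings.ai.disabledForUser, {
    parse_mode: 'Markdown',
    ...({ reply_to_message_id })
  })
  return true
}

Each handler would then reduce to: if (await replyIfAiDisabled(textCtx, user, Strings, reply_to_message_id)) return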

@@ -67,12 +67,14 @@
     "helpEntry": "✨ AI Commands",
     "helpDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI\n- /think `<prompt>`: Ask a thinking model about a question",
     "disabled": "✨ AI features are currently disabled",
+    "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings",
     "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
     "askGenerating": "✨ _{model} is working..._",
     "askNoMessage": "Please provide a message to ask the model.",
     "languageCode": "Language",
     "thinking": "Thinking...",
-    "finishedThinking": "Finished thinking"
+    "finishedThinking": "Finished thinking",
+    "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n"
   },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",

@@ -66,12 +66,14 @@
     "helpEntry": "✨ Comandos de IA",
     "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento",
     "disabled": "✨ Os recursos de IA estão desativados no momento",
+    "disabledForUser": "✨ Os recursos de IA estão desativados para sua conta. Você pode ativá-los em /settings",
     "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
     "askGenerating": "✨ _{model} está funcionando..._",
     "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
     "languageCode": "Idioma",
     "thinking": "Pensando...",
-    "finishedThinking": "Pensamento finalizado"
+    "finishedThinking": "Pensamento finalizado",
+    "urlWarning": "⚠️ *Aviso: Não posso acessar ou abrir links. Por favor, forneça o conteúdo diretamente se precisar que eu analise algo de um site.*\n\n"
   },
   "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
   "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",

@@ -32,7 +32,7 @@ import { Context } from 'telegraf'
 import { logger } from './log'

 class RateLimiter {
-  private lastEditTime: number = 0
+  private lastEditTimes: Map<string, number> = new Map()
   private readonly minInterval: number = 5000
   private pendingUpdates: Map<string, string> = new Map()
   private updateQueue: Map<string, NodeJS.Timeout> = new Map()
@@ -144,7 +144,8 @@ class RateLimiter {
     if (!latestText) return

     const now = Date.now()
-    const timeSinceLastEdit = now - this.lastEditTime
+    const lastEditTime = this.lastEditTimes.get(messageKey) || 0
+    const timeSinceLastEdit = now - lastEditTime

     await this.waitForRateLimit(chatId, messageId)
     if (timeSinceLastEdit < this.minInterval) {
@@ -217,7 +218,7 @@ class RateLimiter {
       }
       this.pendingUpdates.delete(messageKey)
     }
-    this.lastEditTime = Date.now()
+    this.lastEditTimes.set(messageKey, Date.now())
     this.updateQueue.delete(messageKey)
   } catch (error: unknown) {
     if (!this.handleTelegramError(error, messageKey, options, ctx, chatId, messageId)) {
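
This limiter change is the "fix message sending" part of the commit title: with a single shared lastEditTime, an edit to one message pushed back the schedule for every other message, whereas per-messageKey timestamps throttle each message independently (and the streaming loop above now updates every 5000 ms, matching minInterval). A minimal standalone sketch of the per-key pattern, assuming messageKey is derived from chat and message IDs:

// Standalone sketch of per-key throttling, mirroring the change above.
class PerKeyThrottle {
  private lastTimes: Map<string, number> = new Map()

  constructor(private readonly minInterval: number = 5000) {}

  // Returns true if an action for `key` may run now, and records the time.
  tryAcquire(key: string): boolean {
    const now = Date.now()
    const last = this.lastTimes.get(key) || 0
    if (now - last < this.minInterval) return false
    this.lastTimes.set(key, now)
    return true
  }
}

const throttle = new PerKeyThrottle()
throttle.tryAcquire('chat1:42')  // true — first edit for this message
throttle.tryAcquire('chat1:43')  // true — different message, independent window
throttle.tryAcquire('chat1:42')  // false — still inside the 5 s window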