ai queue, better markdown parsing, refactor, better feedback

Aidan 2025-06-30 23:43:30 -04:00
parent df49bc4157
commit 23ebd021f3
6 changed files with 273 additions and 173 deletions


@@ -117,6 +117,7 @@ If you prefer to use Docker directly, you can use these instructions instead.
 - **handlerTimeout** (optional): How long handlers will wait before timing out. Set this high if using large AI models.
 - **flashModel** (optional): Which model will be used for /ask
 - **thinkingModel** (optional): Which model will be used for /think
+- **updateEveryChars** (optional): The number of characters of new output to accumulate before the streamed message is updated again
 - **databaseUrl**: Database server configuration (see `.env.example`)
 - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group.
 - **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc.
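The streaming handler introduced in this commit reads the new setting straight from the environment and falls back to 100 characters when it is unset (`Number(process.env.updateEveryChars) || 100`). A hypothetical `.env` entry; the value shown here is made up:

# Edit the Telegram message roughly every 150 new characters of model output
updateEveryChars=150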


@@ -119,31 +119,17 @@ export const models: ModelInfo[] = [
   }
 ];
 
-const enSystemPrompt = `You are a plaintext-only, helpful assistant called {botName}.
-Current Date/Time (UTC): {date}
----
-Respond to the user's message:
-{message}`
-
-const ptSystemPrompt = `Você é um assistente de texto puro e útil chamado {botName}.
-Data/Hora atual (UTC): {date}
----
-Responda à mensagem do usuário:
-{message}`
-
-async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string): Promise<string> {
+async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string, message: string): Promise<string> {
   const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
   if (user.length === 0) await ensureUserInDb(ctx, db);
   const userData = user[0];
   const lang = userData?.languageCode || "en";
+  const Strings = getStrings(lang);
   const utcDate = new Date().toISOString();
-  const prompt = lang === "pt"
-    ? ptSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text)
-    : enSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text);
+  const prompt = Strings.ai.systemPrompt
+    .replace("{botName}", botName)
+    .replace("{date}", utcDate)
+    .replace("{message}", message);
   return prompt;
 }
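For illustration, a standalone sketch of the substitution chain above, using the `systemPrompt` template this commit adds to the English strings file further down; the bot name and user message are invented:

// Hypothetical illustration; the template text is copied from the en strings file in this commit.
const template = "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}";

const prompt = template
  .replace("{botName}", "Kowalski")
  .replace("{date}", new Date().toISOString())
  .replace("{message}", "What is the capital of France?");

console.log(prompt);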
@@ -156,6 +142,51 @@ export function sanitizeForJson(text: string): string {
     .replace(/\t/g, '\\t')
 }
 
+function sanitizeMarkdownForTelegram(text: string): string {
+  let sanitizedText = text;
+
+  const replacements: string[] = [];
+  const addReplacement = (match: string): string => {
+    replacements.push(match);
+    return `___PLACEHOLDER_${replacements.length - 1}___`;
+  };
+
+  sanitizedText = sanitizedText.replace(/```([\s\S]*?)```/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/`([^`]+)`/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/\[([^\]]+)\]\(([^)]+)\)/g, addReplacement);
+
+  const parts = sanitizedText.split(/(___PLACEHOLDER_\d+___)/g);
+  const processedParts = parts.map(part => {
+    if (part.match(/___PLACEHOLDER_\d+___/)) {
+      return part;
+    } else {
+      let processedPart = part;
+      processedPart = processedPart.replace(/^(#{1,6})\s+(.+)/gm, '*$2*');
+      processedPart = processedPart.replace(/^(\s*)[-*]\s+/gm, '$1- ');
+      processedPart = processedPart.replace(/\*\*(.*?)\*\*/g, '*$1*');
+      processedPart = processedPart.replace(/__(.*?)__/g, '*$1*');
+      processedPart = processedPart.replace(/(^|\s)\*(?!\*)([^*]+?)\*(?!\*)/g, '$1_$2_');
+      processedPart = processedPart.replace(/(^|\s)_(?!_)([^_]+?)_(?!_)/g, '$1_$2_');
+      processedPart = processedPart.replace(/~~(.*?)~~/g, '~$1~');
+      processedPart = processedPart.replace(/^\s*┃/gm, '>');
+      processedPart = processedPart.replace(/^>\s?/gm, '> ');
+      return processedPart;
+    }
+  });
+
+  sanitizedText = processedParts.join('');
+  sanitizedText = sanitizedText.replace(/___PLACEHOLDER_(\d+)___/g, (_, idx) => replacements[Number(idx)]);
+
+  const codeBlockCount = (sanitizedText.match(/```/g) || []).length;
+  if (codeBlockCount % 2 !== 0) {
+    sanitizedText += '\n```';
+  }
+
+  return sanitizedText;
+}
+
 export async function preChecks() {
   const envs = [
     "ollamaApi",
@@ -232,7 +263,7 @@ function extractAxiosErrorMessage(error: unknown): string {
 }
 
 function escapeMarkdown(text: string): string {
-  return text.replace(/([*_])/g, '\\$1');
+  return text.replace(/([_*\[\]()`>#\+\-=|{}.!~])/g, '\\$1');
 }
 
 function containsUrls(text: string): boolean {
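The widened character class covers the full set that Telegram's MarkdownV2 dialect treats as special, rather than only `*` and `_`. A hypothetical call for comparison:

// escapeMarkdown("v2.1 (beta) > v2.0!") now returns "v2\.1 \(beta\) \> v2\.0\!",
// whereas the old regex would have returned that input unchanged.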
@@ -244,10 +275,14 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   if (!ctx.chat) {
     return {
       success: false,
-      error: Strings.unexpectedErr.replace("{error}", "No chat found"),
+      error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound),
     };
   }
-  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+  let status = Strings.ai.statusWaitingRender;
+  let modelHeader = Strings.ai.modelHeader
+    .replace("{model}", model)
+    .replace("{temperature}", aiTemperature)
+    .replace("{status}", status) + "\n\n";
   const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
   try {
@ -267,8 +302,9 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
); );
let fullResponse = ""; let fullResponse = "";
let thoughts = ""; let thoughts = "";
let lastUpdate = Date.now(); let lastUpdateCharCount = 0;
let sentHeader = false; let sentHeader = false;
let firstChunk = true;
const stream: NodeJS.ReadableStream = aiResponse.data as any; const stream: NodeJS.ReadableStream = aiResponse.data as any;
for await (const chunk of stream) { for await (const chunk of stream) {
const lines = chunk.toString().split('\n'); const lines = chunk.toString().split('\n');
@@ -293,7 +329,6 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
             logger.logThinking(ctx.chat.id, replyGenerating.message_id, false);
           }
         }
-        const now = Date.now();
         if (ln.response) {
           if (model === thinking_model) {
             let patchedThoughts = ln.response;
@@ -306,20 +341,51 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
           } else {
             fullResponse += ln.response;
           }
-          if (now - lastUpdate >= 5000 || !sentHeader) {
+          if (firstChunk) {
+            status = Strings.ai.statusWaitingRender;
+            modelHeader = Strings.ai.modelHeader
+              .replace("{model}", model)
+              .replace("{temperature}", aiTemperature)
+              .replace("{status}", status) + "\n\n";
             await rateLimiter.editMessageWithRetry(
               ctx,
               ctx.chat.id,
               replyGenerating.message_id,
-              modelHeader + urlWarning + fullResponse,
+              modelHeader + urlWarning + escapeMarkdown(fullResponse),
               { parse_mode: 'Markdown' }
             );
-            lastUpdate = now;
+            lastUpdateCharCount = fullResponse.length;
+            sentHeader = true;
+            firstChunk = false;
+            continue;
+          }
+          const updateEveryChars = Number(process.env.updateEveryChars) || 100;
+          if (fullResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
+            await rateLimiter.editMessageWithRetry(
+              ctx,
+              ctx.chat.id,
+              replyGenerating.message_id,
+              modelHeader + urlWarning + escapeMarkdown(fullResponse),
+              { parse_mode: 'Markdown' }
+            );
+            lastUpdateCharCount = fullResponse.length;
             sentHeader = true;
           }
         }
       }
     }
+    status = Strings.ai.statusRendering;
+    modelHeader = Strings.ai.modelHeader
+      .replace("{model}", model)
+      .replace("{temperature}", aiTemperature)
+      .replace("{status}", status) + "\n\n";
+    await rateLimiter.editMessageWithRetry(
+      ctx,
+      ctx.chat.id,
+      replyGenerating.message_id,
+      modelHeader + urlWarning + escapeMarkdown(fullResponse),
+      { parse_mode: 'Markdown' }
+    );
     return {
       success: true,
       response: fullResponse,
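The update trigger changes here from elapsed time (an edit at most every 5 seconds) to accumulated output (an edit every `updateEveryChars` characters), which keeps edit frequency proportional to how fast the model streams. The same logic in isolation, with invented names:

// Standalone sketch of the character-budget throttle; the helper and its names are hypothetical.
function makeCharThrottle(threshold: number) {
  let lastLength = 0;
  return {
    // True once `text` has grown by at least `threshold` characters since the last flush.
    shouldFlush: (text: string) => text.length - lastLength >= threshold,
    flushed: (text: string) => { lastLength = text.length; },
  };
}

const throttle = makeCharThrottle(Number(process.env.updateEveryChars) || 100);
let fullResponse = "";
for (const chunk of ["Hello ", "world, ", "streaming a long reply..."]) {
  fullResponse += chunk;
  if (throttle.shouldFlush(fullResponse)) {
    // the bot would call rateLimiter.editMessageWithRetry(...) here
    throttle.flushed(fullResponse);
  }
}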
@@ -360,7 +426,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       console.log(`[✨ AI] ${model} pulled successfully`);
       return {
         success: true,
-        response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`,
+        response: Strings.ai.pulled.replace("{model}", escapeMarkdown(model)),
       };
     }
   }
@@ -376,16 +442,18 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
   const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage);
   if (!aiResponse) return;
   if (!ctx.chat) return;
-  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
-  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
   if (aiResponse.success && aiResponse.response) {
+    const status = Strings.ai.statusComplete;
+    const modelHeader = Strings.ai.modelHeader
+      .replace("{model}", model)
+      .replace("{temperature}", aiTemperature)
+      .replace("{status}", status) + "\n\n";
+    const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + urlWarning + aiResponse.response,
+      modelHeader + urlWarning + sanitizeMarkdownForTelegram(aiResponse.response),
       { parse_mode: 'Markdown' }
     );
     return;
@@ -425,109 +493,112 @@ export function getModelLabelByName(name: string): string {
 export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
   const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
 
-  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
-    if (!ctx.message || !('text' in ctx.message)) return
-    const isAsk = ctx.message.text.startsWith("/ask")
-    const model = isAsk ? flash_model : thinking_model
-    const textCtx = ctx as TextContext
-    const reply_to_message_id = replyToMessageId(textCtx)
-    const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-    const message = textCtx.message.text
-    const author = ("@" + ctx.from?.username) || ctx.from?.first_name
-    logger.logCmdStart(author, model === flash_model ? "ask" : "think")
-    if (!process.env.ollamaApi) {
-      await ctx.reply(Strings.ai.disabled, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
-    }
-    if (!user.aiEnabled) {
-      await ctx.reply(Strings.ai.disabledForUser, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
-    }
-    const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim()
-    if (fixedMsg.length < 1) {
-      await ctx.reply(Strings.ai.askNoMessage, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
-    }
-    const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", model), {
-      parse_mode: 'Markdown',
-      ...({ reply_to_message_id })
-    })
-    logger.logPrompt(fixedMsg)
-    const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg)
-  })
-
-  bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
-    try {
-      if (!ctx.message || !("text" in ctx.message)) return
-      const textCtx = ctx as TextContext
-      const reply_to_message_id = replyToMessageId(textCtx)
-      const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-      const message = textCtx.message.text
-      const author = ("@" + ctx.from?.username) || ctx.from?.first_name
-
-      logger.logCmdStart(author, "ask")
-
-      if (!process.env.ollamaApi) {
-        await ctx.reply(Strings.ai.disabled, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-      if (!user.aiEnabled) {
-        await ctx.reply(Strings.ai.disabledForUser, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-      const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
-      if (fixedMsg.length < 1) {
-        await ctx.reply(Strings.ai.askNoMessage, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-      const modelLabel = getModelLabelByName(customAiModel)
-      const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-
-      logger.logPrompt(fixedMsg)
-
-      const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg)
-    } catch (err) {
-      const Strings = getStrings(languageCode(ctx));
-      if (ctx && ctx.reply) {
-        try {
-          await ctx.reply(Strings.unexpectedErr.replace("{error}", (err && err.message ? err.message : String(err))), { parse_mode: 'Markdown' })
-        } catch (e) {
-          console.error("[✨ AI | !] Failed to send error reply:", e)
-        }
-      }
-    }
-  })
+  interface AiRequest {
+    task: () => Promise<void>;
+    ctx: TextContext;
+    wasQueued: boolean;
+  }
+
+  const requestQueue: AiRequest[] = [];
+  let isProcessing = false;
+
+  async function processQueue() {
+    if (isProcessing || requestQueue.length === 0) {
+      return;
+    }
+
+    isProcessing = true;
+    const { task, ctx, wasQueued } = requestQueue.shift()!;
+    const { Strings } = await getUserWithStringsAndModel(ctx, db);
+    const reply_to_message_id = replyToMessageId(ctx);
+
+    try {
+      if (wasQueued) {
+        await ctx.reply(Strings.ai.startingProcessing, {
+          ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }),
+          parse_mode: 'Markdown'
+        });
+      }
+      await task();
+    } catch (error) {
+      console.error("[✨ AI | !] Error processing task:", error);
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      await ctx.reply(Strings.unexpectedErr.replace("{error}", errorMessage), {
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }),
+        parse_mode: 'Markdown'
+      });
+    } finally {
+      isProcessing = false;
+      processQueue();
+    }
+  }
+
+  async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') {
+    const reply_to_message_id = replyToMessageId(ctx);
+    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(ctx, db);
+    const message = ctx.message.text;
+    const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown";
+
+    let model: string;
+    let fixedMsg: string;
+
+    if (command === 'ai') {
+      model = customAiModel || flash_model;
+      fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim();
+      logger.logCmdStart(author, "ask");
+    } else {
+      model = command === 'ask' ? flash_model : thinking_model;
+      fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim();
+      logger.logCmdStart(author, command);
+    }
+
+    if (!process.env.ollamaApi) {
+      await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
+    }
+    if (!user.aiEnabled) {
+      await ctx.reply(Strings.ai.disabledForUser, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
+    }
+    if (fixedMsg.length < 1) {
+      await ctx.reply(Strings.ai.askNoMessage, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
+    }
+
+    const task = async () => {
+      const modelLabel = getModelLabelByName(model);
+      const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
+        parse_mode: 'Markdown',
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
+      });
+      logger.logPrompt(fixedMsg);
+      const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg));
+      await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg);
+    };
+
+    if (isProcessing) {
+      requestQueue.push({ task, ctx, wasQueued: true });
+      const position = requestQueue.length;
+      await ctx.reply(Strings.ai.inQueue.replace("{position}", String(position)), {
+        parse_mode: 'Markdown',
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
+      });
+    } else {
+      requestQueue.push({ task, ctx, wasQueued: false });
+      processQueue();
+    }
+  }
+
+  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
+    if (!ctx.message || !('text' in ctx.message)) return;
+    const command = ctx.message.text.startsWith('/ask') ? 'ask' : 'think';
+    await aiCommandHandler(ctx as TextContext, command);
+  });
+
+  bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
+    if (!ctx.message || !('text' in ctx.message)) return;
+    await aiCommandHandler(ctx as TextContext, 'ai');
+  });
 }
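The queue added above is a single-worker FIFO: `isProcessing` doubles as the lock, and `processQueue` re-invokes itself in `finally` so the next task drains as soon as the current one settles, whether it resolved or threw. The same pattern in isolation (a sketch, not the bot's code):

// Standalone sketch of the serialized task-queue pattern used above; all names are invented.
type Task = () => Promise<void>;

const queue: Task[] = [];
let busy = false;

async function drain(): Promise<void> {
  if (busy || queue.length === 0) return;
  busy = true;
  const task = queue.shift()!;
  try {
    await task();
  } finally {
    busy = false;
    drain(); // pick up the next queued task, if any
  }
}

function enqueue(task: Task): void {
  queue.push(task);
  drain(); // runs immediately when idle, otherwise waits its turn
}

// Tasks run one at a time, in arrival order:
enqueue(async () => console.log("first"));
enqueue(async () => console.log("second"));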


@@ -146,7 +146,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
         inline_keyboard: models.map(series => [
           { text: series.label, callback_data: `selectseries_${series.name}` }
         ]).concat([[
-          { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }
+          { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }
         ]])
       }
     }
@@ -185,7 +185,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
         inline_keyboard: series.models.map(m => [
           { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${series.name}_${m.name}` }
         ]).concat([[
-          { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_aiModel' }
+          { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_aiModel' }
         ]])
       }
     }
@@ -262,7 +262,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     const temps = [0.2, 0.5, 0.7, 0.9, 1.2];
     try {
       await ctx.editMessageReplyMarkup({
-        inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]])
+        inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]])
       });
     } catch (err) {
       if (
@@ -304,7 +304,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     if (!user) return;
     try {
       await ctx.editMessageReplyMarkup({
-        inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]])
+        inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]])
       });
     } catch (err) {
       if (


@@ -13,9 +13,9 @@
     "varWas": "was",
     "varNone": "None",
     "varUnknown": "Unknown",
-    "varBack": "Back"
+    "varBack": "⬅️ Back"
   },
-  "unexpectedErr": "Some unexpected error occurred during a bot action. Please report it to the developers.\n\n{error}",
+  "unexpectedErr": "An unexpected error occurred: {error}",
   "errInvalidOption": "Whoops! Invalid option!",
   "kickingMyself": "*Since you don't need me, I'll leave.*",
   "kickingMyselfErr": "Error leaving the chat.",
@@ -65,22 +65,31 @@
   "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
   "ai": {
     "helpEntry": "✨ AI Commands",
-    "helpDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI\n- /think `<prompt>`: Ask a thinking model about a question",
-    "disabled": "✨ AI features are currently disabled",
-    "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings",
-    "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
-    "askGenerating": "✨ _{model} is working..._",
-    "askNoMessage": "Please provide a message to ask the model.",
+    "helpDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI model\n- /think `<prompt>`: Ask a thinking model about a question\n- /ai `<prompt>`: Ask your custom-set AI model a question",
+    "disabled": "✨ AI features are currently disabled globally.",
+    "disabledForUser": "✨ AI features are disabled for your account.",
+    "pulling": "🔄 Model {model} not found locally, pulling...",
+    "askGenerating": "✨ Generating response with {model}...",
+    "askNoMessage": "✨ You need to ask me a question!",
     "languageCode": "Language",
     "thinking": "Thinking...",
-    "finishedThinking": "Finished thinking",
-    "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n"
+    "finishedThinking": "Done.",
+    "urlWarning": "\n\n⚠ The user provided one or more URLs in their message. Please do not visit any suspicious URLs.",
+    "inQueue": " You are {position} in the queue.",
+    "startingProcessing": "✨ Starting to process your request...",
+    "systemPrompt": "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}",
+    "statusWaitingRender": "⏳ Waiting to Render...",
+    "statusRendering": "🖼️ Rendering...",
+    "statusComplete": "✅ Complete!",
+    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "noChatFound": "No chat found",
+    "pulled": "✅ Pulled {model} successfully, please retry the command."
   },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",
   "ytDownload": {
-    "helpEntry": "📺 YouTube Download",
-    "helpDesc": "📺 *YouTube Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
+    "helpEntry": "📺 Video Download",
+    "helpDesc": "📺 *Video Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
     "downloadingVid": "⬇️ *Downloading video...*",
     "libNotFound": "*It seems that the yt-dlp executable does not exist on our server...\n\nIn that case, the problem is on our end! Please wait until we have noticed and solved the problem.*",
     "checkingSize": "🔎 *Checking if the video exceeds the 50MB limit...*",
@@ -110,7 +119,6 @@
     "aiEnabledSetTo": "AI Enabled set to {aiEnabled}",
     "aiModelSetTo": "AI Model set to {aiModel}",
     "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
-    "back": "Back",
     "selectSeries": "Please select a model series.",
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Please select a parameter size for {seriesLabel}.",
@@ -167,5 +175,12 @@
   "gsmarenaNotAllowed": "you are not allowed to interact with this.",
   "gsmarenaInvalidOrExpired": "Whoops, invalid or expired option. Please try again.",
   "gsmarenaDeviceDetails": "these are the details of your device:",
-  "gsmarenaErrorFetchingDetails": "Error fetching phone details."
+  "gsmarenaErrorFetchingDetails": "Error fetching phone details.",
+  "info": {
+    "ping": "Pong!",
+    "pinging": "Pinging...",
+    "pong": "Pong in {ms}ms.",
+    "botInfo": "Kowalski is a multipurpose bot with a variety of features, including AI, moderation, and more.",
+    "credits": "Kowalski was created by ihatenodejs/Aidan, with contributions from the open-source community. It is licensed under the Unlicense license."
+  }
 }


@@ -12,9 +12,9 @@
     "varWas": "estava",
     "varNone": "Nenhum",
     "varUnknown": "Desconhecido",
-    "varBack": "Voltar"
+    "varBack": "⬅️ Voltar"
   },
-  "unexpectedErr": "Algum erro inesperado ocorreu durante uma ação do bot. Por favor, reporte aos desenvolvedores.\n\n{error}",
+  "unexpectedErr": "Ocorreu um erro inesperado: {error}",
   "errInvalidOption": "Ops! Opção inválida!",
   "kickingMyself": "*Já que você não precisa de mim, vou sair daqui.*",
   "kickingMyselfErr": "Erro ao sair do chat.",
@@ -64,16 +64,31 @@
   "animalCommandsDesc": "🐱 *Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat `<código http>`: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`",
   "ai": {
     "helpEntry": "✨ Comandos de IA",
-    "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento",
-    "disabled": "✨ Os recursos de IA estão desativados no momento",
-    "disabledForUser": "✨ Os recursos de IA estão desativados para sua conta. Você pode ativá-los em /settings",
-    "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
-    "askGenerating": "✨ _{model} está funcionando..._",
-    "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
-    "languageCode": "Idioma",
-    "thinking": "Pensando...",
-    "finishedThinking": "Pensamento finalizado",
-    "urlWarning": "⚠️ *Aviso: Não posso acessar ou abrir links. Por favor, forneça o conteúdo diretamente se precisar que eu analise algo de um site.*\n\n"
+    "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento\n- /ai `<prompt>`: Fazer uma pergunta a um modelo de IA personalizado",
-    "disabled": "A AIApi foi desativada\\.",
+    "disabled": "A AIApi foi desativada\\.",
+    "disabledForUser": "As funções de IA estão desativadas para a sua conta\\.",
+    "pulling": "O modelo {model} não foi encontrado localmente, baixando\\.\\.\\.",
+    "askGenerating": "Gerando resposta com {model}\\.\\.\\.",
+    "askNoMessage": "Você precisa fazer uma pergunta\\.",
+    "thinking": "Pensando\\.\\.\\.",
+    "finishedThinking": "Pronto\\.",
+    "urlWarning": "\n\n⚠ O usuário forneceu um ou mais URLs na sua mensagem\\. Por favor, não visite URLs suspeitos\\.",
+    "inQueue": " Você é o {position} na fila.",
+    "startingProcessing": "✨ Começando a processar o seu pedido\\.\\.\\.",
+    "aiEnabled": "IA",
+    "aiModel": "Modelo",
+    "aiTemperature": "Temperatura",
+    "selectSeries": "Por favor, selecione uma série de modelos.",
+    "seriesDescription": "{seriesDescription}",
+    "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
+    "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
+    "systemPrompt": "Você é um assistente de Telegram chamado {botName}, capaz de Telegram MarkdownV2.\nVocê está em um chat com um usuário, que enviou uma mensagem para você.\nData/Hora atual (UTC): {date}\n\n---\n\nResponda à mensagem do usuário:\n{message}",
+    "statusWaitingRender": "⏳ Aguardando renderização...",
+    "statusRendering": "🖼️ Renderizando...",
+    "statusComplete": "✅ Completo!",
+    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "noChatFound": "Nenhum chat encontrado",
+    "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente."
   },
   "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
   "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",
@@ -113,8 +128,7 @@
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
-    "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})",
-    "back": "Voltar"
+    "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})"
   },
   "languageCodeSetTo": "Idioma definido para {languageCode}",
   "unknownAction": "Ação desconhecida."
@@ -165,6 +179,5 @@
   "gsmarenaNotAllowed": "você não tem permissão para interagir com isso.",
   "gsmarenaInvalidOrExpired": "Ops! Opção inválida ou expirada. Por favor, tente novamente.",
   "gsmarenaDeviceDetails": "estes são os detalhes do seu dispositivo:",
-  "gsmarenaErrorFetchingDetails": "Erro ao buscar detalhes do celular.",
-  "userNotFound": "Usuário não encontrado."
+  "gsmarenaErrorFetchingDetails": "Erro ao buscar detalhes do celular."
 }