ai queue, better markdown parsing, refactor, better feedback

parent df49bc4157
commit 23ebd021f3

6 changed files with 273 additions and 173 deletions
@@ -117,6 +117,7 @@ If you prefer to use Docker directly, you can use these instructions instead.
 - **handlerTimeout** (optional): How long handlers will wait before timing out. Set this high if using large AI models.
 - **flashModel** (optional): Which model will be used for /ask
 - **thinkingModel** (optional): Which model will be used for /think
+- **updateEveryChars** (optional): The number of characters of streamed output after which a message update is triggered
 - **databaseUrl**: Database server configuration (see `.env.example`)
 - **botAdmins**: Put the IDs of the people responsible for managing the bot. They can use some administrative and exclusive commands in any group.
 - **lastKey**: Last.fm API key, used by the `lastfm.js` functions, e.g. to see who is listening to which song.
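For reference, the streaming code added below reads `updateEveryChars` lazily with a fallback. A minimal sketch of the pattern — only `updateEveryChars` and its default of 100 are confirmed by this commit; the other names and defaults are assumptions for illustration:

```ts
// Confirmed by this commit: updateEveryChars falls back to 100.
const updateEveryChars = Number(process.env.updateEveryChars) || 100;

// Hypothetical equivalents for the other options (defaults invented):
const handlerTimeout = Number(process.env.handlerTimeout) || 600_000;        // ms before a handler times out
const flashModel = process.env.flashModel ?? "flash-default-model";          // model used by /ask
const thinkingModel = process.env.thinkingModel ?? "thinking-default-model"; // model used by /think
```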
@@ -119,31 +119,17 @@ export const models: ModelInfo[] = [
   }
 ];
 
-const enSystemPrompt = `You are a plaintext-only, helpful assistant called {botName}.
-Current Date/Time (UTC): {date}
-
----
-
-Respond to the user's message:
-{message}`
-
-const ptSystemPrompt = `Você é um assistente de texto puro e útil chamado {botName}.
-Data/Hora atual (UTC): {date}
-
----
-
-Responda à mensagem do usuário:
-{message}`
-
-async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string): Promise<string> {
+async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string, message: string): Promise<string> {
   const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
   if (user.length === 0) await ensureUserInDb(ctx, db);
   const userData = user[0];
   const lang = userData?.languageCode || "en";
+  const Strings = getStrings(lang);
   const utcDate = new Date().toISOString();
-  const prompt = lang === "pt"
-    ? ptSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text)
-    : enSystemPrompt.replace("{botName}", botName).replace("{date}", utcDate).replace("{message}", ctx.message.text);
+  const prompt = Strings.ai.systemPrompt
+    .replace("{botName}", botName)
+    .replace("{date}", utcDate)
+    .replace("{message}", message);
   return prompt;
 }
@@ -156,6 +142,51 @@ export function sanitizeForJson(text: string): string {
     .replace(/\t/g, '\\t')
 }
 
+function sanitizeMarkdownForTelegram(text: string): string {
+  let sanitizedText = text;
+
+  const replacements: string[] = [];
+  const addReplacement = (match: string): string => {
+    replacements.push(match);
+    return `___PLACEHOLDER_${replacements.length - 1}___`;
+  };
+
+  sanitizedText = sanitizedText.replace(/```([\s\S]*?)```/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/`([^`]+)`/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/\[([^\]]+)\]\(([^)]+)\)/g, addReplacement);
+
+  const parts = sanitizedText.split(/(___PLACEHOLDER_\d+___)/g);
+  const processedParts = parts.map(part => {
+    if (part.match(/___PLACEHOLDER_\d+___/)) {
+      return part;
+    } else {
+      let processedPart = part;
+      processedPart = processedPart.replace(/^(#{1,6})\s+(.+)/gm, '*$2*');
+      processedPart = processedPart.replace(/^(\s*)[-*]\s+/gm, '$1- ');
+      processedPart = processedPart.replace(/\*\*(.*?)\*\*/g, '*$1*');
+      processedPart = processedPart.replace(/__(.*?)__/g, '*$1*');
+      processedPart = processedPart.replace(/(^|\s)\*(?!\*)([^*]+?)\*(?!\*)/g, '$1_$2_');
+      processedPart = processedPart.replace(/(^|\s)_(?!_)([^_]+?)_(?!_)/g, '$1_$2_');
+      processedPart = processedPart.replace(/~~(.*?)~~/g, '~$1~');
+      processedPart = processedPart.replace(/^\s*┃/gm, '>');
+      processedPart = processedPart.replace(/^>\s?/gm, '> ');
+
+      return processedPart;
+    }
+  });
+
+  sanitizedText = processedParts.join('');
+
+  sanitizedText = sanitizedText.replace(/___PLACEHOLDER_(\d+)___/g, (_, idx) => replacements[Number(idx)]);
+
+  const codeBlockCount = (sanitizedText.match(/```/g) || []).length;
+  if (codeBlockCount % 2 !== 0) {
+    sanitizedText += '\n```';
+  }
+
+  return sanitizedText;
+}
+
 export async function preChecks() {
   const envs = [
     "ollamaApi",
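A quick spot-check of what this pass does end to end (the input string is invented; the output follows from tracing the replacement rules above):

```ts
// Hypothetical spot-check of sanitizeMarkdownForTelegram; the input is invented.
const input = "## Title\n**bold** and ~~gone~~, keep `**raw**` as-is";
console.log(sanitizeMarkdownForTelegram(input));
// -> "_Title_\n_bold_ and ~gone~, keep `**raw**` as-is"
// "## Title" -> "*Title*" (heading pass) and "**bold**" -> "*bold*" (bold pass),
// then the single-asterisk pass rewrites both fresh "*...*" spans to "_..._",
// so headings and bold both land as Telegram italics;
// "~~gone~~" -> "~gone~" (strikethrough);
// the inline-code span was placeholder-protected first, so "**raw**" inside
// backticks survives untouched and is restored at the end.
```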
@@ -232,7 +263,7 @@ function extractAxiosErrorMessage(error: unknown): string {
 }
 
 function escapeMarkdown(text: string): string {
-  return text.replace(/([*_])/g, '\\$1');
+  return text.replace(/([_*\[\]()`>#\+\-=|{}.!~])/g, '\\$1');
 }
 
 function containsUrls(text: string): boolean {
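The widened character class escapes Telegram's full MarkdownV2 reserved set instead of only `*` and `_`. A small before/after illustration (sample strings invented):

```ts
// Same result under both versions: only * and _ are involved.
escapeMarkdown("a*b_c");       // -> "a\\*b\\_c" (as a JS literal)

// Differs: the old regex left this unchanged; the new one escapes . ( )
escapeMarkdown("v1.2 (beta)"); // -> "v1\\.2 \\(beta\\)" (as a JS literal)
```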
@@ -244,10 +275,14 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   if (!ctx.chat) {
     return {
       success: false,
-      error: Strings.unexpectedErr.replace("{error}", "No chat found"),
+      error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound),
     };
   }
-  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
+  let status = Strings.ai.statusWaitingRender;
+  let modelHeader = Strings.ai.modelHeader
+    .replace("{model}", model)
+    .replace("{temperature}", aiTemperature)
+    .replace("{status}", status) + "\n\n";
   const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
 
   try {
@@ -267,8 +302,9 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     );
     let fullResponse = "";
     let thoughts = "";
-    let lastUpdate = Date.now();
+    let lastUpdateCharCount = 0;
     let sentHeader = false;
+    let firstChunk = true;
     const stream: NodeJS.ReadableStream = aiResponse.data as any;
     for await (const chunk of stream) {
       const lines = chunk.toString().split('\n');
@@ -293,7 +329,6 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
             logger.logThinking(ctx.chat.id, replyGenerating.message_id, false);
           }
         }
-        const now = Date.now();
         if (ln.response) {
           if (model === thinking_model) {
             let patchedThoughts = ln.response;
@@ -306,20 +341,51 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
           } else {
             fullResponse += ln.response;
           }
-          if (now - lastUpdate >= 5000 || !sentHeader) {
+          if (firstChunk) {
+            status = Strings.ai.statusWaitingRender;
+            modelHeader = Strings.ai.modelHeader
+              .replace("{model}", model)
+              .replace("{temperature}", aiTemperature)
+              .replace("{status}", status) + "\n\n";
             await rateLimiter.editMessageWithRetry(
               ctx,
               ctx.chat.id,
               replyGenerating.message_id,
-              modelHeader + urlWarning + fullResponse,
+              modelHeader + urlWarning + escapeMarkdown(fullResponse),
               { parse_mode: 'Markdown' }
             );
-            lastUpdate = now;
+            lastUpdateCharCount = fullResponse.length;
             sentHeader = true;
+            firstChunk = false;
+            continue;
+          }
+          const updateEveryChars = Number(process.env.updateEveryChars) || 100;
+          if (fullResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
+            await rateLimiter.editMessageWithRetry(
+              ctx,
+              ctx.chat.id,
+              replyGenerating.message_id,
+              modelHeader + urlWarning + escapeMarkdown(fullResponse),
+              { parse_mode: 'Markdown' }
+            );
+            lastUpdateCharCount = fullResponse.length;
+            sentHeader = true;
           }
         }
       }
     }
+    status = Strings.ai.statusRendering;
+    modelHeader = Strings.ai.modelHeader
+      .replace("{model}", model)
+      .replace("{temperature}", aiTemperature)
+      .replace("{status}", status) + "\n\n";
+    await rateLimiter.editMessageWithRetry(
+      ctx,
+      ctx.chat.id,
+      replyGenerating.message_id,
+      modelHeader + urlWarning + escapeMarkdown(fullResponse),
+      { parse_mode: 'Markdown' }
+    );
     return {
       success: true,
       response: fullResponse,
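The streaming loop now throttles Telegram edits by character count instead of wall-clock time: the first chunk is rendered immediately (with the waiting-to-render header), and later edits fire only after `updateEveryChars` new characters arrive. A stripped-down sketch of just that policy, where `editMessage` is a stand-in for the rate-limited Telegram edit:

```ts
// Minimal sketch of the char-count throttling used above; editMessage is a stand-in.
async function streamWithThrottle(
  chunks: AsyncIterable<string>,
  editMessage: (text: string) => Promise<void>
) {
  const updateEveryChars = Number(process.env.updateEveryChars) || 100;
  let fullResponse = "";
  let lastUpdateCharCount = 0;
  let firstChunk = true;
  for await (const chunk of chunks) {
    fullResponse += chunk;
    // First chunk: edit immediately so the user sees output right away.
    // After that: edit only once updateEveryChars new characters have arrived,
    // which keeps the bot well under Telegram's message-edit rate limits.
    if (firstChunk || fullResponse.length - lastUpdateCharCount >= updateEveryChars) {
      await editMessage(fullResponse);
      lastUpdateCharCount = fullResponse.length;
      firstChunk = false;
    }
  }
  await editMessage(fullResponse); // final render with the complete text
}
```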
@@ -360,7 +426,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       console.log(`[✨ AI] ${model} pulled successfully`);
       return {
         success: true,
-        response: `✅ Pulled ${escapeMarkdown(model)} successfully, please retry the command.`,
+        response: Strings.ai.pulled.replace("{model}", escapeMarkdown(model)),
       };
     }
   }
@@ -376,16 +442,18 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
   const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage);
   if (!aiResponse) return;
   if (!ctx.chat) return;
-  const modelHeader = `🤖 *${model}* | 🌡️ *${aiTemperature}*\n\n`;
-
-  const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
-
   if (aiResponse.success && aiResponse.response) {
+    const status = Strings.ai.statusComplete;
+    const modelHeader = Strings.ai.modelHeader
+      .replace("{model}", model)
+      .replace("{temperature}", aiTemperature)
+      .replace("{status}", status) + "\n\n";
+    const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
     await rateLimiter.editMessageWithRetry(
       ctx,
       ctx.chat.id,
       replyGenerating.message_id,
-      modelHeader + urlWarning + aiResponse.response,
+      modelHeader + urlWarning + sanitizeMarkdownForTelegram(aiResponse.response),
       { parse_mode: 'Markdown' }
     );
     return;
@@ -425,109 +493,112 @@ export function getModelLabelByName(name: string): string {
 export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
   const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
 
-  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
-    if (!ctx.message || !('text' in ctx.message)) return
-    const isAsk = ctx.message.text.startsWith("/ask")
-    const model = isAsk ? flash_model : thinking_model
-    const textCtx = ctx as TextContext
-    const reply_to_message_id = replyToMessageId(textCtx)
-    const { user, Strings, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-    const message = textCtx.message.text
-    const author = ("@" + ctx.from?.username) || ctx.from?.first_name
+  interface AiRequest {
+    task: () => Promise<void>;
+    ctx: TextContext;
+    wasQueued: boolean;
+  }
 
-    logger.logCmdStart(author, model === flash_model ? "ask" : "think")
+  const requestQueue: AiRequest[] = [];
+  let isProcessing = false;
 
+  async function processQueue() {
+    if (isProcessing || requestQueue.length === 0) {
+      return;
+    }
+
+    isProcessing = true;
+    const { task, ctx, wasQueued } = requestQueue.shift()!;
+    const { Strings } = await getUserWithStringsAndModel(ctx, db);
+    const reply_to_message_id = replyToMessageId(ctx);
+
+    try {
+      if (wasQueued) {
+        await ctx.reply(Strings.ai.startingProcessing, {
+          ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }),
+          parse_mode: 'Markdown'
+        });
+      }
+      await task();
+    } catch (error) {
+      console.error("[✨ AI | !] Error processing task:", error);
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      await ctx.reply(Strings.unexpectedErr.replace("{error}", errorMessage), {
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }),
+        parse_mode: 'Markdown'
+      });
+    } finally {
+      isProcessing = false;
+      processQueue();
+    }
+  }
+
+  async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') {
+    const reply_to_message_id = replyToMessageId(ctx);
+    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(ctx, db);
+    const message = ctx.message.text;
+    const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown";
+
+    let model: string;
+    let fixedMsg: string;
+
+    if (command === 'ai') {
+      model = customAiModel || flash_model;
+      fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim();
+      logger.logCmdStart(author, "ask");
+    } else {
+      model = command === 'ask' ? flash_model : thinking_model;
+      fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim();
+      logger.logCmdStart(author, command);
+    }
+
     if (!process.env.ollamaApi) {
-      await ctx.reply(Strings.ai.disabled, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
+      await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
     }
 
     if (!user.aiEnabled) {
-      await ctx.reply(Strings.ai.disabledForUser, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
+      await ctx.reply(Strings.ai.disabledForUser, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
     }
 
-    const fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim()
     if (fixedMsg.length < 1) {
-      await ctx.reply(Strings.ai.askNoMessage, {
-        parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
-      return
+      await ctx.reply(Strings.ai.askNoMessage, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
+      return;
     }
 
-    const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", model), {
-      parse_mode: 'Markdown',
-      ...({ reply_to_message_id })
-    })
-
-    logger.logPrompt(fixedMsg)
-
-    const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-    await handleAiReply(textCtx, model, prompt, replyGenerating, aiTemperature, fixedMsg)
-  })
-
-  bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
-    try {
-      if (!ctx.message || !("text" in ctx.message)) return
-      const textCtx = ctx as TextContext
-      const reply_to_message_id = replyToMessageId(textCtx)
-      const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(textCtx, db)
-      const message = textCtx.message.text
-      const author = ("@" + ctx.from?.username) || ctx.from?.first_name
-
-      logger.logCmdStart(author, "ask")
-
-      if (!process.env.ollamaApi) {
-        await ctx.reply(Strings.ai.disabled, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-
-      if (!user.aiEnabled) {
-        await ctx.reply(Strings.ai.disabledForUser, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-
-      const fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim()
-      if (fixedMsg.length < 1) {
-        await ctx.reply(Strings.ai.askNoMessage, {
-          parse_mode: 'Markdown',
-          ...({ reply_to_message_id })
-        })
-        return
-      }
-
-      const modelLabel = getModelLabelByName(customAiModel)
+    const task = async () => {
+      const modelLabel = getModelLabelByName(model);
       const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
         parse_mode: 'Markdown',
-        ...({ reply_to_message_id })
-      })
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
+      });
+      logger.logPrompt(fixedMsg);
+      const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg));
+      await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg);
+    };
 
-      logger.logPrompt(fixedMsg)
-
-      const prompt = sanitizeForJson(await usingSystemPrompt(textCtx, db, botName))
-      await handleAiReply(textCtx, customAiModel, prompt, replyGenerating, aiTemperature, fixedMsg)
-    } catch (err) {
-      const Strings = getStrings(languageCode(ctx));
-      if (ctx && ctx.reply) {
-        try {
-          await ctx.reply(Strings.unexpectedErr.replace("{error}", (err && err.message ? err.message : String(err))), { parse_mode: 'Markdown' })
-        } catch (e) {
-          console.error("[✨ AI | !] Failed to send error reply:", e)
-        }
-      }
+    if (isProcessing) {
+      requestQueue.push({ task, ctx, wasQueued: true });
+      const position = requestQueue.length;
+      await ctx.reply(Strings.ai.inQueue.replace("{position}", String(position)), {
+        parse_mode: 'Markdown',
+        ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
+      });
+    } else {
+      requestQueue.push({ task, ctx, wasQueued: false });
+      processQueue();
     }
-  })
+  }
+
+  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
+    if (!ctx.message || !('text' in ctx.message)) return;
+    const command = ctx.message.text.startsWith('/ask') ? 'ask' : 'think';
+    await aiCommandHandler(ctx as TextContext, command);
+  });
+
+  bot.command(["ai"], spamwatchMiddleware, async (ctx) => {
+    if (!ctx.message || !('text' in ctx.message)) return;
+    await aiCommandHandler(ctx as TextContext, 'ai');
+  });
 }
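This is the AI queue from the commit title: requests are serialized through `requestQueue`, and `processQueue` re-arms itself in `finally` so the next task starts even after an error. The same single-worker pattern in isolation (a minimal sketch; the names are simplified stand-ins for the diff's `processQueue`/`isProcessing`):

```ts
// Minimal single-worker queue in the style of processQueue above.
type Task = () => Promise<void>;
const queue: Task[] = [];
let busy = false;

async function pump() {
  if (busy || queue.length === 0) return;
  busy = true;
  const task = queue.shift()!;
  try {
    await task();              // run exactly one task at a time
  } catch (err) {
    console.error("task failed:", err);
  } finally {
    busy = false;              // re-arm and pick up the next queued task, if any
    pump();
  }
}

function enqueue(task: Task) {
  queue.push(task);
  pump();                      // no-op if a task is already running
}
```

Because `pump()` is re-entered from `finally`, a burst of N requests still executes strictly one at a time, which is what makes the `inQueue` position message meaningful.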
@@ -146,7 +146,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
           inline_keyboard: models.map(series => [
             { text: series.label, callback_data: `selectseries_${series.name}` }
           ]).concat([[
-            { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }
+            { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }
           ]])
         }
       }
@@ -185,7 +185,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
           inline_keyboard: series.models.map(m => [
             { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${series.name}_${m.name}` }
           ]).concat([[
-            { text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_aiModel' }
+            { text: `${Strings.varStrings.varBack}`, callback_data: 'settings_aiModel' }
           ]])
         }
       }
@@ -262,7 +262,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     const temps = [0.2, 0.5, 0.7, 0.9, 1.2];
     try {
       await ctx.editMessageReplyMarkup({
-        inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]])
+        inline_keyboard: temps.map(t => [{ text: t.toString(), callback_data: `settemp_${t}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]])
       });
     } catch (err) {
       if (
@@ -304,7 +304,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     if (!user) return;
     try {
       await ctx.editMessageReplyMarkup({
-        inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `⬅️ ${Strings.settings.ai.back}`, callback_data: 'settings_back' }]])
+        inline_keyboard: langs.map(l => [{ text: l.label, callback_data: `setlang_${l.code}` }]).concat([[{ text: `${Strings.varStrings.varBack}`, callback_data: 'settings_back' }]])
      });
     } catch (err) {
       if (
@@ -13,9 +13,9 @@
     "varWas": "was",
     "varNone": "None",
     "varUnknown": "Unknown",
-    "varBack": "Back"
+    "varBack": "⬅️ Back"
   },
-  "unexpectedErr": "Some unexpected error occurred during a bot action. Please report it to the developers.\n\n{error}",
+  "unexpectedErr": "An unexpected error occurred: {error}",
   "errInvalidOption": "Whoops! Invalid option!",
   "kickingMyself": "*Since you don't need me, I'll leave.*",
   "kickingMyselfErr": "Error leaving the chat.",
@@ -65,22 +65,31 @@
   "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
   "ai": {
     "helpEntry": "✨ AI Commands",
-    "helpDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI\n- /think `<prompt>`: Ask a thinking model about a question",
-    "disabled": "✨ AI features are currently disabled",
-    "disabledForUser": "✨ AI features are disabled for your account. You can enable them in /settings",
-    "pulling": "🔄 *Pulling {model} from Ollama...*\n\nThis may take a few minutes...",
-    "askGenerating": "✨ _{model} is working..._",
-    "askNoMessage": "Please provide a message to ask the model.",
+    "helpDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI model\n- /think `<prompt>`: Ask a thinking model about a question\n- /ai `<prompt>`: Ask your custom-set AI model a question",
+    "disabled": "✨ AI features are currently disabled globally.",
+    "disabledForUser": "✨ AI features are disabled for your account.",
+    "pulling": "🔄 Model {model} not found locally, pulling...",
+    "askGenerating": "✨ Generating response with {model}...",
+    "askNoMessage": "✨ You need to ask me a question!",
     "languageCode": "Language",
     "thinking": "Thinking...",
-    "finishedThinking": "Finished thinking",
-    "urlWarning": "⚠️ *Warning: I cannot access or open links. Please provide the content directly if you need me to analyze something from a website.*\n\n"
+    "finishedThinking": "Done.",
+    "urlWarning": "\n\n⚠️ The user provided one or more URLs in their message. Please do not visit any suspicious URLs.",
+    "inQueue": "ℹ️ You are {position} in the queue.",
+    "startingProcessing": "✨ Starting to process your request...",
+    "systemPrompt": "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}",
+    "statusWaitingRender": "⏳ Waiting to Render...",
+    "statusRendering": "🖼️ Rendering...",
+    "statusComplete": "✅ Complete!",
+    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "noChatFound": "No chat found",
+    "pulled": "✅ Pulled {model} successfully, please retry the command."
   },
   "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
   "maDownloadError": "Error downloading the file. Check the module ID and try again.",
   "ytDownload": {
-    "helpEntry": "📺 YouTube Download",
-    "helpDesc": "📺 *YouTube Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
+    "helpEntry": "📺 Video Download",
+    "helpDesc": "📺 *Video Download*\n\n- /yt | /ytdl | /sdl | /dl | /video `<video link>`: Download a video from some platforms (e.g. YouTube, Instagram, Facebook, etc.).\n\n See [this link](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) for more information and which services are supported.\n\n*Note: Telegram is currently limiting bot uploads to 50MB, which means that if the video you want to download is larger than 50MB, the quality will be reduced to try to upload it anyway. We're trying our best to work around or fix this problem.*",
     "downloadingVid": "⬇️ *Downloading video...*",
     "libNotFound": "*It seems that the yt-dlp executable does not exist on our server...\n\nIn that case, the problem is on our end! Please wait until we have noticed and solved the problem.*",
     "checkingSize": "🔎 *Checking if the video exceeds the 50MB limit...*",
@@ -110,7 +119,6 @@
     "aiEnabledSetTo": "AI Enabled set to {aiEnabled}",
     "aiModelSetTo": "AI Model set to {aiModel}",
     "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
-    "back": "Back",
     "selectSeries": "Please select a model series.",
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Please select a parameter size for {seriesLabel}.",
@@ -167,5 +175,12 @@
   "gsmarenaNotAllowed": "you are not allowed to interact with this.",
   "gsmarenaInvalidOrExpired": "Whoops, invalid or expired option. Please try again.",
   "gsmarenaDeviceDetails": "these are the details of your device:",
-  "gsmarenaErrorFetchingDetails": "Error fetching phone details."
-}
+  "gsmarenaErrorFetchingDetails": "Error fetching phone details.",
+  "info": {
+    "ping": "Pong!",
+    "pinging": "Pinging...",
+    "pong": "Pong in {ms}ms.",
+    "botInfo": "Kowalski is a multipurpose bot with a variety of features, including AI, moderation, and more.",
+    "credits": "Kowalski was created by ihatenodejs/Aidan, with contributions from the open-source community. It is licensed under the Unlicense license."
+  }
+}
@@ -12,9 +12,9 @@
     "varWas": "estava",
     "varNone": "Nenhum",
     "varUnknown": "Desconhecido",
-    "varBack": "Voltar"
+    "varBack": "⬅️ Voltar"
   },
-  "unexpectedErr": "Algum erro inesperado ocorreu durante uma ação do bot. Por favor, reporte aos desenvolvedores.\n\n{error}",
+  "unexpectedErr": "Ocorreu um erro inesperado: {error}",
   "errInvalidOption": "Ops! Opção inválida!",
   "kickingMyself": "*Já que você não precisa de mim, vou sair daqui.*",
   "kickingMyselfErr": "Erro ao sair do chat.",
@@ -64,16 +64,31 @@
   "animalCommandsDesc": "🐱 *Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat `<código http>`: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`",
   "ai": {
     "helpEntry": "✨ Comandos de IA",
-    "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento",
-    "disabled": "✨ Os recursos de IA estão desativados no momento",
-    "disabledForUser": "✨ Os recursos de IA estão desativados para sua conta. Você pode ativá-los em /settings",
-    "pulling": "🔄 *Puxando {model} do Ollama...*\n\nIsso pode levar alguns minutos...",
-    "askGenerating": "✨ _{model} está funcionando..._",
-    "askNoMessage": "Por favor, forneça uma mensagem para fazer a pergunta ao modelo.",
     "languageCode": "Idioma",
-    "thinking": "Pensando...",
-    "finishedThinking": "Pensamento finalizado",
-    "urlWarning": "⚠️ *Aviso: Não posso acessar ou abrir links. Por favor, forneça o conteúdo diretamente se precisar que eu analise algo de um site.*\n\n"
+    "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento\n- /ai `<prompt>`: Fazer uma pergunta a um modelo de IA personalizado",
+    "disabled": "A AIApi foi desativada\\.",
+    "disabledForUser": "As funções de IA estão desativadas para a sua conta\\.",
+    "pulling": "O modelo {model} não foi encontrado localmente, baixando\\.\\.\\.",
+    "askGenerating": "Gerando resposta com {model}\\.\\.\\.",
+    "askNoMessage": "Você precisa fazer uma pergunta\\.",
+    "thinking": "Pensando\\.\\.\\.",
+    "finishedThinking": "Pronto\\.",
+    "urlWarning": "\n\n⚠️ O usuário forneceu um ou mais URLs na sua mensagem\\. Por favor, não visite URLs suspeitos\\.",
+    "inQueue": "ℹ️ Você é o {position} na fila.",
+    "startingProcessing": "✨ Começando a processar o seu pedido\\.\\.\\.",
+    "aiEnabled": "IA",
+    "aiModel": "Modelo",
+    "aiTemperature": "Temperatura",
+    "selectSeries": "Por favor, selecione uma série de modelos.",
+    "seriesDescription": "{seriesDescription}",
+    "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
+    "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
+    "systemPrompt": "Você é um assistente de Telegram chamado {botName}, capaz de Telegram MarkdownV2.\nVocê está em um chat com um usuário, que enviou uma mensagem para você.\nData/Hora atual (UTC): {date}\n\n---\n\nResponda à mensagem do usuário:\n{message}",
+    "statusWaitingRender": "⏳ Aguardando renderização...",
+    "statusRendering": "🖼️ Renderizando...",
+    "statusComplete": "✅ Completo!",
+    "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+    "noChatFound": "Nenhum chat encontrado",
+    "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente."
   },
   "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`",
   "maDownloadError": "Erro ao baixar o arquivo. Verifique o ID do módulo e tente novamente.",
@@ -113,8 +128,7 @@
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
-    "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})",
-    "back": "Voltar"
+    "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})"
   },
   "languageCodeSetTo": "Idioma definido para {languageCode}",
   "unknownAction": "Ação desconhecida."
@@ -165,6 +179,5 @@
   "gsmarenaNotAllowed": "você não tem permissão para interagir com isso.",
   "gsmarenaInvalidOrExpired": "Ops! Opção inválida ou expirada. Por favor, tente novamente.",
   "gsmarenaDeviceDetails": "estes são os detalhes do seu dispositivo:",
-  "gsmarenaErrorFetchingDetails": "Erro ao buscar detalhes do celular.",
-  "userNotFound": "Usuário não encontrado."
+  "gsmarenaErrorFetchingDetails": "Erro ao buscar detalhes do celular."
 }
@@ -1,14 +1,14 @@
 import { Context } from "telegraf";
 import { replyToMessageId } from "../utils/reply-to-message-id";
 
 export default function verifyInput(ctx: Context, userInput: string, message: string, verifyNaN = false) {
   const reply_to_message_id = replyToMessageId(ctx);
   if (!userInput || (verifyNaN && isNaN(Number(userInput)))) {
     ctx.reply(message, {
       parse_mode: "Markdown",
       ...({ reply_to_message_id })
     });
     return true;
   }
   return false;
 }
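For context, a sketch of how a command might use verifyInput's early-return contract (the command and message text here are invented):

```ts
// Hypothetical caller of verifyInput; the command and error text are invented.
bot.command("httpcat", async (ctx) => {
  const userInput = ctx.message.text.split(" ")[1];
  // verifyInput replies with the given message and returns true when input is
  // missing or (with verifyNaN = true) not a number, so the handler bails out early.
  if (verifyInput(ctx, userInput, "Please provide an HTTP status code.", true)) return;
  // ... proceed knowing userInput is a present, numeric string
});
```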