diff --git a/config/ai.ts b/config/ai.ts
index 5b653aa..5f9a5cf 100644
--- a/config/ai.ts
+++ b/config/ai.ts
@@ -2,7 +2,7 @@ import type { ModelInfo } from "../src/commands/ai"
export const defaultFlashModel = "gemma3:4b"
export const defaultThinkingModel = "qwen3:4b"
-export const unloadModelAfterB = 0.1 // how many billion params until model is auto-unloaded
+export const unloadModelAfterB = 12 // how many billion params until model is auto-unloaded
export const models: ModelInfo[] = [
{
@@ -35,13 +35,31 @@ export const models: ModelInfo[] = [
descriptionPt: 'Qwen3 é uma série de modelos multilingues.',
models: [
{ name: 'qwen3:0.6b', label: 'Qwen3 0.6B', parameterSize: '0.6B' },
+ { name: 'qwen3:1.7b', label: 'Qwen3 1.7B', parameterSize: '1.7B' },
{ name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
{ name: 'qwen3:8b', label: 'Qwen3 8B', parameterSize: '8B' },
{ name: 'qwen3:14b', label: 'Qwen3 14B', parameterSize: '14B' },
{ name: 'qwen3:30b', label: 'Qwen3 30B', parameterSize: '30B' },
+ { name: 'qwen3:32b', label: 'Qwen3 32B', parameterSize: '32B' },
{ name: 'qwen3:235b-a22b', label: 'Qwen3 235B A22B', parameterSize: '235B' },
]
},
+ {
+ name: 'qwen3-abliterated',
+ label: 'Qwen3 [ Uncensored ]',
+ descriptionEn: 'Qwen3-abliterated is a multilingual reasoning model series.',
+ descriptionPt: 'Qwen3-abliterated é uma série de modelos multilingues.',
+ models: [
+ { name: 'huihui_ai/qwen3-abliterated:0.6b', label: 'Qwen3 Uncensored 0.6B', parameterSize: '0.6B' },
+ { name: 'huihui_ai/qwen3-abliterated:1.7b', label: 'Qwen3 Uncensored 1.7B', parameterSize: '1.7B' },
+ { name: 'huihui_ai/qwen3-abliterated:4b', label: 'Qwen3 Uncensored 4B', parameterSize: '4B' },
+ { name: 'huihui_ai/qwen3-abliterated:8b', label: 'Qwen3 Uncensored 8B', parameterSize: '8B' },
+ { name: 'huihui_ai/qwen3-abliterated:14b', label: 'Qwen3 Uncensored 14B', parameterSize: '14B' },
+ { name: 'huihui_ai/qwen3-abliterated:30b', label: 'Qwen3 Uncensored 30B', parameterSize: '30B' },
+ { name: 'huihui_ai/qwen3-abliterated:32b', label: 'Qwen3 Uncensored 32B', parameterSize: '32B' },
+ { name: 'huihui_ai/qwen3-abliterated:235b', label: 'Qwen3 Uncensored 235B', parameterSize: '235B' },
+ ]
+ },
{
name: 'qwq',
label: 'QwQ',
@@ -49,6 +67,7 @@ export const models: ModelInfo[] = [
descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.',
models: [
{ name: 'qwq:32b', label: 'QwQ 32B', parameterSize: '32B' },
+ { name: 'huihui_ai/qwq-abliterated:32b', label: 'QwQ Uncensored 32B', parameterSize: '32B' },
]
},
{
@@ -60,6 +79,32 @@ export const models: ModelInfo[] = [
{ name: 'llama4:scout', label: 'Llama4 109B A17B', parameterSize: '109B' },
]
},
+ {
+ name: 'deepseek',
+ label: 'DeepSeek [ & Uncensored ]',
+ descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
+ descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
+ models: [
+ { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+ { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
+ { name: 'deepseek-r1:8b', label: 'DeepSeek 8B', parameterSize: '8B' },
+ { name: 'deepseek-r1:14b', label: 'DeepSeek 14B', parameterSize: '14B' },
+ { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+ { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+ { name: 'huihui_ai/deepseek-r1-abliterated:8b', label: 'DeepSeek Uncensored 8B', parameterSize: '8B' },
+ { name: 'huihui_ai/deepseek-r1-abliterated:14b', label: 'DeepSeek Uncensored 14B', parameterSize: '14B' },
+ ]
+ },
+ {
+ name: 'hermes3',
+ label: 'Hermes3',
+ descriptionEn: 'Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research.',
+ descriptionPt: 'Hermes 3 é a versão mais recente da série Hermes de LLMs da Nous Research.',
+ models: [
+ { name: 'hermes3:3b', label: 'Hermes3 3B', parameterSize: '3B' },
+ { name: 'hermes3:8b', label: 'Hermes3 8B', parameterSize: '8B' },
+ ]
+ },
{
name: 'mistral',
label: 'Mistral',
@@ -70,15 +115,15 @@ export const models: ModelInfo[] = [
]
},
{
- name: 'deepseek',
- label: 'DeepSeek [ & Uncensored ]',
- descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
- descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
+ name: 'phi4',
+ label: 'Phi4 [ & Uncensored ]',
+ descriptionEn: 'Phi-4 is a 14B parameter, state-of-the-art open model from Microsoft.',
+ descriptionPt: 'Phi-4 é um modelo de 14B de última geração, aberto pela Microsoft.',
models: [
- { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
- { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
- { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
- { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+ { name: 'hf.co/unsloth/Phi-4-mini-reasoning-GGUF', label: 'Phi4 Mini Reasoning', parameterSize: '4B' },
+ { name: 'phi4:14b', label: 'Phi4 14B', parameterSize: '14B' },
+ { name: 'hf.co/unsloth/Phi-4-reasoning-plus-GGUF', label: 'Phi4 Reasoning Plus', parameterSize: '14B' },
+ { name: 'huihui_ai/phi4-abliterated:14b', label: 'Phi4 Uncensored 14B', parameterSize: '14B' },
]
},
{
@@ -89,5 +134,34 @@ export const models: ModelInfo[] = [
models: [
{ name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
]
- }
+ },
+ {
+ name: 'llama3',
+ label: 'Llama3',
+ descriptionEn: 'Llama 3, a lightweight model from Meta.',
+ descriptionPt: 'Llama 3, um modelo leve da Meta.',
+ models: [
+ { name: 'llama3:8b', label: 'Llama3 8B', parameterSize: '8B' },
+ ]
+ },
+ {
+ name: 'llama3.1',
+ label: 'Llama3.1 [ Uncensored ]',
+ descriptionEn: 'Abliterated v3 Llama 3.1 8B with an uncensored prompt.',
+ descriptionPt: 'Llama3.1 é um modelo aberto, leve e para dispositivos locais, com prompt não censurado.',
+ models: [
+ { name: 'mannix/llama3.1-8b-abliterated:latest', label: 'Llama3.1 8B', parameterSize: '8B' },
+ ]
+ },
+ {
+ name: 'llama3.2',
+ label: 'Llama3.2 [ & Uncensored ]',
+ descriptionEn: 'Llama3.2 is a family of open, lightweight models for general tasks.',
+ descriptionPt: 'Llama3.2 é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
+ models: [
+ { name: 'llama3.2:1b', label: 'Llama3.2 1B', parameterSize: '1B' },
+ { name: 'llama3.2:3b', label: 'Llama3.2 3B', parameterSize: '3B' },
+ { name: 'socialnetwooky/llama3.2-abliterated:3b_q8_0', label: 'Llama3.2 Uncensored 3B', parameterSize: '3B' },
+ ]
+ },
];
\ No newline at end of file
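
Note: the entries above rely on the ModelInfo type imported from src/commands/ai.ts, which this diff does not modify. A minimal sketch of that shape, inferred from the fields used in config/ai.ts (the field layout here is an assumption, not the actual definition):

// Illustrative only: inferred from usage in config/ai.ts.
export interface ModelInfo {
  name: string;          // series id, e.g. 'qwen3'
  label: string;         // series button text; may carry a '[ & Uncensored ]' suffix
  descriptionEn: string; // shown to English-locale users
  descriptionPt: string; // shown to Portuguese-locale users
  models: {
    name: string;          // Ollama model tag, e.g. 'qwen3:4b'
    label: string;         // per-model button text
    parameterSize: string; // e.g. '4B', appended to the label in the keyboard
  }[];
}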
diff --git a/config/settings.ts b/config/settings.ts
new file mode 100644
index 0000000..1fe94b3
--- /dev/null
+++ b/config/settings.ts
@@ -0,0 +1,2 @@
+export const seriesPageSize = 4;
+export const modelPageSize = 4;
\ No newline at end of file
diff --git a/nodemon.json b/nodemon.json
index 918bcb8..4e4ec20 100644
--- a/nodemon.json
+++ b/nodemon.json
@@ -1,6 +1,6 @@
-{
+{
"ignore": ["src/props/*.json", "src/props/*.txt"],
- "watch": ["src"],
+ "watch": ["src", "config"],
"ext": "ts,js",
"exec": "bun src/bot.ts"
}
\ No newline at end of file
diff --git a/src/commands/ai.ts b/src/commands/ai.ts
index ebcd613..a032850 100644
--- a/src/commands/ai.ts
+++ b/src/commands/ai.ts
@@ -135,6 +135,26 @@ function sanitizeMarkdownForTelegram(text: string): string {
return sanitizedText;
}
+function processThinkingTags(text: string): string {
+ let processedText = text;
+
+ const firstThinkIndex = processedText.indexOf('<think>');
+ if (firstThinkIndex === -1) {
+ return processedText.replace(/<\/think>/g, '___THINK_END___');
+ }
+
+ processedText = processedText.substring(0, firstThinkIndex) + '___THINK_START___' + processedText.substring(firstThinkIndex + '<think>'.length);
+ const lastThinkEndIndex = processedText.lastIndexOf('</think>');
+ if (lastThinkEndIndex !== -1) {
+ processedText = processedText.substring(0, lastThinkEndIndex) + '___THEND___' + processedText.substring(lastThinkEndIndex + '</think>'.length);
+ }
+ processedText = processedText.replace(/<think>/g, '');
+ processedText = processedText.replace(/<\/think>/g, '');
+ processedText = processedText.replace('___THEND___', '___THINK_END___');
+
+ return processedText;
+}
+
export async function preChecks() {
const envs = [
"ollamaApi",
@@ -221,9 +241,10 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound),
};
}
+ const cleanedModelName = model.replace('hf.co/', '');
let status = Strings.ai.statusWaitingRender;
let modelHeader = Strings.ai.modelHeader
- .replace("{model}", model)
+ .replace("{model}", cleanedModelName)
.replace("{temperature}", aiTemperature)
.replace("{status}", status) + "\n\n";
@@ -277,23 +298,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
console.error("[✨ AI | !] Error parsing chunk");
continue;
}
- if (model === thinking_model && !showThinking) {
- if (ln.response) {
- finalResponseText += ln.response;
- if (finalResponseText.includes('<think>') && !thinkingMessageSent) {
- await rateLimiter.editMessageWithRetry(
- ctx,
- ctx.chat.id,
- replyGenerating.message_id,
- modelHeader + Strings.ai.thinking,
- { parse_mode: 'Markdown' }
- );
- thinkingMessageSent = true;
- }
- }
- continue;
- }
- if (model === thinking_model && ln.response) {
+
+ if (ln.response) {
if (ln.response.includes('<think>')) {
const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/);
if (thinkMatch && thinkMatch[1].trim().length > 0) {
@@ -304,68 +310,63 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
} else if (ln.response.includes('</think>')) {
logger.logThinking(ctx.chat.id, replyGenerating.message_id, false);
}
- }
- if (ln.response) {
- if (model === thinking_model) {
- let patchedThoughts = ln.response;
- const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
- patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '___THINK_START___' + p1.trim() + '___THINK_END___' : '');
- patchedThoughts = patchedThoughts.replace(/<think>/g, '___THINK_START___');
- patchedThoughts = patchedThoughts.replace(/<\/think>/g, '___THINK_END___');
- thoughts += patchedThoughts;
- fullResponse += patchedThoughts;
- } else {
- fullResponse += ln.response;
- }
- if (firstChunk) {
- status = Strings.ai.statusWaitingRender;
- modelHeader = Strings.ai.modelHeader
- .replace("{model}", model)
- .replace("{temperature}", aiTemperature)
- .replace("{status}", status) + "\n\n";
- await rateLimiter.editMessageWithRetry(
- ctx,
- ctx.chat.id,
- replyGenerating.message_id,
- modelHeader + formatThinkingMessage(fullResponse),
- { parse_mode: 'Markdown' }
- );
- lastUpdateCharCount = fullResponse.length;
- sentHeader = true;
- firstChunk = false;
- continue;
- }
- const updateEveryChars = Number(process.env.updateEveryChars) || 100;
- if (fullResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
- await rateLimiter.editMessageWithRetry(
- ctx,
- ctx.chat.id,
- replyGenerating.message_id,
- modelHeader + formatThinkingMessage(fullResponse),
- { parse_mode: 'Markdown' }
- );
- lastUpdateCharCount = fullResponse.length;
- sentHeader = true;
+ fullResponse += ln.response;
+ if (showThinking) {
+ let displayResponse = processThinkingTags(fullResponse);
+
+ if (firstChunk) {
+ status = Strings.ai.statusWaitingRender;
+ modelHeader = Strings.ai.modelHeader
+ .replace("{model}", cleanedModelName)
+ .replace("{temperature}", aiTemperature)
+ .replace("{status}", status) + "\n\n";
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ modelHeader + formatThinkingMessage(displayResponse),
+ { parse_mode: 'Markdown' }
+ );
+ lastUpdateCharCount = displayResponse.length;
+ sentHeader = true;
+ firstChunk = false;
+ continue;
+ }
+ const updateEveryChars = Number(process.env.updateEveryChars) || 100;
+ if (displayResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ modelHeader + formatThinkingMessage(displayResponse),
+ { parse_mode: 'Markdown' }
+ );
+ lastUpdateCharCount = displayResponse.length;
+ sentHeader = true;
+ }
}
}
}
}
- if (model === thinking_model && !showThinking) {
- const cleanedResponse = finalResponseText.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
- return { success: true, response: cleanedResponse };
- }
+
status = Strings.ai.statusRendering;
modelHeader = Strings.ai.modelHeader
- .replace("{model}", model)
+ .replace("{model}", cleanedModelName)
.replace("{temperature}", aiTemperature)
.replace("{status}", status) + "\n\n";
- await rateLimiter.editMessageWithRetry(
- ctx,
- ctx.chat.id,
- replyGenerating.message_id,
- modelHeader + formatThinkingMessage(fullResponse),
- { parse_mode: 'Markdown' }
- );
+
+ if (showThinking) {
+ let displayResponse = processThinkingTags(fullResponse);
+
+ await rateLimiter.editMessageWithRetry(
+ ctx,
+ ctx.chat.id,
+ replyGenerating.message_id,
+ modelHeader + formatThinkingMessage(displayResponse),
+ { parse_mode: 'Markdown' }
+ );
+ }
+
const responseCharCount = fullResponse.length;
await db.update(schema.usersTable)
.set({
@@ -373,9 +374,12 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
aiRequests: sql`${schema.usersTable.aiRequests} + 1`
})
.where(eq(schema.usersTable.telegramId, userId));
+
+ const patchedResponse = processThinkingTags(fullResponse);
+
return {
success: true,
- response: fullResponse,
+ response: patchedResponse,
};
} catch (error: unknown) {
const errorMsg = extractAxiosErrorMessage(error);
@@ -429,16 +433,20 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
if (!aiResponse) return;
if (!ctx.chat) return;
if (aiResponse.success && aiResponse.response) {
+ const cleanedModelName = model.replace('hf.co/', '');
const status = Strings.ai.statusComplete;
const modelHeader = Strings.ai.modelHeader
- .replace("{model}", model)
+ .replace("{model}", cleanedModelName)
.replace("{temperature}", aiTemperature)
.replace("{status}", status) + "\n\n";
const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
let finalResponse = aiResponse.response;
- if (model === thinking_model) {
- finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
- .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+ if (showThinking) {
+ finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
+ .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+ } else {
+ finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim();
+ finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*/g, '').trim();
}
await rateLimiter.editMessageWithRetry(
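
For reference, the placeholder scheme above round-trips as in the sketch below (assuming the tag handling shown in processThinkingTags; this is not output captured from a live run):

// Illustrative only: expected behavior of processThinkingTags.
const streamed = '<think>weighing options</think>The answer is 4.';
const tagged = processThinkingTags(streamed);
// tagged === '___THINK_START___weighing options___THINK_END___The answer is 4.'
// With showThinking enabled, handleAiReply swaps the placeholders for
// Strings.ai.thinking / Strings.ai.finishedThinking; with it disabled, the
// placeholder-delimited span (and any unterminated tail) is stripped.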
diff --git a/src/commands/main.ts b/src/commands/main.ts
index 9478a13..f86ddea 100644
--- a/src/commands/main.ts
+++ b/src/commands/main.ts
@@ -10,6 +10,7 @@ import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
import { getModelLabelByName } from './ai';
import { models } from '../../config/ai';
import { langs } from '../locales/config';
+import { modelPageSize, seriesPageSize } from '../../config/settings';
type UserRow = typeof schema.usersTable.$inferSelect;
@@ -221,12 +222,12 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
const originalIndex = start + idx;
const isSelected = series.models.some(m => m.name === user.customAiModel);
const label = isSelected ? `✅ ${series.label}` : series.label;
- return { text: label, callback_data: `selectseries_${originalIndex}_${user.telegramId}` };
+ return { text: label, callback_data: `selectseries_${originalIndex}_0_${user.telegramId}` };
});
const navigationButtons: any[] = [];
if (page > 0) {
- navigationButtons.push({ text: Strings.varStrings.varBack, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
+ navigationButtons.push({ text: Strings.varStrings.varLess, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
}
if (end < models.length) {
navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `settings_aiModel_${page + 1}_${user.telegramId}` });
@@ -257,7 +258,7 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
}
});
- bot.action(/^selectseries_\d+_\d+$/, async (ctx) => {
+ bot.action(/^selectseries_\d+_\d+_\d+$/, async (ctx) => {
const data = (ctx.callbackQuery as any).data;
const userId = extractUserIdFromCallback(data);
const allowed = !!userId && String(ctx.from.id) === userId;
@@ -269,26 +270,46 @@ export default (bot: Telegraf, db: NodePgDatabase) => {
await ctx.answerCbQuery();
const { user, Strings } = await getUserAndStrings(ctx, db);
if (!user) return;
- const match = data.match(/^selectseries_(\d+)_\d+$/);
+ const match = data.match(/^selectseries_(\d+)_(\d+)_(\d+)$/);
if (!match) return;
const seriesIdx = parseInt(match[1], 10);
+ const modelPage = parseInt(match[2], 10);
const series = models[seriesIdx];
if (!series) return;
- const pageSize = 4;
- const page = Math.floor(seriesIdx / pageSize);
+
+ const seriesPage = Math.floor(seriesIdx / seriesPageSize);
+
+ const start = modelPage * modelPageSize;
+ const end = start + modelPageSize;
+ const paginatedSeriesModels = series.models.slice(start, end);
+
+ const modelButtons = paginatedSeriesModels.map((m, idx) => {
+ const originalModelIndex = start + idx;
+ const isSelected = m.name === user.customAiModel;
+ const label = isSelected ? `✅ ${m.label}` : m.label;
+ return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${originalModelIndex}_${user.telegramId}` }];
+ });
+
+ const navigationButtons: any[] = [];
+ if (modelPage > 0) {
+ navigationButtons.push({ text: Strings.varStrings.varLess, callback_data: `selectseries_${seriesIdx}_${modelPage - 1}_${user.telegramId}` });
+ }
+ if (end < series.models.length) {
+ navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `selectseries_${seriesIdx}_${modelPage + 1}_${user.telegramId}` });
+ }
+
+ const keyboard: any[][] = [...modelButtons];
+ if (navigationButtons.length > 0) {
+ keyboard.push(navigationButtons);
+ }
+ keyboard.push([{ text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${seriesPage}_${user.telegramId}` }]);
const desc = user.languageCode === 'pt' ? series.descriptionPt : series.descriptionEn;
try {
await ctx.editMessageText(
`${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label).replace(' [ & Uncensored ]', '')}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
{
reply_markup: {
- inline_keyboard: series.models.map((m, idx) => {
- const isSelected = m.name === user.customAiModel;
- const label = isSelected ? `✅ ${m.label}` : m.label;
- return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }];
- }).concat([[
- { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${page}_${user.telegramId}` }
- ]])
+ inline_keyboard: keyboard
}
}
);
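
For reference, the callback_data layout behind this pagination, inferred from the handlers above (a sketch, not an exhaustive spec):

// selectseries_<seriesIdx>_<modelPage>_<telegramId>
//   seriesIdx:  index into models[] from config/ai.ts
//   modelPage:  zero-based page of modelPageSize entries within the series
//   telegramId: ties the button to its owner; other users' taps are rejected
// setmodel_<seriesIdx>_<modelIdx>_<telegramId> selects a concrete model, and
// settings_aiModel_<seriesPage>_<telegramId> returns to the series list,
// with seriesPage = Math.floor(seriesIdx / seriesPageSize).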
diff --git a/src/locales/english.json b/src/locales/english.json
index 0f6f875..dcb178d 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -80,7 +80,7 @@
"inQueue": "ℹ️ You are {position} in the queue.",
"startingProcessing": "✨ Starting to process your request...",
"systemPrompt": "You are a friendly assistant called {botName}.\nCurrent Date/Time (UTC): {date}\n\n---\n\nUser message:\n{message}",
- "statusWaitingRender": "⏳ Waiting to Render...",
+ "statusWaitingRender": "⏳ Streaming...",
"statusRendering": "🖼️ Rendering...",
"statusComplete": "✅ Complete!",
"modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
@@ -123,7 +123,7 @@
"aiEnabledSetTo": "AI Enabled set to {aiEnabled}",
"aiModelSetTo": "AI Model set to {aiModel}",
"aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
- "selectSeries": "*Please select a model series.*",
+ "selectSeries": "*Please select a model series.*\n\nThis will be set as the default model for the /ai command.",
"seriesDescription": "{seriesDescription}",
"selectParameterSize": "*Please select a parameter size for {seriesLabel}*.",
"parameterSizeExplanation": "Parameter size (e.g. 2B, 4B) refers to the number of parameters in the model. Larger models may be more capable but require more resources.",
diff --git a/src/locales/portuguese.json b/src/locales/portuguese.json
index 6571b12..8697c2b 100644
--- a/src/locales/portuguese.json
+++ b/src/locales/portuguese.json
@@ -85,7 +85,7 @@
"selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
"parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
"systemPrompt": "Você é um assistente de Telegram chamado {botName}.\nData/Hora atual (UTC): {date}\n\n---\n\nMensagem do usuário:\n{message}",
- "statusWaitingRender": "⏳ Aguardando renderização...",
+ "statusWaitingRender": "⏳ Transmitindo...",
"statusRendering": "🖼️ Renderizando...",
"statusComplete": "✅ Completo!",
"modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
@@ -126,7 +126,7 @@
"aiEnabledSetTo": "Inteligência Artificial definido para {aiEnabled}",
"aiModelSetTo": "Modelo personalizado definido para {aiModel}",
"aiTemperatureSetTo": "Temperatura definida para {aiTemperature}",
- "selectSeries": "*Por favor, selecione uma série de modelos.*",
+ "selectSeries": "*Por favor, selecione uma série de modelos.*\n\nIsso será definido como o modelo padrão para o comando /ai.",
"seriesDescription": "{seriesDescription}",
"selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
"parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",