more models, nodemon fix, thinking fixes, settings UI fixes, cleanup

Aidan 2025-07-03 01:04:19 -04:00
parent 4409acd79d
commit 973d224bf7
7 changed files with 211 additions and 106 deletions

View file

@@ -2,7 +2,7 @@ import type { ModelInfo } from "../src/commands/ai"
 export const defaultFlashModel = "gemma3:4b"
 export const defaultThinkingModel = "qwen3:4b"
-export const unloadModelAfterB = 0.1 // how many billion params until model is auto-unloaded
+export const unloadModelAfterB = 12 // how many billion params until model is auto-unloaded
 export const models: ModelInfo[] = [
   {
@@ -35,13 +35,31 @@ export const models: ModelInfo[] = [
     descriptionPt: 'Qwen3 é uma série de modelos multilingues.',
     models: [
       { name: 'qwen3:0.6b', label: 'Qwen3 0.6B', parameterSize: '0.6B' },
+      { name: 'qwen3:1.7b', label: 'Qwen3 1.7B', parameterSize: '1.7B' },
       { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
       { name: 'qwen3:8b', label: 'Qwen3 8B', parameterSize: '8B' },
       { name: 'qwen3:14b', label: 'Qwen3 14B', parameterSize: '14B' },
       { name: 'qwen3:30b', label: 'Qwen3 30B', parameterSize: '30B' },
+      { name: 'qwen3:32b', label: 'Qwen3 32B', parameterSize: '32B' },
       { name: 'qwen3:235b-a22b', label: 'Qwen3 235B A22B', parameterSize: '235B' },
     ]
   },
+  {
+    name: 'qwen3-abliterated',
+    label: 'Qwen3 [ Uncensored ]',
+    descriptionEn: 'Qwen3-abliterated is a multilingual reasoning model series.',
+    descriptionPt: 'Qwen3-abliterated é uma série de modelos multilingues.',
+    models: [
+      { name: 'huihui_ai/qwen3-abliterated:0.6b', label: 'Qwen3 Uncensored 0.6B', parameterSize: '0.6B' },
+      { name: 'huihui_ai/qwen3-abliterated:1.7b', label: 'Qwen3 Uncensored 1.7B', parameterSize: '1.7B' },
+      { name: 'huihui_ai/qwen3-abliterated:4b', label: 'Qwen3 Uncensored 4B', parameterSize: '4B' },
+      { name: 'huihui_ai/qwen3-abliterated:8b', label: 'Qwen3 Uncensored 8B', parameterSize: '8B' },
+      { name: 'huihui_ai/qwen3-abliterated:14b', label: 'Qwen3 Uncensored 14B', parameterSize: '14B' },
+      { name: 'huihui_ai/qwen3-abliterated:30b', label: 'Qwen3 Uncensored 30B', parameterSize: '30B' },
+      { name: 'huihui_ai/qwen3-abliterated:32b', label: 'Qwen3 Uncensored 32B', parameterSize: '32B' },
+      { name: 'huihui_ai/qwen3-abliterated:235b', label: 'Qwen3 Uncensored 235B', parameterSize: '235B' },
+    ]
+  },
   {
     name: 'qwq',
     label: 'QwQ',
@@ -49,6 +67,7 @@ export const models: ModelInfo[] = [
     descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.',
     models: [
       { name: 'qwq:32b', label: 'QwQ 32B', parameterSize: '32B' },
+      { name: 'huihui_ai/qwq-abliterated:32b', label: 'QwQ Uncensored 32B', parameterSize: '32B' },
     ]
   },
   {
@@ -60,6 +79,32 @@ export const models: ModelInfo[] = [
       { name: 'llama4:scout', label: 'Llama4 109B A17B', parameterSize: '109B' },
     ]
   },
+  {
+    name: 'deepseek',
+    label: 'DeepSeek [ & Uncensored ]',
+    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
+    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
+    models: [
+      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
+      { name: 'deepseek-r1:8b', label: 'DeepSeek 8B', parameterSize: '8B' },
+      { name: 'deepseek-r1:14b', label: 'DeepSeek 14B', parameterSize: '14B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:8b', label: 'DeepSeek Uncensored 8B', parameterSize: '8B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:14b', label: 'DeepSeek Uncensored 14B', parameterSize: '14B' },
+    ]
+  },
+  {
+    name: 'hermes3',
+    label: 'Hermes3',
+    descriptionEn: 'Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research.',
+    descriptionPt: 'Hermes 3 é a versão mais recente da série Hermes de LLMs da Nous Research.',
+    models: [
+      { name: 'hermes3:3b', label: 'Hermes3 3B', parameterSize: '3B' },
+      { name: 'hermes3:8b', label: 'Hermes3 8B', parameterSize: '8B' },
+    ]
+  },
   {
     name: 'mistral',
     label: 'Mistral',
@@ -70,15 +115,15 @@ export const models: ModelInfo[] = [
     ]
   },
   {
-    name: 'deepseek',
-    label: 'DeepSeek [ & Uncensored ]',
-    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
-    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
-    models: [
-      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
-      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+    name: 'phi4 [ & Uncensored ]',
+    label: 'Phi4',
+    descriptionEn: 'Phi-4 is a 14B parameter, state-of-the-art open model from Microsoft.',
+    descriptionPt: 'Phi-4 é um modelo de 14B de última geração, aberto pela Microsoft.',
+    models: [
+      { name: 'hf.co/unsloth/Phi-4-mini-reasoning-GGUF', label: 'Phi4 Mini Reasoning', parameterSize: '4B' },
+      { name: 'phi4:14b', label: 'Phi4 14B', parameterSize: '14B' },
+      { name: 'hf.co/unsloth/Phi-4-reasoning-plus-GGUF', label: 'Phi4 Reasoning Plus', parameterSize: '14B' },
+      { name: 'huihui_ai/phi4-abliterated:14b', label: 'Phi4 Uncensored 14B', parameterSize: '14B' },
     ]
   },
   {
@@ -89,5 +134,34 @@ export const models: ModelInfo[] = [
     models: [
       { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
     ]
-  }
+  },
+  {
+    name: 'llama3',
+    label: 'Llama3',
+    descriptionEn: 'Llama 3, a lightweight model from Meta.',
+    descriptionPt: 'Llama 3, um modelo leve da Meta.',
+    models: [
+      { name: 'llama3:8b', label: 'Llama3 8B', parameterSize: '8B' },
+    ]
+  },
+  {
+    name: 'llama3.1 [ Uncensored ]',
+    label: 'Llama3.1',
+    descriptionEn: 'Abliterated v3 Llama-3.1 8B with uncensored prompt.',
+    descriptionPt: 'Llama3.1 é um modelo aberto, leve e para dispositivos locais, com prompt não censurado.',
+    models: [
+      { name: 'mannix/llama3.1-8b-abliterated:latest', label: 'Llama3.1 8B', parameterSize: '8B' },
+    ]
+  },
+  {
+    name: 'llama3.2 [ & Uncensored ]',
+    label: 'Llama3.2',
+    descriptionEn: 'Llama3.2 is a family of open, lightweight models for general tasks.',
+    descriptionPt: 'Llama3.2 é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
+    models: [
+      { name: 'llama3.2:1b', label: 'Llama3.2 1B', parameterSize: '1B' },
+      { name: 'llama3.2:3b', label: 'Llama3.2 3B', parameterSize: '3B' },
+      { name: 'socialnetwooky/llama3.2-abliterated:3b_q8_0', label: 'Llama3.2 Uncensored 3B', parameterSize: '3B' },
+    ]
+  },
 ];
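Note: raising unloadModelAfterB from 0.1 to 12 means only models larger than 12B parameters get auto-unloaded. A minimal sketch of how such a threshold check could be applied; the helper below is hypothetical and not part of this commit, which only changes the constant:

    // Hypothetical helper (not in this commit): parse a parameterSize string
    // such as '0.6B' or '14B' and compare it to the unload threshold.
    import { unloadModelAfterB } from "../../config/ai";

    function shouldAutoUnload(parameterSize: string): boolean {
      const billions = parseFloat(parameterSize); // '0.6B' -> 0.6, '14B' -> 14
      return billions > unloadModelAfterB;
    }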

config/settings.ts (new file)
View file

@@ -0,0 +1,2 @@
+export const seriesPageSize = 4;
+export const modelPageSize = 4;
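These two page-size constants drive the paginated keyboards in the settings command further down. As a quick illustration of the slice math (sample data; only the constant comes from this file):

    import { modelPageSize } from "../../config/settings";

    // Page 1 of a 5-item list with modelPageSize = 4 yields just the last
    // item; slice() clamps indexes past the end, so short pages are safe.
    function pageOf<T>(items: T[], page: number): T[] {
      const start = page * modelPageSize;
      return items.slice(start, start + modelPageSize);
    }

    // pageOf(['a', 'b', 'c', 'd', 'e'], 1) -> ['e']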

View file

@@ -1,6 +1,6 @@
 {
   "ignore": ["src/props/*.json", "src/props/*.txt"],
-  "watch": ["src"],
+  "watch": ["src", "config"],
   "ext": "ts,js",
   "exec": "bun src/bot.ts"
 }

View file

@@ -135,6 +135,26 @@ function sanitizeMarkdownForTelegram(text: string): string {
   return sanitizedText;
 }
 
+function processThinkingTags(text: string): string {
+  let processedText = text;
+
+  const firstThinkIndex = processedText.indexOf('<think>');
+  if (firstThinkIndex === -1) {
+    return processedText.replace(/<\/think>/g, '___THINK_END___');
+  }
+
+  processedText = processedText.substring(0, firstThinkIndex) + '___THINK_START___' + processedText.substring(firstThinkIndex + '<think>'.length);
+  const lastThinkEndIndex = processedText.lastIndexOf('</think>');
+  if (lastThinkEndIndex !== -1) {
+    processedText = processedText.substring(0, lastThinkEndIndex) + '___THEND___' + processedText.substring(lastThinkEndIndex + '</think>'.length);
+  }
+
+  processedText = processedText.replace(/<think>/g, '');
+  processedText = processedText.replace(/<\/think>/g, '');
+  processedText = processedText.replace('___THEND___', '___THINK_END___');
+  return processedText;
+}
+
 export async function preChecks() {
   const envs = [
     "ollamaApi",
@@ -221,9 +241,10 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound),
     };
   }
+  const cleanedModelName = model.replace('hf.co/', '');
   let status = Strings.ai.statusWaitingRender;
   let modelHeader = Strings.ai.modelHeader
-    .replace("{model}", model)
+    .replace("{model}", cleanedModelName)
     .replace("{temperature}", aiTemperature)
     .replace("{status}", status) + "\n\n";
@@ -277,23 +298,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
             console.error("[✨ AI | !] Error parsing chunk");
             continue;
           }
-          if (model === thinking_model && !showThinking) {
-            if (ln.response) {
-              finalResponseText += ln.response;
-              if (finalResponseText.includes('<think>') && !thinkingMessageSent) {
-                await rateLimiter.editMessageWithRetry(
-                  ctx,
-                  ctx.chat.id,
-                  replyGenerating.message_id,
-                  modelHeader + Strings.ai.thinking,
-                  { parse_mode: 'Markdown' }
-                );
-                thinkingMessageSent = true;
-              }
-            }
-            continue;
-          }
-          if (model === thinking_model && ln.response) {
+          if (ln.response) {
             if (ln.response.includes('<think>')) {
               const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/);
               if (thinkMatch && thinkMatch[1].trim().length > 0) {
@@ -304,68 +310,63 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
             } else if (ln.response.includes('</think>')) {
               logger.logThinking(ctx.chat.id, replyGenerating.message_id, false);
             }
-          }
-          if (ln.response) {
-            if (model === thinking_model) {
-              let patchedThoughts = ln.response;
-              const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
-              patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '___THINK_START___' + p1.trim() + '___THINK_END___' : '');
-              patchedThoughts = patchedThoughts.replace(/<think>/g, '___THINK_START___');
-              patchedThoughts = patchedThoughts.replace(/<\/think>/g, '___THINK_END___');
-              thoughts += patchedThoughts;
-              fullResponse += patchedThoughts;
-            } else {
-              fullResponse += ln.response;
-            }
-            if (firstChunk) {
-              status = Strings.ai.statusWaitingRender;
-              modelHeader = Strings.ai.modelHeader
-                .replace("{model}", model)
-                .replace("{temperature}", aiTemperature)
-                .replace("{status}", status) + "\n\n";
-              await rateLimiter.editMessageWithRetry(
-                ctx,
-                ctx.chat.id,
-                replyGenerating.message_id,
-                modelHeader + formatThinkingMessage(fullResponse),
-                { parse_mode: 'Markdown' }
-              );
-              lastUpdateCharCount = fullResponse.length;
-              sentHeader = true;
-              firstChunk = false;
-              continue;
-            }
-            const updateEveryChars = Number(process.env.updateEveryChars) || 100;
-            if (fullResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
-              await rateLimiter.editMessageWithRetry(
-                ctx,
-                ctx.chat.id,
-                replyGenerating.message_id,
-                modelHeader + formatThinkingMessage(fullResponse),
-                { parse_mode: 'Markdown' }
-              );
-              lastUpdateCharCount = fullResponse.length;
-              sentHeader = true;
-            }
+            fullResponse += ln.response;
+            if (showThinking) {
+              let displayResponse = processThinkingTags(fullResponse);
+              if (firstChunk) {
+                status = Strings.ai.statusWaitingRender;
+                modelHeader = Strings.ai.modelHeader
+                  .replace("{model}", cleanedModelName)
+                  .replace("{temperature}", aiTemperature)
+                  .replace("{status}", status) + "\n\n";
+                await rateLimiter.editMessageWithRetry(
+                  ctx,
+                  ctx.chat.id,
+                  replyGenerating.message_id,
+                  modelHeader + formatThinkingMessage(displayResponse),
+                  { parse_mode: 'Markdown' }
+                );
+                lastUpdateCharCount = displayResponse.length;
+                sentHeader = true;
+                firstChunk = false;
+                continue;
+              }
+              const updateEveryChars = Number(process.env.updateEveryChars) || 100;
+              if (displayResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) {
+                await rateLimiter.editMessageWithRetry(
+                  ctx,
+                  ctx.chat.id,
+                  replyGenerating.message_id,
+                  modelHeader + formatThinkingMessage(displayResponse),
+                  { parse_mode: 'Markdown' }
+                );
+                lastUpdateCharCount = displayResponse.length;
+                sentHeader = true;
+              }
+            }
           }
         }
       }
-      if (model === thinking_model && !showThinking) {
-        const cleanedResponse = finalResponseText.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
-        return { success: true, response: cleanedResponse };
-      }
       status = Strings.ai.statusRendering;
       modelHeader = Strings.ai.modelHeader
-        .replace("{model}", model)
+        .replace("{model}", cleanedModelName)
         .replace("{temperature}", aiTemperature)
         .replace("{status}", status) + "\n\n";
-      await rateLimiter.editMessageWithRetry(
-        ctx,
-        ctx.chat.id,
-        replyGenerating.message_id,
-        modelHeader + formatThinkingMessage(fullResponse),
-        { parse_mode: 'Markdown' }
-      );
+
+      if (showThinking) {
+        let displayResponse = processThinkingTags(fullResponse);
+
+        await rateLimiter.editMessageWithRetry(
+          ctx,
+          ctx.chat.id,
+          replyGenerating.message_id,
+          modelHeader + formatThinkingMessage(displayResponse),
+          { parse_mode: 'Markdown' }
+        );
+      }
       const responseCharCount = fullResponse.length;
       await db.update(schema.usersTable)
         .set({
@@ -373,9 +374,12 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
           aiRequests: sql`${schema.usersTable.aiRequests} + 1`
         })
         .where(eq(schema.usersTable.telegramId, userId));
+
+      const patchedResponse = processThinkingTags(fullResponse);
+
       return {
         success: true,
-        response: fullResponse,
+        response: patchedResponse,
       };
     } catch (error: unknown) {
       const errorMsg = extractAxiosErrorMessage(error);
@@ -429,16 +433,20 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
   if (!aiResponse) return;
   if (!ctx.chat) return;
   if (aiResponse.success && aiResponse.response) {
+    const cleanedModelName = model.replace('hf.co/', '');
     const status = Strings.ai.statusComplete;
     const modelHeader = Strings.ai.modelHeader
-      .replace("{model}", model)
+      .replace("{model}", cleanedModelName)
       .replace("{temperature}", aiTemperature)
       .replace("{status}", status) + "\n\n";
     const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
     let finalResponse = aiResponse.response;
-    if (model === thinking_model) {
+    if (showThinking) {
       finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
         .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+    } else {
+      finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim();
+      finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*/g, '').trim();
     }
     await rateLimiter.editMessageWithRetry(
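With showThinking replacing the old model === thinking_model check, the marker pair is either rendered as labels or stripped entirely. A sketch of both branches; the label strings below stand in for Strings.ai.thinking and Strings.ai.finishedThinking, whose exact values are not shown in this diff:

    const marked = '___THINK_START___weighing options___THINK_END___Final answer.';

    // showThinking: markers become visible labels
    const shown = marked
      .replace(/___THINK_START___/g, 'Thinking...')
      .replace(/___THINK_END___/g, 'Finished thinking');

    // !showThinking: drop the whole thought block, and also drop any
    // unterminated thought stream left by a cut-off response
    const hidden = marked
      .replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim()
      .replace(/___THINK_START___[\s\S]*/g, '').trim();
    // hidden === 'Final answer.'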

View file

@@ -10,6 +10,7 @@ import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
 import { getModelLabelByName } from './ai';
 import { models } from '../../config/ai';
 import { langs } from '../locales/config';
+import { modelPageSize, seriesPageSize } from '../../config/settings';
 
 type UserRow = typeof schema.usersTable.$inferSelect;
@@ -221,12 +222,12 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
           const originalIndex = start + idx;
           const isSelected = series.models.some(m => m.name === user.customAiModel);
           const label = isSelected ? `${series.label}` : series.label;
-          return { text: label, callback_data: `selectseries_${originalIndex}_${user.telegramId}` };
+          return { text: label, callback_data: `selectseries_${originalIndex}_0_${user.telegramId}` };
         });
         const navigationButtons: any[] = [];
         if (page > 0) {
-          navigationButtons.push({ text: Strings.varStrings.varBack, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
+          navigationButtons.push({ text: Strings.varStrings.varLess, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
         }
         if (end < models.length) {
           navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `settings_aiModel_${page + 1}_${user.telegramId}` });
@@ -257,7 +258,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     }
   });
 
-  bot.action(/^selectseries_\d+_\d+$/, async (ctx) => {
+  bot.action(/^selectseries_\d+_\d+_\d+$/, async (ctx) => {
     const data = (ctx.callbackQuery as any).data;
     const userId = extractUserIdFromCallback(data);
     const allowed = !!userId && String(ctx.from.id) === userId;
@@ -269,26 +270,46 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
     await ctx.answerCbQuery();
     const { user, Strings } = await getUserAndStrings(ctx, db);
     if (!user) return;
-    const match = data.match(/^selectseries_(\d+)_\d+$/);
+    const match = data.match(/^selectseries_(\d+)_(\d+)_(\d+)$/);
     if (!match) return;
     const seriesIdx = parseInt(match[1], 10);
+    const modelPage = parseInt(match[2], 10);
     const series = models[seriesIdx];
     if (!series) return;
-    const pageSize = 4;
-    const page = Math.floor(seriesIdx / pageSize);
+
+    const seriesPage = Math.floor(seriesIdx / seriesPageSize);
+    const start = modelPage * modelPageSize;
+    const end = start + modelPageSize;
+    const paginatedSeriesModels = series.models.slice(start, end);
+
+    const modelButtons = paginatedSeriesModels.map((m, idx) => {
+      const originalModelIndex = start + idx;
+      const isSelected = m.name === user.customAiModel;
+      const label = isSelected ? `${m.label}` : m.label;
+      return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${originalModelIndex}_${user.telegramId}` }];
+    });
+
+    const navigationButtons: any[] = [];
+    if (modelPage > 0) {
+      navigationButtons.push({ text: Strings.varStrings.varLess, callback_data: `selectseries_${seriesIdx}_${modelPage - 1}_${user.telegramId}` });
+    }
+    if (end < series.models.length) {
+      navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `selectseries_${seriesIdx}_${modelPage + 1}_${user.telegramId}` });
+    }
+
+    const keyboard: any[][] = [...modelButtons];
+    if (navigationButtons.length > 0) {
+      keyboard.push(navigationButtons);
+    }
+    keyboard.push([{ text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${seriesPage}_${user.telegramId}` }]);
+
     const desc = user.languageCode === 'pt' ? series.descriptionPt : series.descriptionEn;
     try {
       await ctx.editMessageText(
         `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label).replace(' [ & Uncensored ]', '')}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
         {
           reply_markup: {
-            inline_keyboard: series.models.map((m, idx) => {
-              const isSelected = m.name === user.customAiModel;
-              const label = isSelected ? `${m.label}` : m.label;
-              return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }];
-            }).concat([[
-              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${page}_${user.telegramId}` }
-            ]])
+            inline_keyboard: keyboard
           }
         }
       );
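The selectseries callback data now carries three fields, selectseries_<seriesIdx>_<modelPage>_<userId>, so the handler can page within a series. A round-trip sketch with made-up values:

    const data = 'selectseries_7_2_123456789';
    const match = data.match(/^selectseries_(\d+)_(\d+)_(\d+)$/);
    if (match) {
      const seriesIdx = parseInt(match[1], 10); // 7 -> index into models[]
      const modelPage = parseInt(match[2], 10); // 2 -> page within series.models
      const userId = match[3];                  // '123456789' -> ownership check
    }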

View file

@@ -80,7 +80,7 @@
     "inQueue": " You are {position} in the queue.",
     "startingProcessing": "✨ Starting to process your request...",
     "systemPrompt": "You are a friendly assistant called {botName}.\nCurrent Date/Time (UTC): {date}\n\n---\n\nUser message:\n{message}",
-    "statusWaitingRender": "⏳ Waiting to Render...",
+    "statusWaitingRender": "⏳ Streaming...",
     "statusRendering": "🖼️ Rendering...",
     "statusComplete": "✅ Complete!",
     "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
@@ -123,7 +123,7 @@
     "aiEnabledSetTo": "AI Enabled set to {aiEnabled}",
     "aiModelSetTo": "AI Model set to {aiModel}",
     "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
-    "selectSeries": "*Please select a model series.*",
+    "selectSeries": "*Please select a model series.*\n\nThis will be set as the default model for the /ai command.",
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "*Please select a parameter size for {seriesLabel}*.",
     "parameterSizeExplanation": "Parameter size (e.g. 2B, 4B) refers to the number of parameters in the model. Larger models may be more capable but require more resources.",

View file

@@ -85,7 +85,7 @@
     "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
     "systemPrompt": "Você é um assistente de Telegram chamado {botName}.\nData/Hora atual (UTC): {date}\n\n---\n\nMensagem do usuário:\n{message}",
-    "statusWaitingRender": "⏳ Aguardando renderização...",
+    "statusWaitingRender": "⏳ Transmitindo...",
     "statusRendering": "🖼️ Renderizando...",
     "statusComplete": "✅ Completo!",
     "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
@@ -126,7 +126,7 @@
     "aiEnabledSetTo": "Inteligência Artificial definido para {aiEnabled}",
     "aiModelSetTo": "Modelo personalizado definido para {aiModel}",
     "aiTemperatureSetTo": "Temperatura definida para {aiTemperature}",
-    "selectSeries": "*Por favor, selecione uma série de modelos.*",
+    "selectSeries": "*Por favor, selecione uma série de modelos.*\n\nIsso será definido como o modelo padrão para o comando /ai.",
     "seriesDescription": "{seriesDescription}",
     "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
     "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",