split some options to config folder, translation fixes, markdown fixes, ui improvements to settings and ai, cleanup, add additional models
This commit is contained in:
parent 1857767213
commit 4409acd79d
8 changed files with 273 additions and 182 deletions

@@ -124,6 +124,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
 - **weatherKey**: Weather.com API key, used for the `/weather` command.
 - **longerLogs**: Set to `true` to enable verbose logging whenever possible.
 
+> [!NOTE]
+> Further, advanced fine-tuning and configuration can be done in TypeScript with the files in the `/config` folder.
+
 ## Troubleshooting
 
 ### YouTube Downloading

config/ai.ts (new file, 93 lines)
@@ -0,0 +1,93 @@
+import type { ModelInfo } from "../src/commands/ai"
+
+export const defaultFlashModel = "gemma3:4b"
+export const defaultThinkingModel = "qwen3:4b"
+export const unloadModelAfterB = 0.1 // how many billion params until model is auto-unloaded
+
+export const models: ModelInfo[] = [
+  {
+    name: 'gemma3n',
+    label: 'gemma3n',
+    descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
+    descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
+    models: [
+      { name: 'gemma3n:e2b', label: 'Gemma3n e2b', parameterSize: '2B' },
+      { name: 'gemma3n:e4b', label: 'Gemma3n e4b', parameterSize: '4B' },
+    ]
+  },
+  {
+    name: 'gemma3',
+    label: 'gemma3 [ & Uncensored ]',
+    descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
+    descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
+    models: [
+      { name: 'huihui_ai/gemma3-abliterated:1b', label: 'Gemma3 Uncensored 1B', parameterSize: '1B' },
+      { name: 'huihui_ai/gemma3-abliterated:4b', label: 'Gemma3 Uncensored 4B', parameterSize: '4B' },
+      { name: 'gemma3:1b', label: 'Gemma3 1B', parameterSize: '1B' },
+      { name: 'gemma3:4b', label: 'Gemma3 4B', parameterSize: '4B' },
+      { name: 'gemma3:12b', label: 'Gemma3 12B', parameterSize: '12B' },
+    ]
+  },
+  {
+    name: 'qwen3',
+    label: 'Qwen3',
+    descriptionEn: 'Qwen3 is a multilingual reasoning model series.',
+    descriptionPt: 'Qwen3 é uma série de modelos multilingues.',
+    models: [
+      { name: 'qwen3:0.6b', label: 'Qwen3 0.6B', parameterSize: '0.6B' },
+      { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
+      { name: 'qwen3:8b', label: 'Qwen3 8B', parameterSize: '8B' },
+      { name: 'qwen3:14b', label: 'Qwen3 14B', parameterSize: '14B' },
+      { name: 'qwen3:30b', label: 'Qwen3 30B', parameterSize: '30B' },
+      { name: 'qwen3:235b-a22b', label: 'Qwen3 235B A22B', parameterSize: '235B' },
+    ]
+  },
+  {
+    name: 'qwq',
+    label: 'QwQ',
+    descriptionEn: 'QwQ is the reasoning model of the Qwen series.',
+    descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.',
+    models: [
+      { name: 'qwq:32b', label: 'QwQ 32B', parameterSize: '32B' },
+    ]
+  },
+  {
+    name: 'llama4',
+    label: 'Llama4',
+    descriptionEn: 'The latest collection of multimodal models from Meta.',
+    descriptionPt: 'A coleção mais recente de modelos multimodais da Meta.',
+    models: [
+      { name: 'llama4:scout', label: 'Llama4 109B A17B', parameterSize: '109B' },
+    ]
+  },
+  {
+    name: 'mistral',
+    label: 'Mistral',
+    descriptionEn: 'The 7B model released by Mistral AI, updated to version 0.3.',
+    descriptionPt: 'O modelo 7B lançado pela Mistral AI, atualizado para a versão 0.3.',
+    models: [
+      { name: 'mistral:7b', label: 'Mistral 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'deepseek',
+    label: 'DeepSeek [ & Uncensored ]',
+    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
+    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
+    models: [
+      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
+      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
+      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
+    ]
+  },
+  {
+    name: 'phi3',
+    label: 'Phi3',
+    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
+    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
+    models: [
+      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
+    ]
+  }
+];
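
Worth noting about this new file: it imports only the `ModelInfo` *type* from `src/commands/ai`, while that module imports the `models` value back from the config. A minimal sketch of why this does not create a runtime cycle (the `example` entry is hypothetical, and assumes `ModelInfo` has only the fields visible in this diff):

```typescript
// config/ai.ts pulls in the type only; `import type` is erased at compile
// time, so no runtime import cycle forms even though src/commands/ai.ts
// imports the `models` value back from this file.
import type { ModelInfo } from "../src/commands/ai"

export const example: ModelInfo = {
  name: 'example',
  label: 'Example',
  descriptionEn: 'A hypothetical series, used only to illustrate the shape.',
  descriptionPt: 'Uma série hipotética, usada apenas para ilustrar o formato.',
  models: [{ name: 'example:1b', label: 'Example 1B', parameterSize: '1B' }],
}
```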
@@ -34,7 +34,6 @@ import { Telegraf, Context } from "telegraf"
 import type { Message } from "telegraf/types"
 import { replyToMessageId } from "../utils/reply-to-message-id"
 import { getStrings } from "../plugins/checklang"
-import { languageCode } from "../utils/language-code"
 import axios from "axios"
 import { rateLimiter } from "../utils/rate-limiter"
 import { logger } from "../utils/log"
@@ -42,6 +41,7 @@ import { ensureUserInDb } from "../utils/ensure-user"
 import * as schema from '../db/schema'
 import type { NodePgDatabase } from "drizzle-orm/node-postgres"
 import { eq, sql } from 'drizzle-orm'
+import { models, unloadModelAfterB } from "../../config/ai"
 
 const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
 export const flash_model = process.env.flashModel || "gemma3:4b"
@@ -51,7 +51,7 @@ type TextContext = Context & { message: Message.TextMessage }
 
 type User = typeof schema.usersTable.$inferSelect
 
-interface ModelInfo {
+export interface ModelInfo {
   name: string;
   label: string;
   descriptionEn: string;
@@ -67,59 +67,6 @@ interface OllamaResponse {
   response: string;
 }
 
-export const models: ModelInfo[] = [
-  {
-    name: 'gemma3n',
-    label: 'gemma3n',
-    descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.',
-    descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
-    models: [
-      { name: 'gemma3n:e2b', label: 'Gemma3n e2b', parameterSize: '2B' },
-      { name: 'gemma3n:e4b', label: 'Gemma3n e4b', parameterSize: '4B' },
-    ]
-  },
-  {
-    name: 'gemma3-abliterated',
-    label: 'gemma3 Uncensored',
-    descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.',
-    descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.',
-    models: [
-      { name: 'huihui_ai/gemma3-abliterated:1b', label: 'Gemma3-abliterated 1B', parameterSize: '1b' },
-      { name: 'huihui_ai/gemma3-abliterated:4b', label: 'Gemma3-abliterated 4B', parameterSize: '4b' },
-    ]
-  },
-  {
-    name: 'qwen3',
-    label: 'Qwen3',
-    descriptionEn: 'Qwen3 is a multilingual reasoning model series.',
-    descriptionPt: 'Qwen3 é uma série de modelos multilingues.',
-    models: [
-      { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' },
-    ]
-  },
-  {
-    name: 'deepseek',
-    label: 'DeepSeek',
-    descriptionEn: 'DeepSeek is a research model for reasoning tasks.',
-    descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.',
-    models: [
-      { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' },
-      { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' },
-      { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' },
-    ]
-  },
-  {
-    name: 'phi3',
-    label: 'Phi3',
-    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
-    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
-    models: [
-      { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' },
-    ]
-  }
-];
-
 async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase<typeof schema>, botName: string, message: string): Promise<string> {
   const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
   if (user.length === 0) await ensureUserInDb(ctx, db);
@@ -263,16 +210,11 @@ function extractAxiosErrorMessage(error: unknown): string {
   return 'An unexpected error occurred.';
 }
 
-function escapeMarkdown(text: string): string {
-  return text.replace(/([_*\[\]()`>#\+\-=|{}.!~])/g, '\\$1');
-}
-
 function containsUrls(text: string): boolean {
-  return text.includes('http://') || text.includes('https://');
+  return text.includes('http://') || text.includes('https://') || text.includes('.com') || text.includes('.net') || text.includes('.org') || text.includes('.io') || text.includes('.ai') || text.includes('.dev')
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string): Promise<{ success: boolean; response?: string; error?: string }> {
-  const Strings = getStrings(languageCode(ctx));
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean): Promise<{ success: boolean; response?: string; error?: string }> {
   if (!ctx.chat) {
     return {
       success: false,
@@ -289,6 +231,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
   await db.update(schema.usersTable)
     .set({ aiCharacters: sql`${schema.usersTable.aiCharacters} + ${promptCharCount}` })
     .where(eq(schema.usersTable.telegramId, userId));
+  const paramSizeStr = models.find(m => m.name === model)?.models.find(m => m.name === model)?.parameterSize?.replace('B', '');
+  const shouldKeepAlive = paramSizeStr ? Number(paramSizeStr) > unloadModelAfterB : false;
 
   try {
     const aiResponse = await axios.post<unknown>(
@@ -297,6 +241,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
         model,
         prompt,
         stream: true,
+        keep_alive: shouldKeepAlive ? '1' : '0',
        options: {
          temperature: aiTemperature
        }
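
The `keep_alive` flag sent to Ollama is derived from the new `unloadModelAfterB` threshold: only models above that parameter count are kept loaded. A standalone sketch of the decision, using the same expressions the diff adds (one caution: the outer `find` compares the *series* name against the full model tag, so as written it only matches when the two coincide):

```typescript
import { models, unloadModelAfterB } from "../../config/ai"

const model = "qwen3:4b" // example tag

// Resolve the selected model's parameter size from the config table.
const paramSizeStr = models
  .find(m => m.name === model)?.models
  .find(m => m.name === model)?.parameterSize?.replace("B", "")

// Keep the model in memory only if it exceeds the 0.1B threshold.
const shouldKeepAlive = paramSizeStr ? Number(paramSizeStr) > unloadModelAfterB : false

// The request body then carries keep_alive: '1' (keep) or '0' (unload now).
console.log({ paramSizeStr, shouldKeepAlive, keep_alive: shouldKeepAlive ? "1" : "0" })
```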
@@ -311,6 +256,16 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     let sentHeader = false;
     let firstChunk = true;
     const stream: NodeJS.ReadableStream = aiResponse.data as any;
+    let thinkingMessageSent = false;
+    let finalResponseText = '';
+
+    const formatThinkingMessage = (text: string) => {
+      const withPlaceholders = text
+        .replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
+        .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+      return sanitizeMarkdownForTelegram(withPlaceholders);
+    };
+
     for await (const chunk of stream) {
       const lines = chunk.toString().split('\n');
       for (const line of lines) {
@@ -322,6 +277,22 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
          console.error("[✨ AI | !] Error parsing chunk");
          continue;
        }
+        if (model === thinking_model && !showThinking) {
+          if (ln.response) {
+            finalResponseText += ln.response;
+            if (finalResponseText.includes('<think>') && !thinkingMessageSent) {
+              await rateLimiter.editMessageWithRetry(
+                ctx,
+                ctx.chat.id,
+                replyGenerating.message_id,
+                modelHeader + Strings.ai.thinking,
+                { parse_mode: 'Markdown' }
+              );
+              thinkingMessageSent = true;
+            }
+          }
+          continue;
+        }
        if (model === thinking_model && ln.response) {
          if (ln.response.includes('<think>')) {
            const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/);
@@ -338,9 +309,9 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
        if (model === thinking_model) {
          let patchedThoughts = ln.response;
          const thinkTagRx = /<think>([\s\S]*?)<\/think>/g;
-          patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '`' + Strings.ai.thinking + '`' + p1 + '`' + Strings.ai.finishedThinking + '`' : '');
-          patchedThoughts = patchedThoughts.replace(/<think>/g, '`' + Strings.ai.thinking + '`');
-          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`' + Strings.ai.finishedThinking + '`');
+          patchedThoughts = patchedThoughts.replace(thinkTagRx, (p1) => p1.trim().length > 0 ? '___THINK_START___' + p1.trim() + '___THINK_END___' : '');
+          patchedThoughts = patchedThoughts.replace(/<think>/g, '___THINK_START___');
+          patchedThoughts = patchedThoughts.replace(/<\/think>/g, '___THINK_END___');
          thoughts += patchedThoughts;
          fullResponse += patchedThoughts;
        } else {
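
The intent of the `___THINK_START___`/`___THINK_END___` placeholders appears to be to keep the localized, backtick-wrapped labels out of the accumulating Markdown until the final render. A cleaned-up round-trip sketch (using the regex capture group, with stand-ins for `Strings` and `sanitizeMarkdownForTelegram`, which are not the actual implementations):

```typescript
// Stand-ins for the real localized strings and sanitizer (assumptions).
const Strings = { ai: { thinking: "`🧠 Thinking...`", finishedThinking: "`🧠 Done thinking.`" } }
const sanitizeMarkdownForTelegram = (s: string) => s // pass-through stand-in

// Streaming phase: raw <think> tags become Markdown-inert placeholders.
let text = "<think>weighing options</think>The answer is 42."
text = text.replace(/<think>([\s\S]*?)<\/think>/g, (_match, inner) =>
  inner.trim().length > 0 ? "___THINK_START___" + inner.trim() + "___THINK_END___" : "")

// Render phase: placeholders are swapped for the localized labels.
const rendered = sanitizeMarkdownForTelegram(
  text
    .replace(/___THINK_START___/g, Strings.ai.thinking)
    .replace(/___THINK_END___/g, Strings.ai.finishedThinking))

console.log(rendered)
// `🧠 Thinking...`weighing options`🧠 Done thinking.`The answer is 42.
```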
@@ -356,7 +327,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
              ctx,
              ctx.chat.id,
              replyGenerating.message_id,
-              modelHeader + escapeMarkdown(fullResponse),
+              modelHeader + formatThinkingMessage(fullResponse),
              { parse_mode: 'Markdown' }
            );
            lastUpdateCharCount = fullResponse.length;
@@ -370,7 +341,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
              ctx,
              ctx.chat.id,
              replyGenerating.message_id,
-              modelHeader + escapeMarkdown(fullResponse),
+              modelHeader + formatThinkingMessage(fullResponse),
              { parse_mode: 'Markdown' }
            );
            lastUpdateCharCount = fullResponse.length;
@@ -379,6 +350,10 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
        }
      }
    }
+    if (model === thinking_model && !showThinking) {
+      const cleanedResponse = finalResponseText.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
+      return { success: true, response: cleanedResponse };
+    }
    status = Strings.ai.statusRendering;
    modelHeader = Strings.ai.modelHeader
      .replace("{model}", model)
@@ -388,7 +363,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
      ctx,
      ctx.chat.id,
      replyGenerating.message_id,
-      modelHeader + escapeMarkdown(fullResponse),
+      modelHeader + formatThinkingMessage(fullResponse),
      { parse_mode: 'Markdown' }
    );
    const responseCharCount = fullResponse.length;
@@ -432,13 +407,13 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
        console.error("[✨ AI | !] Pull error:", pullMsg);
        return {
          success: false,
-          error: `❌ Something went wrong while pulling ${escapeMarkdown(model)}: ${escapeMarkdown(pullMsg)}`,
+          error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
        };
      }
      console.log(`[✨ AI] ${model} pulled successfully`);
      return {
        success: true,
-        response: Strings.ai.pulled.replace("{model}", escapeMarkdown(model)),
+        response: Strings.ai.pulled.replace("{model}", model),
      };
    }
  }
@@ -449,9 +424,8 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
  }
}

-async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string) {
-  const Strings = getStrings(languageCode(ctx));
-  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId);
+async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean) {
+  const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId, Strings, showThinking);
  if (!aiResponse) return;
  if (!ctx.chat) return;
  if (aiResponse.success && aiResponse.response) {
@@ -461,11 +435,17 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
      .replace("{temperature}", aiTemperature)
      .replace("{status}", status) + "\n\n";
    const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
+    let finalResponse = aiResponse.response;
+    if (model === thinking_model) {
+      finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`)
+        .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`);
+    }
+
    await rateLimiter.editMessageWithRetry(
      ctx,
      ctx.chat.id,
      replyGenerating.message_id,
-      modelHeader + sanitizeMarkdownForTelegram(aiResponse.response) + urlWarning,
+      modelHeader + sanitizeMarkdownForTelegram(finalResponse) + urlWarning,
      { parse_mode: 'Markdown' }
    );
    return;
@@ -480,7 +460,7 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
  );
}

-async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeof schema>): Promise<{ user: User; Strings: ReturnType<typeof getStrings>; languageCode: string; customAiModel: string; aiTemperature: number }> {
+async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeof schema>): Promise<{ user: User; Strings: ReturnType<typeof getStrings>; languageCode: string; customAiModel: string; aiTemperature: number, showThinking: boolean }> {
  const userArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
  let user = userArr[0];
  if (!user) {
@@ -488,10 +468,10 @@ async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase<typeo
    const newUserArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
    user = newUserArr[0];
    const Strings = getStrings(user.languageCode);
-    return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature };
+    return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking };
  }
  const Strings = getStrings(user.languageCode);
-  return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature };
+  return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking };
}

export function getModelLabelByName(name: string): string {
@@ -547,7 +527,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
 
  async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') {
    const reply_to_message_id = replyToMessageId(ctx);
-    const { user, Strings, customAiModel, aiTemperature } = await getUserWithStringsAndModel(ctx, db);
+    const { user, Strings, customAiModel, aiTemperature, showThinking } = await getUserWithStringsAndModel(ctx, db);
    const message = ctx.message.text;
    const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown";
 
@@ -586,7 +566,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
    });
    const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg));
-    await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId);
+    await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId, Strings, showThinking);
  };

  if (isProcessing) {
@@ -7,7 +7,8 @@ import * as schema from '../db/schema';
 import { eq } from 'drizzle-orm';
 import { ensureUserInDb } from '../utils/ensure-user';
 import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
-import { models, getModelLabelByName } from './ai';
+import { getModelLabelByName } from './ai';
+import { models } from '../../config/ai';
 import { langs } from '../locales/config';
 
 type UserRow = typeof schema.usersTable.$inferSelect;
@@ -55,11 +56,14 @@ function getSettingsMenu(user: UserRow, Strings: any): SettingsMenu {
    inline_keyboard: [
      [
        { text: `✨ ${Strings.settings.ai.aiEnabled}: ${user.aiEnabled ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: `settings_aiEnabled_${userId}` },
-        { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: `settings_aiModel_${userId}` }
+        { text: `🧠 ${Strings.settings.ai.aiModel}: ${getModelLabelByName(user.customAiModel)}`, callback_data: `settings_aiModel_0_${userId}` }
      ],
      [
        { text: `🌡️ ${Strings.settings.ai.aiTemperature}: ${user.aiTemperature}`, callback_data: `settings_aiTemperature_${userId}` },
        { text: `🌐 ${langLabel}`, callback_data: `settings_language_${userId}` }
+      ],
+      [
+        { text: `🧠 ${Strings.settings.ai.showThinking}: ${user.showThinking ? Strings.settings.enabled : Strings.settings.disabled}`, callback_data: `settings_showThinking_${userId}` }
      ]
    ]
  }
@@ -83,6 +87,22 @@ function logSettingsAccess(action: string, ctx: Context, allowed: boolean, expec
  }
}

+function handleTelegramError(err: any, context: string) {
+  const description = err?.response?.description || '';
+  const ignoredErrors = [
+    'query is too old',
+    'query ID is invalid',
+    'message is not modified',
+    'message to edit not found',
+  ];
+
+  const isIgnored = ignoredErrors.some(errorString => description.includes(errorString));
+
+  if (!isIgnored) {
+    console.error(`[${context}] Unexpected Telegram error:`, err);
+  }
+}
+
export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
  bot.start(spamwatchMiddleware, async (ctx: Context) => {
    const { user, Strings } = await getUserAndStrings(ctx, db);
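
This helper folds the error filter that was previously copy-pasted into every settings handler into one place; each catch block below shrinks to a single call, e.g.:

```typescript
try {
  await ctx.editMessageText(Strings.settings.ai.selectSeries, { parse_mode: 'Markdown' });
} catch (err) {
  // Stale callback queries and no-op edits are expected races and stay silent;
  // anything else is logged with the handler's name as context.
  handleTelegramError(err, 'settings_back');
}
```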
@@ -155,7 +175,26 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
    await updateSettingsKeyboard(ctx, updatedUser, Strings);
  });

-  bot.action(/^settings_aiModel_\d+$/, async (ctx) => {
+  bot.action(/^settings_showThinking_\d+$/, async (ctx) => {
+    const data = (ctx.callbackQuery as any).data;
+    const userId = extractUserIdFromCallback(data);
+    const allowed = !!userId && String(ctx.from.id) === userId;
+    logSettingsAccess('settings_showThinking', ctx, allowed, userId);
+    if (!allowed) {
+      const { Strings } = await getUserAndStrings(ctx, db);
+      return ctx.answerCbQuery(getNotAllowedMessage(Strings), { show_alert: true });
+    }
+    await ctx.answerCbQuery();
+    const { user, Strings } = await getUserAndStrings(ctx, db);
+    if (!user) return;
+    await db.update(schema.usersTable)
+      .set({ showThinking: !user.showThinking })
+      .where(eq(schema.usersTable.telegramId, String(user.telegramId)));
+    const updatedUser = (await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(user.telegramId)), limit: 1 }))[0];
+    await updateSettingsKeyboard(ctx, updatedUser, Strings);
+  });
+
+  bot.action(/^settings_aiModel_(\d+)_(\d+)$/, async (ctx) => {
    const data = (ctx.callbackQuery as any).data;
    const userId = extractUserIdFromCallback(data);
    const allowed = !!userId && String(ctx.from.id) === userId;
@@ -167,30 +206,54 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
    await ctx.answerCbQuery();
    const { user, Strings } = await getUserAndStrings(ctx, db);
    if (!user) return;
+
+    const match = data.match(/^settings_aiModel_(\d+)_/);
+    if (!match) return;
+
+    const page = parseInt(match[1], 10);
+    const pageSize = 4;
+    const start = page * pageSize;
+    const end = start + pageSize;
+
+    const paginatedModels = models.slice(start, end);
+
+    const buttons = paginatedModels.map((series, idx) => {
+      const originalIndex = start + idx;
+      const isSelected = series.models.some(m => m.name === user.customAiModel);
+      const label = isSelected ? `✅ ${series.label}` : series.label;
+      return { text: label, callback_data: `selectseries_${originalIndex}_${user.telegramId}` };
+    });
+
+    const navigationButtons: any[] = [];
+    if (page > 0) {
+      navigationButtons.push({ text: Strings.varStrings.varBack, callback_data: `settings_aiModel_${page - 1}_${user.telegramId}` });
+    }
+    if (end < models.length) {
+      navigationButtons.push({ text: Strings.varStrings.varMore, callback_data: `settings_aiModel_${page + 1}_${user.telegramId}` });
+    }
+
+    const keyboard: any[][] = [];
+    for (const button of buttons) {
+      keyboard.push([button]);
+    }
+
+    if (navigationButtons.length > 0) {
+      keyboard.push(navigationButtons);
+    }
+    keyboard.push([{ text: `${Strings.varStrings.varBack}`, callback_data: `settings_back_${user.telegramId}` }]);
+
    try {
      await ctx.editMessageText(
        `${Strings.settings.ai.selectSeries}`,
        {
          parse_mode: 'Markdown',
          reply_markup: {
-            inline_keyboard: models.map((series, idx) => [
-              { text: series.label, callback_data: `selectseries_${idx}_${user.telegramId}` }
-            ]).concat([[
-              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_back_${user.telegramId}` }
-            ]])
+            inline_keyboard: keyboard
          }
        }
      );
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'settings_aiModel');
    }
  });
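
With the eight model series defined in `config/ai.ts` and a page size of 4, the keyboard spans two pages. A standalone sketch of the paging arithmetic (series count assumed from the config above):

```typescript
const totalSeries = 8; // series defined in config/ai.ts
const pageSize = 4;

for (let page = 0; page * pageSize < totalSeries; page++) {
  const start = page * pageSize; // 0, then 4
  const end = start + pageSize;  // 4, then 8
  const hasPrev = page > 0;            // renders the "back" nav button
  const hasNext = end < totalSeries;   // renders the "more" nav button
  console.log({ page, start, end, hasPrev, hasNext });
}
// page 0 -> series 0..3 with "more"; page 1 -> series 4..7 with "back".
```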
@@ -211,30 +274,26 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
    const seriesIdx = parseInt(match[1], 10);
    const series = models[seriesIdx];
    if (!series) return;
+    const pageSize = 4;
+    const page = Math.floor(seriesIdx / pageSize);
    const desc = user.languageCode === 'pt' ? series.descriptionPt : series.descriptionEn;
    try {
      await ctx.editMessageText(
-        `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label)}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
+        `${Strings.settings.ai.seriesDescription.replace('{seriesDescription}', desc)}\n\n${Strings.settings.ai.selectParameterSize.replace('{seriesLabel}', series.label).replace(' [ & Uncensored ]', '')}\n\n${Strings.settings.ai.parameterSizeExplanation}`,
        {
          reply_markup: {
-            inline_keyboard: series.models.map((m, idx) => [
-              { text: `${m.label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }
-            ]).concat([[
-              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${user.telegramId}` }
-            ]])
+            inline_keyboard: series.models.map((m, idx) => {
+              const isSelected = m.name === user.customAiModel;
+              const label = isSelected ? `✅ ${m.label}` : m.label;
+              return [{ text: `${label} (${m.parameterSize})`, callback_data: `setmodel_${seriesIdx}_${idx}_${user.telegramId}` }];
+            }).concat([[
+              { text: `${Strings.varStrings.varBack}`, callback_data: `settings_aiModel_${page}_${user.telegramId}` }
+            ]])
          }
        }
      );
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('Unexpected Telegram error:', err);
+      handleTelegramError(err, 'selectseries');
    }
  });
@@ -278,15 +337,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
        });
      }
    } catch (err) {
-      if (
-        !(
-          err.response.description?.includes('query is too old') ||
-          err.response.description?.includes('query ID is invalid') ||
-          err.response.description?.includes('message is not modified') ||
-          err.response.description?.includes('message to edit not found')
-        )
-      )
-        console.error('[Settings] Unexpected Telegram error:', err);
+      handleTelegramError(err, 'setmodel');
    }
  });
@@ -320,15 +371,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      }
    );
  } catch (err) {
-    if (
-      !(
-        err.response.description?.includes('query is too old') ||
-        err.response.description?.includes('query ID is invalid') ||
-        err.response.description?.includes('message is not modified') ||
-        err.response.description?.includes('message to edit not found')
-      )
-    )
-      console.error('Unexpected Telegram error:', err);
+    handleTelegramError(err, 'settings_aiTemperature');
  }
});
@@ -354,15 +397,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      ])
    });
  } catch (err) {
-    if (
-      !(
-        err.response.description?.includes('query is too old') ||
-        err.response.description?.includes('query ID is invalid') ||
-        err.response.description?.includes('message is not modified') ||
-        err.response.description?.includes('message to edit not found')
-      )
-    )
-      console.error('Unexpected Telegram error:', err);
+    handleTelegramError(err, 'show_more_temps');
  }
});
@@ -409,15 +444,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      }
    );
  } catch (err) {
-    if (
-      !(
-        err.response.description?.includes('query is too old') ||
-        err.response.description?.includes('query ID is invalid') ||
-        err.response.description?.includes('message is not modified') ||
-        err.response.description?.includes('message to edit not found')
-      )
-    )
-      console.error('Unexpected Telegram error:', err);
+    handleTelegramError(err, 'settings_language');
  }
});
@@ -450,15 +477,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      });
    }
  } catch (err) {
-    if (
-      !(
-        err.response.description?.includes('query is too old') ||
-        err.response.description?.includes('query ID is invalid') ||
-        err.response.description?.includes('message is not modified') ||
-        err.response.description?.includes('message to edit not found')
-      )
-    )
-      console.error('[Settings] Unexpected Telegram error:', err);
+    handleTelegramError(err, 'settings_back');
  }
});
@@ -500,15 +519,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
      });
    }
  } catch (err) {
-    if (
-      !(
-        err.response.description?.includes('query is too old') ||
-        err.response.description?.includes('query ID is invalid') ||
-        err.response.description?.includes('message is not modified') ||
-        err.response.description?.includes('message to edit not found')
-      )
-    )
-      console.error('[Settings] Unexpected Telegram error:', err);
+    handleTelegramError(err, 'setlang');
  }
});
|
@ -13,6 +13,7 @@ export const usersTable = pgTable("users", {
|
||||||
firstName: varchar({ length: 255 }).notNull(),
|
firstName: varchar({ length: 255 }).notNull(),
|
||||||
lastName: varchar({ length: 255 }).notNull(),
|
lastName: varchar({ length: 255 }).notNull(),
|
||||||
aiEnabled: boolean().notNull().default(false),
|
aiEnabled: boolean().notNull().default(false),
|
||||||
|
showThinking: boolean().notNull().default(false),
|
||||||
customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"),
|
customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"),
|
||||||
aiTemperature: real().notNull().default(0.9),
|
aiTemperature: real().notNull().default(0.9),
|
||||||
aiRequests: integer().notNull().default(0),
|
aiRequests: integer().notNull().default(0),
|
||||||
|
|
|
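
Because the column is declared NOT NULL with a default, existing rows read as `false`, and Drizzle's inferred row type picks the field up automatically; a minimal sketch (import path illustrative):

```typescript
import * as schema from "./src/db/schema"; // path illustrative

// $inferSelect now includes the new flag as a required boolean.
type User = typeof schema.usersTable.$inferSelect;
const showThinking: User["showThinking"] = false;
```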
@@ -74,16 +74,16 @@
   "askGenerating": "✨ Generating response with {model}...",
   "askNoMessage": "✨ You need to ask me a question!",
   "languageCode": "Language",
-  "thinking": "Thinking...",
-  "finishedThinking": "Done.",
+  "thinking": "`🧠 Thinking...`",
+  "finishedThinking": "`🧠 Done thinking.`",
   "urlWarning": "\n\n⚠️ Note: The model cannot access or visit links!",
   "inQueue": "ℹ️ You are {position} in the queue.",
   "startingProcessing": "✨ Starting to process your request...",
-  "systemPrompt": "You are a friendly assistant called {botName}, capable of Telegram MarkdownV2.\nYou are currently in a chat with a user, who has sent a message to you.\nCurrent Date/Time (UTC): {date}\n\n---\n\nRespond to the user's message:\n{message}",
+  "systemPrompt": "You are a friendly assistant called {botName}.\nCurrent Date/Time (UTC): {date}\n\n---\n\nUser message:\n{message}",
   "statusWaitingRender": "⏳ Waiting to Render...",
   "statusRendering": "🖼️ Rendering...",
   "statusComplete": "✅ Complete!",
-  "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+  "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
   "noChatFound": "No chat found",
   "pulled": "✅ Pulled {model} successfully, please retry the command.",
   "selectTemperature": "*Please select a temperature:*",
@@ -125,11 +125,12 @@
   "aiTemperatureSetTo": "AI Temperature set to {aiTemperature}",
   "selectSeries": "*Please select a model series.*",
   "seriesDescription": "{seriesDescription}",
-  "selectParameterSize": "Please select a parameter size for {seriesLabel}.",
+  "selectParameterSize": "*Please select a parameter size for {seriesLabel}*.",
   "parameterSizeExplanation": "Parameter size (e.g. 2B, 4B) refers to the number of parameters in the model. Larger models may be more capable but require more resources.",
   "modelSetTo": "Model set to {aiModel} ({parameterSize})",
   "selectTemperature": "*Please select a temperature:*",
-  "temperatureExplanation": "Temperature controls the randomness of the AI's responses. Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random."
+  "temperatureExplanation": "Temperature controls the randomness of the AI's responses. Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random.",
+  "showThinking": "Show Model Thinking"
 },
 "selectLanguage": "*Please select a language:*",
 "languageCodeSetTo": "Language set to {languageCode}",
@@ -69,26 +69,26 @@
   "helpDesc": "✨ *Comandos de IA*\n\n- /ask `<prompt>`: Fazer uma pergunta a uma IA\n- /think `<prompt>`: Fazer uma pergunta a um modelo de pensamento\n- /ai `<prompt>`: Fazer uma pergunta a um modelo de IA personalizado\n- /aistats: Mostra suas estatísticas de uso de IA",
   "disabled": "A AIApi foi desativada\\.",
   "disabledForUser": "As funções de IA estão desativadas para a sua conta. Você pode ativá-las com o comando /settings.",
-  "pulling": "O modelo {model} não foi encontrado localmente, baixando\\.\\.\\.",
-  "askGenerating": "Gerando resposta com {model}\\.\\.\\.",
-  "askNoMessage": "Você precisa fazer uma pergunta\\.",
-  "thinking": "Pensando\\.\\.\\.",
-  "finishedThinking": "Pronto\\.",
+  "pulling": "🔄 Modelo {model} não encontrado localmente, baixando...",
+  "askGenerating": "✨ Gerando resposta com {model}...",
+  "askNoMessage": "⚠️ Você precisa fazer uma pergunta.",
+  "thinking": "`🧠 Pensando...`",
+  "finishedThinking": "`🧠 Pensamento concluido.`",
   "urlWarning": "\n\n⚠️ Nota: O modelo de IA não pode acessar ou visitar links!",
   "inQueue": "ℹ️ Você é o {position} na fila.",
-  "startingProcessing": "✨ Começando a processar o seu pedido\\.\\.\\.",
+  "startingProcessing": "✨ Começando a processar o seu pedido...",
   "aiEnabled": "IA",
-  "aiModel": "Modelo",
+  "aiModel": "Modelo de IA",
   "aiTemperature": "Temperatura",
-  "selectSeries": "*Por favor, selecione uma série de modelos.*",
+  "selectSeries": "*Por favor, selecione uma série de modelos de IA.*",
   "seriesDescription": "{seriesDescription}",
   "selectParameterSize": "Por favor, selecione um tamanho de parâmetro para {seriesLabel}.",
   "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
-  "systemPrompt": "Você é um assistente de Telegram chamado {botName}, capaz de Telegram MarkdownV2.\nVocê está em um chat com um usuário, que enviou uma mensagem para você.\nData/Hora atual (UTC): {date}\n\n---\n\nResponda à mensagem do usuário:\n{message}",
+  "systemPrompt": "Você é um assistente de Telegram chamado {botName}.\nData/Hora atual (UTC): {date}\n\n---\n\nMensagem do usuário:\n{message}",
   "statusWaitingRender": "⏳ Aguardando renderização...",
   "statusRendering": "🖼️ Renderizando...",
   "statusComplete": "✅ Completo!",
-  "modelHeader": "🤖 *{model}* | 🌡️ *{temperature}* | {status}",
+  "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}",
   "noChatFound": "Nenhum chat encontrado",
   "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente."
 },
@@ -132,7 +132,8 @@
   "parameterSizeExplanation": "O tamanho do parâmetro (ex: 2B, 4B) refere-se ao número de parâmetros do modelo. Modelos maiores podem ser mais capazes, mas exigem mais recursos.",
   "modelSetTo": "Modelo definido para {aiModel} ({parameterSize})",
   "selectTemperature": "*Por favor, selecione uma temperatura:*",
-  "temperatureExplanation": "A temperatura controla a aleatoriedade das respostas da IA. Valores mais baixos (ex: 0.2) tornam o modelo mais focado e determinístico, enquanto valores mais altos (ex: 1.2 ou mais) tornam as respostas mais criativas e aleatórias."
+  "temperatureExplanation": "A temperatura controla a aleatoriedade das respostas da IA. Valores mais baixos (ex: 0.2) tornam o modelo mais focado e determinístico, enquanto valores mais altos (ex: 1.2 ou mais) tornam as respostas mais criativas e aleatórias.",
+  "showThinking": "Mostrar Pensamento do Modelo"
 },
 "selectLanguage": "*Por favor, selecione um idioma:*",
 "languageCodeSetTo": "Idioma definido para {languageCode}",
@@ -47,6 +47,7 @@ export async function ensureUserInDb(ctx, db) {
    lastName,
    languageCode,
    aiEnabled: false,
+    showThinking: false,
    customAiModel: "deepseek-r1:1.5b",
    aiTemperature: 0.9,
    aiRequests: 0,