clean, fix model name issues

commit 6b1c7f0da9
parent 2c696654aa
Author: Aidan
Date: 2025-07-03 01:43:00 -04:00

@@ -160,47 +160,40 @@ export async function preChecks() {
     "ollamaApi",
     "flashModel",
     "thinkingModel",
-  ]
-  let checked = 0;
+  ];
   for (const env of envs) {
     if (!process.env[env]) {
-      console.error(`[✨ AI | !] ❌ ${env} not set!`)
-      return false
+      console.error(`[✨ AI | !] ❌ ${env} not set!`);
+      return false;
     }
-    checked++;
   }
-  const ollamaApi = process.env.ollamaApi
-  if (!ollamaApi) {
-    console.error("[✨ AI | !] ❌ ollamaApi not set!")
-    return false
-  }
-  let ollamaOk = false
+  const ollamaApi = process.env.ollamaApi!;
+  let ollamaOk = false;
   for (let i = 0; i < 10; i++) {
     try {
-      const res = await axios.get(ollamaApi, { timeout: 2000 })
-      if (res && res.data && typeof res.data === 'object' && 'ollama' in res.data) {
-        ollamaOk = true
-        break
-      }
-      if (res && res.status === 200) {
-        ollamaOk = true
-        break
+      const res = await axios.get(ollamaApi, { timeout: 2000 });
+      if (res.status === 200) {
+        ollamaOk = true;
+        break;
       }
     } catch (err) {
-      await new Promise(resolve => setTimeout(resolve, 1000))
+      if (i < 9) {
+        await new Promise((resolve) => setTimeout(resolve, 1000));
+      }
     }
   }
   if (!ollamaOk) {
-    console.error("[✨ AI | !] ❌ Ollama API is not responding at ", ollamaApi)
-    return false
+    console.error(`[✨ AI | !] ❌ Ollama API is not responding at ${ollamaApi}`);
+    return false;
   }
-  checked++;
-  console.log(`[✨ AI] Pre-checks passed [${checked}/${envs.length + 1}]`)
+  console.log(`[✨ AI] Pre-checks passed.`);
   const modelCount = models.reduce((acc, model) => acc + model.models.length, 0);
   console.log(`[✨ AI] Found ${modelCount} models.`);
-  return true
+  return true;
 }
 
 function isAxiosError(error: unknown): error is { response?: { data?: { error?: string }, status?: number, statusText?: string }, request?: unknown, message?: string } {
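
Note on the rewritten pre-check loop above: it is the usual poll-until-ready pattern against the Ollama base URL, which answers GET / with HTTP 200 once the server is up. A standalone sketch of the same pattern; the function name and default attempt/delay values are illustrative, not taken from the commit:

```typescript
import axios from "axios";

// Poll a base URL until it answers 200 or we run out of attempts.
async function waitForApi(baseUrl: string, attempts = 10, delayMs = 1000): Promise<boolean> {
  for (let i = 0; i < attempts; i++) {
    try {
      const res = await axios.get(baseUrl, { timeout: 2000 });
      if (res.status === 200) return true;
    } catch {
      // Connection refused or timed out; fall through to the retry delay.
    }
    if (i < attempts - 1) {
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
  return false;
}
```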
@@ -236,18 +229,18 @@ function containsUrls(text: string): boolean {
   return text.includes('http://') || text.includes('https://') || text.includes('.com') || text.includes('.net') || text.includes('.org') || text.includes('.io') || text.includes('.ai') || text.includes('.dev')
 }
 
-async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean): Promise<{ success: boolean; response?: string; error?: string }> {
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase<typeof schema>, userId: string, Strings: ReturnType<typeof getStrings>, showThinking: boolean): Promise<{ success: boolean; response?: string; error?: string, messageType?: 'generation' | 'system' }> {
   if (!ctx.chat) {
     return {
       success: false,
       error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound),
     };
   }
-  const cleanedModelName = model.replace('hf.co/', '');
+  const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model;
   let status = Strings.ai.statusWaitingRender;
   let modelHeader = Strings.ai.modelHeader
-    .replace("{model}", cleanedModelName)
-    .replace("{temperature}", aiTemperature)
+    .replace("{model}", `\`${cleanedModelName}\``)
+    .replace("{temperature}", String(aiTemperature))
     .replace("{status}", status) + "\n\n";
   const promptCharCount = originalMessage.length;
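
This hunk is the "model name issues" fix from the commit title: the old `model.replace('hf.co/', '')` stripped only the registry prefix, so a Hugging Face id still rendered as `org/model:tag`; taking the last path segment shows just the model name. With a hypothetical hf.co id:

```typescript
// Keep only the last path segment of a model id; ids without "/" pass through.
const clean = (model: string) =>
  model.includes("/") ? model.split("/").pop()! : model;

clean("hf.co/unsloth/gemma-3-1b-it-GGUF:Q8_0"); // "gemma-3-1b-it-GGUF:Q8_0"
clean("llama3.2:3b");                           // "llama3.2:3b"
```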
@@ -274,13 +267,10 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     }
   );
   let fullResponse = "";
-  let thoughts = "";
   let lastUpdateCharCount = 0;
   let sentHeader = false;
   let firstChunk = true;
   const stream: NodeJS.ReadableStream = aiResponse.data as any;
-  let thinkingMessageSent = false;
-  let finalResponseText = '';
 
   const formatThinkingMessage = (text: string) => {
     const withPlaceholders = text
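
The `aiResponse.data as any` cast above implies the request was made with axios and `responseType: 'stream'`. For reference, a self-contained sketch of consuming such a stream from Ollama's `/api/generate`, which emits newline-delimited JSON; the partial-line buffering is my addition and is not shown in the diff:

```typescript
import axios from "axios";

// Stream a completion from Ollama and accumulate the text fragments.
// Assumes Ollama's NDJSON format: one JSON object per line, each with a
// "response" fragment and a "done" flag.
async function streamGenerate(api: string, model: string, prompt: string): Promise<string> {
  const res = await axios.post(
    `${api}/api/generate`,
    { model, prompt, stream: true },
    { responseType: "stream" }
  );
  const stream = res.data as NodeJS.ReadableStream;
  let full = "";
  let buf = "";
  for await (const chunk of stream as AsyncIterable<Buffer>) {
    buf += chunk.toString("utf8");
    const lines = buf.split("\n");
    buf = lines.pop() ?? ""; // keep a possibly incomplete trailing line
    for (const line of lines) {
      if (!line.trim()) continue;
      const part = JSON.parse(line);
      full += part.response ?? "";
      if (part.done) return full;
    }
  }
  return full;
}
```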
@@ -319,7 +309,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       if (firstChunk) {
         status = Strings.ai.statusWaitingRender;
         modelHeader = Strings.ai.modelHeader
-          .replace("{model}", cleanedModelName)
+          .replace("{model}", `\`${cleanedModelName}\``)
           .replace("{temperature}", aiTemperature)
           .replace("{status}", status) + "\n\n";
         await rateLimiter.editMessageWithRetry(
@@ -353,7 +343,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
       status = Strings.ai.statusRendering;
       modelHeader = Strings.ai.modelHeader
-        .replace("{model}", cleanedModelName)
+        .replace("{model}", `\`${cleanedModelName}\``)
         .replace("{temperature}", aiTemperature)
         .replace("{status}", status) + "\n\n";
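
These two hunks wrap the model name in backticks but, unlike the other call sites in this commit, still pass `aiTemperature` (a number) straight into `.replace()`, relying on implicit coercion. Since the same header chain is rebuilt at several call sites, a small helper would remove the repetition; this is a suggestion, not part of the commit:

```typescript
// Build the status header shown above the streamed response.
// Hypothetical helper; the commit inlines this chain at each call site.
function buildModelHeader(template: string, model: string, temperature: number, status: string): string {
  return template
    .replace("{model}", `\`${model}\``)
    .replace("{temperature}", String(temperature))
    .replace("{status}", status) + "\n\n";
}
```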
@@ -382,6 +372,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
     return {
       success: true,
       response: patchedResponse,
+      messageType: 'generation'
     };
   } catch (error: unknown) {
     const errorMsg = extractAxiosErrorMessage(error);
@@ -395,7 +386,7 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
         ctx.chat!.id,
         replyGenerating.message_id,
         undefined,
-        Strings.ai.pulling.replace("{model}", model),
+        Strings.ai.pulling.replace("{model}", `\`${cleanedModelName}\``),
         { parse_mode: 'Markdown' }
       );
       console.log(`[✨ AI] Pulling ${model} from ollama...`);
@@ -413,13 +404,15 @@ async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Me
         console.error("[✨ AI | !] Pull error:", pullMsg);
         return {
           success: false,
-          error: `❌ Something went wrong while pulling ${model}: ${pullMsg}`,
+          error: `❌ Something went wrong while pulling \`${model}\`: ${pullMsg}`,
+          messageType: 'system'
         };
       }
       console.log(`[✨ AI] ${model} pulled successfully`);
       return {
         success: true,
-        response: Strings.ai.pulled.replace("{model}", model),
+        response: Strings.ai.pulled.replace("{model}", `\`${cleanedModelName}\``),
+        messageType: 'system'
       };
     }
   }
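
When generation fails because the model is missing, this path pulls it and now returns a `'system'` message instead of model output. The pull itself presumably goes through Ollama's pull endpoint; a sketch assuming the documented REST shape, where POST `/api/pull` with `stream: false` answers once with a final status (the commit's actual pull call sits outside this hunk):

```typescript
import axios from "axios";

// Ask the Ollama server to download a model into its local registry.
async function pullModel(api: string, model: string): Promise<void> {
  const res = await axios.post(`${api}/api/pull`, { model, stream: false });
  if (res.data?.status !== "success") {
    throw new Error(`pull did not complete: ${JSON.stringify(res.data)}`);
  }
}
```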
@@ -435,11 +428,22 @@ async function handleAiReply(ctx: TextContext, model: string, prompt: string, re
   if (!aiResponse) return;
   if (!ctx.chat) return;
   if (aiResponse.success && aiResponse.response) {
-    const cleanedModelName = model.replace('hf.co/', '');
+    if (aiResponse.messageType === 'system') {
+      await rateLimiter.editMessageWithRetry(
+        ctx,
+        ctx.chat.id,
+        replyGenerating.message_id,
+        aiResponse.response,
+        { parse_mode: 'Markdown' }
+      );
+      return;
+    }
+    const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model;
     const status = Strings.ai.statusComplete;
     const modelHeader = Strings.ai.modelHeader
-      .replace("{model}", cleanedModelName)
-      .replace("{temperature}", aiTemperature)
+      .replace("{model}", `\`${cleanedModelName}\``)
+      .replace("{temperature}", String(aiTemperature))
       .replace("{status}", status) + "\n\n";
     const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : '';
     let finalResponse = aiResponse.response;
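
The `messageType` field added in this commit effectively makes the return value a tagged result: `'system'` marks final status text (pull notices) that replaces the placeholder as-is, as in the early return above, while `'generation'` marks model output that still gets the header and URL warning. Written out as a named type for clarity; the name `AiResult` is illustrative, the commit keeps the type inline in the signature:

```typescript
type AiResult = {
  success: boolean;
  response?: string;            // model output or status text
  error?: string;               // user-facing error, already formatted
  messageType?: "generation" | "system"; // 'system' = show as-is, no header
};
```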
@@ -541,18 +545,12 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
       const message = ctx.message.text;
       const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown";
-      let model: string;
-      let fixedMsg: string;
-
-      if (command === 'ai') {
-        model = customAiModel || flash_model;
-        fixedMsg = message.replace(/^\/ai(@\w+)?\s*/, "").trim();
-        logger.logCmdStart(author, command, model);
-      } else {
-        model = command === 'ask' ? flash_model : thinking_model;
-        fixedMsg = message.replace(/^\/(ask|think)(@\w+)?\s*/, "").trim();
-        logger.logCmdStart(author, command, model);
-      }
+      const model = command === 'ai'
+        ? (customAiModel || flash_model)
+        : (command === 'ask' ? flash_model : thinking_model);
+      const fixedMsg = message.replace(new RegExp(`^/${command}(@\\w+)?\\s*`), "").trim();
+      logger.logCmdStart(author, command, model);
 
       if (!process.env.ollamaApi) {
         await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) });
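
The two hand-written command regexes collapse into one built from `command`; the doubled backslashes are needed because the pattern now comes from a string, and the optional `(@\w+)?` group still strips a bot-mention suffix. How it behaves, with a hypothetical bot name:

```typescript
// Strip the leading "/command" (optionally "@BotName") from a message.
const strip = (command: string, message: string) =>
  message.replace(new RegExp(`^/${command}(@\\w+)?\\s*`), "").trim();

strip("ai", "/ai@SomeBot what is a monad?"); // "what is a monad?"
strip("think", "/think 2 + 2");              // "2 + 2"
```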
@@ -571,7 +569,7 @@ export default (bot: Telegraf<Context>, db: NodePgDatabase<typeof schema>) => {
       const task = async () => {
         const modelLabel = getModelLabelByName(model);
-        const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", modelLabel), {
+        const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", `\`${modelLabel}\``), {
           parse_mode: 'Markdown',
           ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
         });
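
Throughout the commit, dynamic names are wrapped in backticks so they render as inline code under `parse_mode: 'Markdown'`. A label that itself contains a backtick would terminate the code span early and break the parse; a defensive wrapper (not in the commit) could strip them first:

```typescript
// Wrap text as inline code for Telegram's legacy Markdown, dropping any
// backticks that would end the code span early. Hypothetical helper.
const mdCode = (s: string) => `\`${s.replace(/`/g, "")}\``;

mdCode("llama3.2:3b"); // "`llama3.2:3b`"
```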