From 81294f572121887162bc329ad91909b6769e6dd4 Mon Sep 17 00:00:00 2001 From: Lucas Gabriel Date: Sat, 28 Jun 2025 16:22:15 -0300 Subject: [PATCH] [FEATURE] Add AI-based /ask command (complementing #54) (#56) * docs: add ai documentation * docker: update docker files for ai/regular versions, lint * feat: add initial /ask command * Delete docker-compose.yml * docker: ignore ollama folder in builds * fix: add emojis to help commands, capitalize, add ai commands to help menu * feat: add better logging, thought handling improvements * bug fixes, better logging and seperation of ai, update docs for ai * clean, remove prompt and user info from logs, more docs edits * system prompt change (plaintext only), parse out /think * clean up, axios tweaks * cleanup, logging of ratelimit --------- Co-authored-by: Aidan --- .dockerignore | 3 +- .env.example | 5 + .gitignore | 8 +- README.md | 46 ++- docker-compose.yml.ai.example | 15 + ...-compose.yml => docker-compose.yml.example | 2 +- src/bot.ts | 15 +- src/commands/ai.ts | 287 ++++++++++++++++++ src/commands/help.ts | 7 +- src/locales/english.json | 44 +-- src/locales/portuguese.json | 7 +- src/utils/log.ts | 83 +++++ src/utils/rate-limiter.ts | 246 +++++++++++++++ 13 files changed, 733 insertions(+), 35 deletions(-) create mode 100644 docker-compose.yml.ai.example rename docker-compose.yml => docker-compose.yml.example (84%) create mode 100644 src/commands/ai.ts create mode 100644 src/utils/log.ts create mode 100644 src/utils/rate-limiter.ts diff --git a/.dockerignore b/.dockerignore index 33e390a..9fe19f3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,4 +4,5 @@ npm-debug.log .gitignore .env *.md -!README.md \ No newline at end of file +!README.md +ollama/ \ No newline at end of file diff --git a/.env.example b/.env.example index af81e1d..8e9cb3f 100644 --- a/.env.example +++ b/.env.example @@ -5,6 +5,11 @@ botSource = "https://github.com/ABOCN/TelegramBot" # insert token here botToken = "" +# ai features +ollamaEnabled = false +# ollamaApi = "http://ollama:11434" +# handlerTimeout = "600_000" # set higher if you expect to download larger models + # misc (botAdmins isnt a array here!) maxRetries = 9999 botAdmins = 00000000, 00000000, 00000000 diff --git a/.gitignore b/.gitignore index 6b42f1f..278fef8 100644 --- a/.gitignore +++ b/.gitignore @@ -144,4 +144,10 @@ yt-dlp ffmpeg # Bun -bun.lock* \ No newline at end of file +bun.lock* + +# Ollama +ollama/ + +# Docker +docker-compose.yml \ No newline at end of file diff --git a/README.md b/README.md index 8fa5b60..3cc7c99 100644 --- a/README.md +++ b/README.md @@ -10,12 +10,6 @@ Kowalski is a a simple Telegram bot made in Node.js. - You can find Kowalski at [@KowalskiNodeBot](https://t.me/KowalskiNodeBot) on Telegram. -## Translations - - -Translation status - - ## Self-host requirements > [!IMPORTANT] @@ -26,6 +20,11 @@ Kowalski is a a simple Telegram bot made in Node.js. - FFmpeg (only for the `/yt` command) - Docker and Docker Compose (only required for Docker setup) +### AI Requirements + +- High-end CPU *or* GPU (~ 6GB vRAM) +- If using CPU, enough RAM to load the models (~6GB w/ defaults) + ## Running locally (non-Docker setup) First, clone the repo with Git: @@ -55,9 +54,28 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make ### Using Docker Compose -1. **Make sure to setup your `.env` file first!** +1. **Copy compose file** -2. 
**Run the container**
+
+   _Without AI (Ollama)_
+
+   ```bash
+   mv docker-compose.yml.example docker-compose.yml
+   ```
+
+   _With AI (Ollama)_
+
+   ```bash
+   mv docker-compose.yml.ai.example docker-compose.yml
+   ```
+
+2. **Make sure to set up your `.env` file first!**
+
+   > [!TIP]
+   > If you intend to set up AI, the defaults for Docker are already included (just uncomment them) and don't need to be changed.
+   >
+   > Further setup may be needed for GPUs. See the Ollama documentation for more.
+
+3. **Run the container**
 
    ```bash
    docker compose up -d
    ```
@@ -81,6 +99,9 @@ If you prefer to use Docker directly, you can use these instructions instead.
    docker run -d --name kowalski --restart unless-stopped -v $(pwd)/.env:/usr/src/app/.env:ro kowalski
    ```
 
+> [!NOTE]
+> You must set up Ollama on your own if you would like to use AI features.
+
 ## .env Functions
 
 > [!IMPORTANT]
@@ -90,6 +111,9 @@
 - **botPrivacy**: Put the link to your bot privacy policy.
 - **maxRetries**: Maximum number of retries for a failing command on Kowalski. Default is 5. If the limit is hit, the bot will crash past this number.
 - **botToken**: Put your bot token that you created at [@BotFather](https://t.me/botfather).
+- **ollamaEnabled** (optional): Enables/disables AI features.
+- **ollamaApi** (optional): Ollama API endpoint used by the AI features; they are disabled if this is not set.
+- **handlerTimeout** (default: `600_000`): How long handlers will wait before timing out, in milliseconds. Set this high if using large AI models.
 - **botAdmins**: Put the ID of the people responsible for managing the bot. They can use some administrative + exclusive commands on any group.
 - **lastKey**: Last.fm API key, for use on `lastfm.js` functions, like see who is listening to what song and etc.
 - **weatherKey**: Weather.com API key, used for the `/weather` command.
@@ -106,6 +130,12 @@
    chmod +x src/plugins/yt-dlp/yt-dlp
    ```
 
+### AI
+
+**Q:** How can I disable AI features?
+
+**A:** AI features are disabled by default, unless you have set `ollamaEnabled` to `true` in your `.env` file. Set it back to `false` to disable them.
+
 ## Contributors
 
diff --git a/docker-compose.yml.ai.example b/docker-compose.yml.ai.example
new file mode 100644
index 0000000..2c516f7
--- /dev/null
+++ b/docker-compose.yml.ai.example
@@ -0,0 +1,15 @@
+services:
+  kowalski:
+    build: .
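+    # The bot can reach the ollama service below at http://ollama:11434 over the Compose network (uncomment ollamaApi in .env)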
+ container_name: kowalski + restart: unless-stopped + volumes: + - ./.env:/usr/src/app/.env:ro + environment: + - NODE_ENV=production + ollama: + image: ollama/ollama + container_name: kowalski-ollama + restart: unless-stopped + volumes: + - ./ollama:/root/.ollama \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml.example similarity index 84% rename from docker-compose.yml rename to docker-compose.yml.example index 0aab44a..f3bb819 100644 --- a/docker-compose.yml +++ b/docker-compose.yml.example @@ -6,4 +6,4 @@ services: volumes: - ./.env:/usr/src/app/.env:ro environment: - - NODE_ENV=production \ No newline at end of file + - NODE_ENV=production \ No newline at end of file diff --git a/src/bot.ts b/src/bot.ts index 3422e56..04d2c97 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -4,6 +4,7 @@ import fs from 'fs'; import { isOnSpamWatch } from './spamwatch/spamwatch'; import '@dotenvx/dotenvx'; import './plugins/ytDlpWrapper'; +import { preChecks } from './commands/ai'; // Ensures bot token is set, and not default value if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') { @@ -11,7 +12,17 @@ if (!process.env.botToken || process.env.botToken === 'InsertYourBotTokenHere') process.exit(1) } -const bot = new Telegraf(process.env.botToken); +// Detect AI and run pre-checks +if (process.env.ollamaEnabled === "true") { + if (!(await preChecks())) { + process.exit(1) + } +} + +const bot = new Telegraf( + process.env.botToken, + { handlerTimeout: Number(process.env.handlerTimeout) || 600_000 } +); const maxRetries = process.env.maxRetries || 5; let restartCount = 0; @@ -21,7 +32,7 @@ const loadCommands = () => { try { const files = fs.readdirSync(commandsPath) .filter(file => file.endsWith('.ts') || file.endsWith('.js')); - + files.forEach((file) => { try { const commandPath = path.join(commandsPath, file); diff --git a/src/commands/ai.ts b/src/commands/ai.ts new file mode 100644 index 0000000..0e27578 --- /dev/null +++ b/src/commands/ai.ts @@ -0,0 +1,287 @@ +// AI.TS +// by ihatenodejs/Aidan +// +// ----------------------------------------------------------------------- +// +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+//
+// For more information, please refer to <https://unlicense.org>
+
+import { isOnSpamWatch } from "../spamwatch/spamwatch"
+import spamwatchMiddlewareModule from "../spamwatch/Middleware"
+import { Telegraf, Context } from "telegraf"
+import type { Message } from "telegraf/types"
+import { replyToMessageId } from "../utils/reply-to-message-id"
+import { getStrings } from "../plugins/checklang"
+import { languageCode } from "../utils/language-code"
+import axios from "axios"
+import { rateLimiter } from "../utils/rate-limiter"
+import { logger } from "../utils/log"
+
+const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch)
+export const flash_model = "gemma3:4b"
+export const thinking_model = "deepseek-r1:1.5b"
+
+type TextContext = Context & { message: Message.TextMessage }
+
+export function sanitizeForJson(text: string): string {
+  return text
+    .replace(/\\/g, '\\\\')
+    .replace(/"/g, '\\"')
+    .replace(/\n/g, '\\n')
+    .replace(/\r/g, '\\r')
+    .replace(/\t/g, '\\t')
+}
+
+export async function preChecks() {
+  const envs = [
+    "ollamaApi",
+  ]
+
+  for (const env of envs) {
+    if (!process.env[env]) {
+      console.error(`[✨ AI | !] ❌ ${env} not set!`)
+      return false
+    }
+  }
+  console.log("[✨ AI] Pre-checks passed\n")
+  return true
+}
+
+async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string) {
+  const Strings = getStrings(languageCode(ctx))
+
+  if (!ctx.chat) {
+    return {
+      success: false,
+      error: Strings.unexpectedErr.replace("{error}", "No chat found"),
+    }
+  }
+
+  try {
+    const aiResponse = await axios.post(
+      `${process.env.ollamaApi}/api/generate`,
+      {
+        model,
+        prompt,
+        stream: true,
+      },
+      {
+        responseType: "stream",
+      }
+    )
+
+    let fullResponse = ""
+    let thoughts = ""
+    let lastUpdate = Date.now()
+
+    // Ollama streams newline-delimited JSON: each line carries a partial `response` and a final `done: true`
+    const stream = aiResponse.data
+    for await (const chunk of stream) {
+      const lines = chunk.toString().split('\n')
+      for (const line of lines) {
+        if (!line.trim()) continue
+        let ln
+        try {
+          ln = JSON.parse(line)
+        } catch (e) {
+          console.error("[✨ AI | !] Error parsing chunk:", e)
+          continue
+        }
+
+        if (model === thinking_model) {
+          if (ln.response.includes('<think>')) {
+            const thinkMatch = ln.response.match(/<think>([\s\S]*?)<\/think>/)
+            if (thinkMatch && thinkMatch[1].trim().length > 0) {
+              logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+            } else if (!thinkMatch) {
+              logger.logThinking(ctx.chat.id, replyGenerating.message_id, true)
+            }
+          } else if (ln.response.includes('</think>')) {
+            logger.logThinking(ctx.chat.id, replyGenerating.message_id, false)
+          }
+        }
+
+        const now = Date.now()
+        if (ln.response) {
+          if (model === thinking_model) {
+            let patchedThoughts = ln.response
+            // Replace complete <think>...</think> blocks with Markdown markers so thoughts stay readable in the chat
+            const thinkTagRx = /<think>([\s\S]*?)<\/think>/g
+            patchedThoughts = patchedThoughts.replace(thinkTagRx, (match, p1) => p1.trim().length > 0 ? '`Thinking...`' + p1 + '`Finished thinking`' : '')
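+            // A <think> block may be split across streamed chunks, so stray opening/closing tags are also replaced on their own below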
+            patchedThoughts = patchedThoughts.replace(/<think>/g, '`Thinking...`')
+            patchedThoughts = patchedThoughts.replace(/<\/think>/g, '`Finished thinking`')
+            thoughts += patchedThoughts
+            fullResponse += patchedThoughts
+          } else {
+            fullResponse += ln.response
+          }
+          if (now - lastUpdate >= 1000) {
+            // Stream the partial response so far, at most one edit per second
+            await rateLimiter.editMessageWithRetry(
+              ctx,
+              ctx.chat.id,
+              replyGenerating.message_id,
+              fullResponse,
+              { parse_mode: 'Markdown' }
+            )
+            lastUpdate = now
+          }
+        }
+      }
+    }
+
+    return {
+      success: true,
+      response: fullResponse,
+    }
+  } catch (error: any) {
+    let shouldPullModel = false
+    if (error.response) {
+      const errData = error.response.data?.error
+      const errStatus = error.response.status
+      if (errData && (errData.includes(`model '${model}' not found`) || errStatus === 404)) {
+        shouldPullModel = true
+      } else {
+        console.error("[✨ AI | !] Ollama API error:", errData)
+        return { success: false, error: errData }
+      }
+    } else if (error.request) {
+      console.error("[✨ AI | !] No response received:", error.request)
+      return { success: false, error: "No response received from server" }
+    } else {
+      console.error("[✨ AI | !] Unexpected error:", error.message)
+      return { success: false, error: error.message }
+    }
+
+    if (shouldPullModel) {
+      ctx.telegram.editMessageText(ctx.chat.id, replyGenerating.message_id, undefined, `🔄 Pulling ${model} from ollama...\n\nThis may take a few minutes...`)
+      console.log(`[✨ AI | i] Pulling ${model} from ollama...`)
+      try {
+        await axios.post(
+          `${process.env.ollamaApi}/api/pull`,
+          {
+            model,
+            stream: false,
+          },
+          {
+            // timeout is an axios option, not part of the Ollama request body
+            timeout: Number(process.env.ollamaApiTimeout) || 10000,
+          }
+        )
+      } catch (e: any) {
+        if (e.response) {
+          console.error("[✨ AI | !] Something went wrong:", e.response.data?.error)
+          return {
+            success: false,
+            error: `❌ Something went wrong while pulling ${model}, please try your command again!`,
+          }
+        } else if (e.request) {
+          console.error("[✨ AI | !] No response received while pulling:", e.request)
+          return {
+            success: false,
+            error: `❌ No response received while pulling ${model}, please try again!`,
+          }
+        } else {
+          console.error("[✨ AI | !] Error while pulling:", e.message)
+          return {
+            success: false,
+            error: `❌ Error while pulling ${model}: ${e.message}`,
+          }
+        }
+      }
+      console.log(`[✨ AI | i] ${model} pulled successfully`)
+      return {
+        success: true,
+        response: `✅ Pulled ${model} successfully, please retry the command.`,
+      }
+    }
+  }
+}
+
+export default (bot: Telegraf) => {
+  const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski"
+
+  // /ask answers with the flash model; /think uses the thinking model and shows its reasoning
+  bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => {
+    if (!ctx.message || !('text' in ctx.message)) return
+    const isAsk = ctx.message.text.startsWith("/ask")
+    const model = isAsk ? flash_model : thinking_model
+    const textCtx = ctx as TextContext
+    const reply_to_message_id = replyToMessageId(textCtx)
+    const Strings = getStrings(languageCode(textCtx))
+    const message = textCtx.message.text
+    const author = ctx.from?.username ? "@" + ctx.from.username : ctx.from?.first_name
+
+    logger.logCmdStart(author, model === flash_model ? "ask" : "think")
+
+    if (!process.env.ollamaApi) {
+      await ctx.reply(Strings.aiDisabled, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
+    const replyGenerating = await ctx.reply(Strings.askGenerating.replace("{model}", model), {
+      parse_mode: 'Markdown',
+      ...({ reply_to_message_id })
+    })
+
+    const fixedMsg = message.replace(/\/(ask|think) /, "")
+    if (fixedMsg.length < 1) {
+      await ctx.reply(Strings.askNoMessage, {
+        parse_mode: 'Markdown',
+        ...({ reply_to_message_id })
+      })
+      return
+    }
+
+    logger.logPrompt(fixedMsg)
+
+    const prompt = sanitizeForJson(
+`You are a plaintext-only, helpful assistant called ${botName}.
+Current Date/Time (UTC): ${new Date().toLocaleString()}
+
+---
+
+Respond to the user's message:
+${fixedMsg}`)
+    const aiResponse = await getResponse(prompt, textCtx, replyGenerating, model)
+    if (!aiResponse) return
+
+    if (!ctx.chat) return
+    if (aiResponse.success && aiResponse.response) {
+      await rateLimiter.editMessageWithRetry(
+        ctx,
+        ctx.chat.id,
+        replyGenerating.message_id,
+        aiResponse.response,
+        { parse_mode: 'Markdown' }
+      )
+      return
+    }
+    const error = Strings.unexpectedErr.replace("{error}", aiResponse.error)
+    await rateLimiter.editMessageWithRetry(
+      ctx,
+      ctx.chat.id,
+      replyGenerating.message_id,
+      error,
+      { parse_mode: 'Markdown' }
+    )
+  })
+}
\ No newline at end of file
diff --git a/src/commands/help.ts b/src/commands/help.ts
index 39191c1..3a6d3a0 100644
--- a/src/commands/help.ts
+++ b/src/commands/help.ts
@@ -32,7 +32,8 @@ async function sendHelpMessage(ctx, isEditing) {
         [{ text: Strings.mainCommands, callback_data: 'helpMain' }, { text: Strings.usefulCommands, callback_data: 'helpUseful' }],
         [{ text: Strings.interactiveEmojis, callback_data: 'helpInteractive' }, { text: Strings.funnyCommands, callback_data: 'helpFunny' }],
         [{ text: Strings.lastFm.helpEntry, callback_data: 'helpLast' }, { text: Strings.animalCommands, callback_data: 'helpAnimals' }],
-        [{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }]
+        [{ text: Strings.ytDownload.helpEntry, callback_data: 'helpYouTube' }, { text: Strings.ponyApi.helpEntry, callback_data: 'helpMLP' }],
+        [{ text: Strings.aiCmds, callback_data: 'helpAi' }]
       ]
     }
   };
@@ -112,6 +113,10 @@ export default (bot) => {
         await ctx.answerCbQuery();
         await ctx.editMessageText(Strings.ponyApi.helpDesc, options);
         break;
+      case 'helpAi':
+        await ctx.answerCbQuery();
+        await ctx.editMessageText(Strings.aiCmdsDesc, options);
+        break;
       case 'helpBack':
         await ctx.answerCbQuery();
         await sendHelpMessage(ctx, true);
diff --git a/src/locales/english.json b/src/locales/english.json
index 4bc9c06..fadfcd6 100644
--- a/src/locales/english.json
+++ b/src/locales/english.json
@@ -33,8 +33,8 @@
     "funEmojiResult": "*You rolled {emoji} and got* `{value}`*!*\nYou don't know what that means? Me neither!",
     "gifErr": "*Something went wrong while sending the GIF. Please try again later.*\n\n{err}",
     "lastFm": {
-        "helpEntry": "Last.fm",
-        "helpDesc": "*Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser `<user>`: Sets the user for the command above.",
+        "helpEntry": "🎵 Last.fm",
+        "helpDesc": "🎵 *Last.fm*\n\n- /lt | /lmu | /last | /lfm: Shows the last song from your Last.fm profile + the number of plays.\n- /setuser `<user>`: Sets the user for the command above.",
         "noUser": "*Please provide a Last.fm username.*\nExample: `/setuser <username>`",
         "noUserSet": "*You haven't set your Last.fm username yet.*\nUse the command /setuser to set.\n\nExample: `/setuser <username>`",
         "noRecentTracks": "*No recent tracks found for Last.fm user* `{lastfmUser}`*.*",
@@ -52,25 +52,27 @@
         "apiErr": "*An error occurred while retrieving the weather. Please try again later.*\n\n`{error}`",
         "apiKeyErr": "*An API key was not set by the bot owner. Please try again later.*"
     },
-    "mainCommands": "Main commands",
-    "mainCommandsDesc": "*Main commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy",
-    "usefulCommands": "Useful commands",
-    "usefulCommandsDesc": "*Useful commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device `<device>`: Search for a device on GSMArena and show its specs.\n/codename | /whatis `<codename>`: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima `<location>`: See weather status for a specific location.\n- /modarchive | /tma `<module id>`: Download a module from The Mod Archive.\n- /http `<http code>`: Send details about a specific HTTP code. Example: `/http 404`",
-    "funnyCommands": "Funny commands",
-    "funnyCommandsDesc": "*Funny commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10",
-    "interactiveEmojis": "Interactive emojis",
-    "interactiveEmojisDesc": "*Interactive emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!",
-    "animalCommands": "Animals",
-    "animalCommandsDesc": "*Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
+    "mainCommands": "ℹ️ Main Commands",
+    "mainCommandsDesc": "ℹ️ *Main Commands*\n\n- /help: Show bot's help\n- /start: Start the bot\n- /privacy: Read the bot's Privacy Policy",
+    "usefulCommands": "🛠️ Useful Commands",
+    "usefulCommandsDesc": "🛠️ *Useful Commands*\n\n- /chatinfo: Send information about the group\n- /userinfo: Send information about yourself\n- /d | /device `<device>`: Search for a device on GSMArena and show its specs.\n- /codename | /whatis `<codename>`: Shows what device is based on the codename. Example: `/codename begonia`\n- /weather | /clima `<location>`: See weather status for a specific location.\n- /modarchive | /tma `<module id>`: Download a module from The Mod Archive.\n- /http `<http code>`: Send details about a specific HTTP code. Example: `/http 404`",
+    "funnyCommands": "😂 Funny Commands",
+    "funnyCommandsDesc": "😂 *Funny Commands*\n\n- /gay: Check if you are gay\n- /furry: Check if you are a furry\n- /random: Pick a random number between 0-10",
+    "interactiveEmojis": "🎲 Interactive Emojis",
+    "interactiveEmojisDesc": "🎲 *Interactive Emojis*\n\n- /dice: Roll a dice\n- /idice: Infinitely roll a colored dice\n- /slot: Try to combine the figures!\n- /ball: Try to kick the ball into the goal!\n- /bowling: Try to hit the pins!\n- /dart: Try to hit the target!",
+    "animalCommands": "🐱 Animals",
+    "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat `<http code>`: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`",
+    "aiCmds": "✨ AI Commands",
+    "aiCmdsDesc": "✨ *AI Commands*\n\n- /ask `<prompt>`: Ask a question to an AI",
     "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`",
     "maDownloadError": "Error downloading the file. Check the module ID and try again.",
     "ytDownload": {
-        "helpEntry": "Video download",
-        "helpDesc": "*Video download*\n\n- /yt | /ytdl | /sdl | /dl | /video `