diff --git a/.dockerignore b/.dockerignore old mode 100644 new mode 100755 index e123d1a..cfdf0f2 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,13 @@ node_modules +webui/node_modules npm-debug.log .git +webui/.git .gitignore -.env +webui/.gitignore +.env* +webui/.env* +webui/.next *.md !README.md ollama/ diff --git a/.env.example b/.env.example old mode 100644 new mode 100755 diff --git a/.github/dependabot.yml b/.github/dependabot.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/njsscan.yml b/.github/workflows/njsscan.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/update-authors.yml b/.github/workflows/update-authors.yml old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/.gitmodules b/.gitmodules old mode 100644 new mode 100755 index cf3ce05..4a96795 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ -[submodule "src/spamwatch"] - path = src/spamwatch +[submodule "telegram/spamwatch"] + path = telegram/spamwatch url = https://github.com/ABOCN/TelegramBot-SpamWatch diff --git a/AUTHORS b/AUTHORS old mode 100644 new mode 100755 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md old mode 100644 new mode 100755 diff --git a/Dockerfile b/Dockerfile old mode 100644 new mode 100755 index 7a0c006..f0d7341 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,37 @@ FROM oven/bun # Install ffmpeg and other deps -RUN apt-get update && apt-get install -y ffmpeg git && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y \ + ffmpeg \ + git \ + supervisor \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/app COPY package*.json ./ +RUN bun install -RUN bun i +COPY webui/package*.json ./webui/ +WORKDIR /usr/src/app/webui +RUN bun install +WORKDIR /usr/src/app COPY . . -RUN chmod +x /usr/src/app/src/plugins/yt-dlp/yt-dlp +WORKDIR /usr/src/app/webui +RUN bun run build + +RUN chmod +x /usr/src/app/telegram/plugins/yt-dlp/yt-dlp + +COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf VOLUME /usr/src/app/.env -CMD ["bun", "start"] +EXPOSE 3000 + +ENV PYTHONUNBUFFERED=1 +ENV BUN_LOG_LEVEL=info + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"] diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 211d46a..36ad1dc --- a/README.md +++ b/README.md @@ -26,21 +26,6 @@ Kowalski is a a simple Telegram bot made in Node.js. - High-end CPU *or* GPU (~ 6GB vRAM) - If using CPU, enough RAM to load the models (~6GB w/ defaults) -## Running locally (non-Docker setup) - -First, clone the repo with Git: - -```bash -git clone --recurse-submodules https://github.com/ABOCN/TelegramBot -``` - -Next, inside the repository directory, create an `.env` file with some content, which you can see the [example .env file](.env.example) to fill info with. To see the meaning of each one, see [the Functions section](#env-functions). - -After editing the file, save all changes and run the bot with ``bun start``. - -> [!TIP] -> To deal with dependencies, just run ``bun install`` or ``bun i`` at any moment to install all of them. - ## Running with Docker > [!IMPORTANT] @@ -69,14 +54,16 @@ You can also run Kowalski using Docker, which simplifies the setup process. Make mv docker-compose.yml.ai.example docker-compose.yml ``` -2. 
**Make sure to setup your `.env` file first!** +1. **Make sure to setup your `.env` file first!** + + In order to successfully deploy Kowalski, you will need to edit your `.env` file and enter matching values in `webui/.env`. > [!TIP] > If you intend to setup AI, the defaults for Docker are already included (just uncomment) and don't need to be changed. > > Further setup may be needed for GPUs. See the Ollama documentation for more. -3. **Run the container** +1. **Run the container** ```bash docker compose up -d ``` @@ -88,13 +75,15 @@ If you prefer to use Docker directly, you can use these instructions instead. 1. **Make sure to setup your `.env` file first!** -2. **Build the image** + In order to successfully deploy Kowalski, you will need to edit your `.env` file and enter matching values in `webui/.env`. + +1. **Build the image** ```bash docker build -t kowalski . ``` -3. **Run the container** +1. **Run the container** ```bash docker run -d --name kowalski --restart unless-stopped -v $(pwd)/.env:/usr/src/app/.env:ro kowalski ``` > [!NOTE] > You must setup Ollama on your own if you would like to use AI features. +## Running locally (non-Docker/development setup) + +First, clone the repo with Git: + +```bash +git clone --recurse-submodules https://github.com/ABOCN/TelegramBot +``` + +Next, inside the repository directory, create an `.env` file; you can use the [example .env file](.env.example) as a reference for what to fill in. To see the meaning of each option, see [the Functions section](#env-functions). + +After editing the file, save all changes and run the bot with ``bun start``. + +> [!TIP] +> To deal with dependencies, just run ``bun install`` or ``bun i`` at any moment to install all of them. + +### Efficient Local (w/ Docker) Development + +If you want to develop a component of Kowalski without dealing with the headache of several terminals, we suggest you follow these guidelines: + +1. If you are working on one component, run it with Bun, and Dockerize the other components. +1. Minimize the number of non-Dockerized components to reduce headaches. +1. You will have to change your `.env` a lot. This is a common source of issues. Make sure the hostname and port are correct. + ## .env Functions > [!IMPORTANT] > Take care of your ``.env`` file, as it is so much important and needs to be secret (like your passwords), as anyone can do whatever they want to the bot with this token! +### Bot + - **botSource**: Put the link to your bot source code. - **botPrivacy**: Put the link to your bot privacy policy. - **maxRetries**: Maximum number of retries for a failing command on Kowalski. Default is 5. If the limit is hit, the bot will crash past this number. @@ -127,16 +141,21 @@ If you prefer to use Docker directly, you can use these instructions instead. > [!NOTE] > Further, advanced fine-tuning and configuration can be done in TypeScript with the files in the `/config` folder. +### WebUI + +- **botApiUrl**: The URL of the API exposed by the bot; this will likely stay the same +- **databaseUrl**: Database server configuration (see `.env.example`) + ## Troubleshooting ### YouTube Downloading **Q:** I get a "Permission denied (EACCES)" error in the console when running the `/yt` command -**A:** Make sure `src/plugins/yt-dlp/yt-dlp` is executable. You can do this on Linux like so: +**A:** Make sure `telegram/plugins/yt-dlp/yt-dlp` is executable.
You can do this on Linux like so: ```bash -chmod +x src/plugins/yt-dlp/yt-dlp +chmod +x telegram/plugins/yt-dlp/yt-dlp ``` ### AI @@ -157,4 +176,4 @@ Made with [contrib.rocks](https://contrib.rocks). BSD-3-Clause - 2024 Lucas Gabriel (lucmsilva). -Featuring some components under Unlicense. +With some components under Unlicense. diff --git a/TERMS_OF_USE.md b/TERMS_OF_USE.md old mode 100644 new mode 100755 diff --git a/config/ai.ts b/config/ai.ts old mode 100644 new mode 100755 index 1cf4fdf..de8d402 --- a/config/ai.ts +++ b/config/ai.ts @@ -1,4 +1,16 @@ -import type { ModelInfo } from "../src/commands/ai" +export interface ModelInfo { + name: string; + label: string; + descriptionEn: string; + descriptionPt: string; + models: Array<{ + name: string; + label: string; + parameterSize: string; + thinking: boolean; + uncensored: boolean; + }>; +} export const defaultFlashModel = "gemma3:4b" export const defaultThinkingModel = "qwen3:4b" @@ -12,8 +24,20 @@ export const models: ModelInfo[] = [ descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.', descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.', models: [ - { name: 'gemma3n:e2b', label: 'Gemma3n e2b', parameterSize: '2B' }, - { name: 'gemma3n:e4b', label: 'Gemma3n e4b', parameterSize: '4B' }, + { + name: 'gemma3n:e2b', + label: 'Gemma3n e2b', + parameterSize: '2B', + thinking: false, + uncensored: false + }, + { + name: 'gemma3n:e4b', + label: 'Gemma3n e4b', + parameterSize: '4B', + thinking: false, + uncensored: false + }, ] }, { @@ -22,11 +46,34 @@ export const models: ModelInfo[] = [ descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.', descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.', models: [ - { name: 'huihui_ai/gemma3-abliterated:1b', label: 'Gemma3 Uncensored 1B', parameterSize: '1B' }, - { name: 'huihui_ai/gemma3-abliterated:4b', label: 'Gemma3 Uncensored 4B', parameterSize: '4B' }, - { name: 'gemma3:1b', label: 'Gemma3 1B', parameterSize: '1B' }, - { name: 'gemma3:4b', label: 'Gemma3 4B', parameterSize: '4B' }, - { name: 'gemma3:12b', label: 'Gemma3 12B', parameterSize: '12B' }, + { + name: 'huihui_ai/gemma3-abliterated:1b', + label: 'Gemma3 Uncensored 1B', + parameterSize: '1B', + thinking: false, + uncensored: true + }, + { + name: 'huihui_ai/gemma3-abliterated:4b', + label: 'Gemma3 Uncensored 4B', + parameterSize: '4B', + thinking: false, + uncensored: true + }, + { + name: 'gemma3:1b', + label: 'Gemma3 1B', + parameterSize: '1B', + thinking: false, + uncensored: false + }, + { + name: 'gemma3:4b', + label: 'Gemma3 4B', + parameterSize: '4B', + thinking: false, + uncensored: false + }, ] }, { @@ -35,14 +82,55 @@ export const models: ModelInfo[] = [ descriptionEn: 'Qwen3 is a multilingual reasoning model series.', descriptionPt: 'Qwen3 é uma série de modelos multilingues.', models: [ - { name: 'qwen3:0.6b', label: 'Qwen3 0.6B', parameterSize: '0.6B' }, - { name: 'qwen3:1.7b', label: 'Qwen3 1.7B', parameterSize: '1.7B' }, - { name: 'qwen3:4b', label: 'Qwen3 4B', parameterSize: '4B' }, - { name: 'qwen3:8b', label: 'Qwen3 8B', parameterSize: '8B' }, - { name: 'qwen3:14b', label: 'Qwen3 14B', parameterSize: '14B' }, - { name: 'qwen3:30b', label: 'Qwen3 30B', parameterSize: '30B' }, - { name: 'qwen3:32b', label: 'Qwen3 32B', parameterSize: '32B' }, - { name: 'qwen3:235b-a22b', label: 'Qwen3 235B A22B', parameterSize: '235B' }, + { + name: 
'qwen3:0.6b', + label: 'Qwen3 0.6B', + parameterSize: '0.6B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:1.7b', + label: 'Qwen3 1.7B', + parameterSize: '1.7B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:4b', + label: 'Qwen3 4B', + parameterSize: '4B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:8b', + label: 'Qwen3 8B', + parameterSize: '8B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:14b', + label: 'Qwen3 14B', + parameterSize: '14B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:30b', + label: 'Qwen3 30B', + parameterSize: '30B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:32b', + label: 'Qwen3 32B', + parameterSize: '32B', + thinking: true, + uncensored: false + }, ] }, { @@ -51,14 +139,55 @@ export const models: ModelInfo[] = [ descriptionEn: 'Qwen3-abliterated is a multilingual reasoning model series.', descriptionPt: 'Qwen3-abliterated é uma série de modelos multilingues.', models: [ - { name: 'huihui_ai/qwen3-abliterated:0.6b', label: 'Qwen3 Uncensored 0.6B', parameterSize: '0.6B' }, - { name: 'huihui_ai/qwen3-abliterated:1.7b', label: 'Qwen3 Uncensored 1.7B', parameterSize: '1.7B' }, - { name: 'huihui_ai/qwen3-abliterated:4b', label: 'Qwen3 Uncensored 4B', parameterSize: '4B' }, - { name: 'huihui_ai/qwen3-abliterated:8b', label: 'Qwen3 Uncensored 8B', parameterSize: '8B' }, - { name: 'huihui_ai/qwen3-abliterated:14b', label: 'Qwen3 Uncensored 14B', parameterSize: '14B' }, - { name: 'huihui_ai/qwen3-abliterated:30b', label: 'Qwen3 Uncensored 30B', parameterSize: '30B' }, - { name: 'huihui_ai/qwen3-abliterated:32b', label: 'Qwen3 Uncensored 32B', parameterSize: '32B' }, - { name: 'huihui_ai/qwen3-abliterated:235b', label: 'Qwen3 Uncensored 235B', parameterSize: '235B' }, + { + name: 'huihui_ai/qwen3-abliterated:0.6b', + label: 'Qwen3 Uncensored 0.6B', + parameterSize: '0.6B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:1.7b', + label: 'Qwen3 Uncensored 1.7B', + parameterSize: '1.7B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:4b', + label: 'Qwen3 Uncensored 4B', + parameterSize: '4B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:8b', + label: 'Qwen3 Uncensored 8B', + parameterSize: '8B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:14b', + label: 'Qwen3 Uncensored 14B', + parameterSize: '14B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:30b', + label: 'Qwen3 Uncensored 30B', + parameterSize: '30B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:32b', + label: 'Qwen3 Uncensored 32B', + parameterSize: '32B', + thinking: true, + uncensored: true + }, ] }, { @@ -67,8 +196,20 @@ export const models: ModelInfo[] = [ descriptionEn: 'QwQ is the reasoning model of the Qwen series.', descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.', models: [ - { name: 'qwq:32b', label: 'QwQ 32B', parameterSize: '32B' }, - { name: 'huihui_ai/qwq-abliterated:32b', label: 'QwQ Uncensored 32B', parameterSize: '32B' }, + { + name: 'qwq:32b', + label: 'QwQ 32B', + parameterSize: '32B', + thinking: true, + uncensored: false + }, + { + name: 'huihui_ai/qwq-abliterated:32b', + label: 'QwQ Uncensored 32B', + parameterSize: '32B', + thinking: true, + uncensored: true + }, ] }, { @@ -77,7 +218,13 @@ export const models: ModelInfo[] = [ descriptionEn: 'The latest collection of 
multimodal models from Meta.', descriptionPt: 'A coleção mais recente de modelos multimodais da Meta.', models: [ - { name: 'llama4:scout', label: 'Llama4 109B A17B', parameterSize: '109B' }, + { + name: 'llama4:scout', + label: 'Llama4 109B A17B', + parameterSize: '109B', + thinking: false, + uncensored: false + }, ] }, { @@ -86,14 +233,55 @@ export const models: ModelInfo[] = [ descriptionEn: 'DeepSeek is a research model for reasoning tasks.', descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.', models: [ - { name: 'deepseek-r1:1.5b', label: 'DeepSeek 1.5B', parameterSize: '1.5B' }, - { name: 'deepseek-r1:7b', label: 'DeepSeek 7B', parameterSize: '7B' }, - { name: 'deepseek-r1:8b', label: 'DeepSeek 8B', parameterSize: '8B' }, - { name: 'deepseek-r1:14b', label: 'DeepSeek 14B', parameterSize: '14B' }, - { name: 'huihui_ai/deepseek-r1-abliterated:1.5b', label: 'DeepSeek Uncensored 1.5B', parameterSize: '1.5B' }, - { name: 'huihui_ai/deepseek-r1-abliterated:7b', label: 'DeepSeek Uncensored 7B', parameterSize: '7B' }, - { name: 'huihui_ai/deepseek-r1-abliterated:8b', label: 'DeepSeek Uncensored 8B', parameterSize: '8B' }, - { name: 'huihui_ai/deepseek-r1-abliterated:14b', label: 'DeepSeek Uncensored 14B', parameterSize: '14B' }, + { + name: 'deepseek-r1:1.5b', + label: 'DeepSeek 1.5B', + parameterSize: '1.5B', + thinking: true, + uncensored: false + }, + { + name: 'deepseek-r1:7b', + label: 'DeepSeek 7B', + parameterSize: '7B', + thinking: true, + uncensored: false + }, + { + name: 'deepseek-r1:8b', + label: 'DeepSeek 8B', + parameterSize: '8B', + thinking: true, + uncensored: false + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:1.5b', + label: 'DeepSeek Uncensored 1.5B', + parameterSize: '1.5B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:7b', + label: 'DeepSeek Uncensored 7B', + parameterSize: '7B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:8b', + label: 'DeepSeek Uncensored 8B', + parameterSize: '8B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:14b', + label: 'DeepSeek Uncensored 14B', + parameterSize: '14B', + thinking: true, + uncensored: true + }, ] }, { @@ -102,8 +290,20 @@ export const models: ModelInfo[] = [ descriptionEn: 'Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research.', descriptionPt: 'Hermes 3 é a versão mais recente da série Hermes de LLMs da Nous Research.', models: [ - { name: 'hermes3:3b', label: 'Hermes3 3B', parameterSize: '3B' }, - { name: 'hermes3:8b', label: 'Hermes3 8B', parameterSize: '8B' }, + { + name: 'hermes3:3b', + label: 'Hermes3 3B', + parameterSize: '3B', + thinking: false, + uncensored: false + }, + { + name: 'hermes3:8b', + label: 'Hermes3 8B', + parameterSize: '8B', + thinking: false, + uncensored: false + }, ] }, { @@ -112,7 +312,13 @@ export const models: ModelInfo[] = [ descriptionEn: 'The 7B model released by Mistral AI, updated to version 0.3.', descriptionPt: 'O modelo 7B lançado pela Mistral AI, atualizado para a versão 0.3.', models: [ - { name: 'mistral:7b', label: 'Mistral 7B', parameterSize: '7B' }, + { + name: 'mistral:7b', + label: 'Mistral 7B', + parameterSize: '7B', + thinking: false, + uncensored: false + }, ] }, { @@ -121,10 +327,34 @@ export const models: ModelInfo[] = [ descriptionEn: 'Phi-4 is a 14B parameter, state-of-the-art open model from Microsoft. 
', descriptionPt: 'Phi-4 é um modelo de 14B de última geração, aberto pela Microsoft.', models: [ - { name: 'hf.co/unsloth/Phi-4-mini-reasoning-GGUF', label: 'Phi4 Mini Reasoning', parameterSize: '4B' }, - { name: 'phi4:14b', label: 'Phi4 14B', parameterSize: '14B' }, - { name: 'hf.co/unsloth/Phi-4-reasoning-plus-GGUF', label: 'Phi4 Reasoning Plus', parameterSize: '14B' }, - { name: 'huihui_ai/phi4-abliterated:14b', label: 'Phi4 Uncensored 14B', parameterSize: '14B' }, + { + name: 'hf.co/unsloth/Phi-4-mini-reasoning-GGUF', + label: 'Phi4 Mini Reasoning', + parameterSize: '4B', + thinking: true, + uncensored: false + }, + { + name: 'phi4:14b', + label: 'Phi4 14B', + parameterSize: '14B', + thinking: false, + uncensored: false + }, + { + name: 'hf.co/unsloth/Phi-4-reasoning-plus-GGUF', + label: 'Phi4 Reasoning Plus', + parameterSize: '14B', + thinking: true, + uncensored: false + }, + { + name: 'huihui_ai/phi4-abliterated:14b', + label: 'Phi4 Uncensored 14B', + parameterSize: '14B', + thinking: false, + uncensored: true + }, ] }, { @@ -133,7 +363,13 @@ export const models: ModelInfo[] = [ descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.', descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.', models: [ - { name: 'phi3:3.8b', label: 'Phi3 3.8B', parameterSize: '3.8B' }, + { + name: 'phi3:3.8b', + label: 'Phi3 3.8B', + parameterSize: '3.8B', + thinking: false, + uncensored: false + }, ] }, { @@ -142,7 +378,13 @@ export const models: ModelInfo[] = [ descriptionEn: 'Llama 3, a lightweight model from Meta.', descriptionPt: 'Llama 3, um modelo leve da Meta.', models: [ - { name: 'llama3:8b', label: 'Llama3 8B', parameterSize: '8B' }, + { + name: 'llama3:8b', + label: 'Llama3 8B', + parameterSize: '8B', + thinking: false, + uncensored: false + }, ] }, { @@ -151,7 +393,13 @@ export const models: ModelInfo[] = [ descriptionEn: 'Ablitered v3 llama-3.1 8b with uncensored prompt ', descriptionPt: 'Llama3.1 é um modelo aberto, leve e para dispositivos locais, com prompt não censurado.', models: [ - { name: 'mannix/llama3.1-8b-abliterated:latest', label: 'Llama3.1 8B', parameterSize: '8B' }, + { + name: 'mannix/llama3.1-8b-abliterated:latest', + label: 'Llama3.1 8B', + parameterSize: '8B', + thinking: false, + uncensored: true + }, ] }, { @@ -160,9 +408,27 @@ export const models: ModelInfo[] = [ descriptionEn: 'Llama3.2 is a family of open, lightweight models for general tasks.', descriptionPt: 'Llama3.2 é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.', models: [ - { name: 'llama3.2:1b', label: 'Llama3.2 1B', parameterSize: '1B' }, - { name: 'llama3.2:3b', label: 'Llama3.2 3B', parameterSize: '3B' }, - { name: 'socialnetwooky/llama3.2-abliterated:3b_q8_0', label: 'Llama3.2 Uncensored 3B', parameterSize: '3B' }, + { + name: 'llama3.2:1b', + label: 'Llama3.2 1B', + parameterSize: '1B', + thinking: false, + uncensored: false + }, + { + name: 'llama3.2:3b', + label: 'Llama3.2 3B', + parameterSize: '3B', + thinking: false, + uncensored: false + }, + { + name: 'socialnetwooky/llama3.2-abliterated:3b_q8_0', + label: 'Llama3.2 Uncensored 3B', + parameterSize: '3B', + thinking: false, + uncensored: true + }, ] }, ]; \ No newline at end of file diff --git a/config/settings.ts b/config/settings.ts old mode 100644 new mode 100755 diff --git a/database/schema.ts b/database/schema.ts new file mode 100755 index 0000000..ce9a8ed --- /dev/null 
+++ b/database/schema.ts @@ -0,0 +1,52 @@ +import { + integer, + pgTable, + varchar, + timestamp, + boolean, + real, + index +} from "drizzle-orm/pg-core"; + +export const usersTable = pgTable("users", { + telegramId: varchar({ length: 255 }).notNull().primaryKey(), + username: varchar({ length: 255 }).notNull(), + firstName: varchar({ length: 255 }).notNull(), + lastName: varchar({ length: 255 }).notNull(), + aiEnabled: boolean().notNull().default(false), + showThinking: boolean().notNull().default(false), + customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"), + aiTemperature: real().notNull().default(0.9), + aiRequests: integer().notNull().default(0), + aiCharacters: integer().notNull().default(0), + disabledCommands: varchar({ length: 255 }).array().notNull().default([]), + languageCode: varchar({ length: 255 }).notNull(), + aiTimeoutUntil: timestamp(), + aiMaxExecutionTime: integer().default(0), + createdAt: timestamp().notNull().defaultNow(), + updatedAt: timestamp().notNull().defaultNow(), +}); + +export const twoFactorTable = pgTable("two_factor", { + userId: varchar({ length: 255 }).notNull().references(() => usersTable.telegramId).primaryKey(), + currentCode: varchar({ length: 255 }).notNull(), + codeExpiresAt: timestamp().notNull(), + codeAttempts: integer().notNull().default(0), + createdAt: timestamp().notNull().defaultNow(), + updatedAt: timestamp().notNull().defaultNow(), +}, (table) => [ + index("idx_two_factor_user_id").on(table.userId), + index("idx_two_factor_code_expires_at").on(table.codeExpiresAt), +]); + +export const sessionsTable = pgTable("sessions", { + id: varchar({ length: 255 }).notNull().primaryKey(), + userId: varchar({ length: 255 }).notNull().references(() => usersTable.telegramId), + sessionToken: varchar({ length: 255 }).notNull().unique(), + expiresAt: timestamp().notNull(), + createdAt: timestamp().notNull().defaultNow(), + updatedAt: timestamp().notNull().defaultNow(), +}, (table) => [ + index("idx_sessions_user_id").on(table.userId), + index("idx_sessions_expires_at").on(table.expiresAt), +]); diff --git a/docker-compose.yml.ai.example b/docker-compose.yml.ai.example old mode 100644 new mode 100755 index 2a5c7e9..fe467ab --- a/docker-compose.yml.ai.example +++ b/docker-compose.yml.ai.example @@ -2,23 +2,26 @@ services: kowalski: build: . container_name: kowalski - restart: unless-stopped + ports: + - "3000:3000" volumes: - ./.env:/usr/src/app/.env:ro + - ./telegram/props/lastfm.json:/usr/src/app/telegram/props/lastfm.json environment: - NODE_ENV=production + env_file: + - .env + depends_on: + - postgres + - ollama ollama: image: ollama/ollama container_name: kowalski-ollama - restart: unless-stopped volumes: - ./ollama:/root/.ollama postgres: image: postgres:17 container_name: kowalski-postgres - restart: unless-stopped - ports: - - 5433:5432 volumes: - ./db:/var/lib/postgresql/data environment: diff --git a/docker-compose.yml.example b/docker-compose.yml.example old mode 100644 new mode 100755 index 65a2206..d94e78d --- a/docker-compose.yml.example +++ b/docker-compose.yml.example @@ -2,17 +2,20 @@ services: kowalski: build: . 
container_name: kowalski - restart: unless-stopped + ports: + - "3000:3000" volumes: - ./.env:/usr/src/app/.env:ro + - ./telegram/props/lastfm.json:/usr/src/app/telegram/props/lastfm.json environment: - NODE_ENV=production + env_file: + - .env + depends_on: + - postgres postgres: image: postgres:17 container_name: kowalski-postgres - restart: unless-stopped - ports: - - 5433:5432 volumes: - ./db:/var/lib/postgresql/data environment: diff --git a/drizzle.config.ts b/drizzle.config.ts old mode 100644 new mode 100755 index 6766ff6..51b8e1d --- a/drizzle.config.ts +++ b/drizzle.config.ts @@ -3,7 +3,7 @@ import { defineConfig } from 'drizzle-kit'; export default defineConfig({ out: './drizzle', - schema: './src/db/schema.ts', + schema: './database/schema.ts', dialect: 'postgresql', dbCredentials: { url: process.env.databaseUrl!, diff --git a/nodemon.json b/nodemon.json old mode 100644 new mode 100755 index 4e4ec20..d9938ac --- a/nodemon.json +++ b/nodemon.json @@ -1,6 +1,6 @@ { - "ignore": ["src/props/*.json", "src/props/*.txt"], - "watch": ["src", "config"], + "ignore": ["telegram/props/*.json", "telegram/props/*.txt"], + "watch": ["telegram", "database", "config"], "ext": "ts,js", - "exec": "bun src/bot.ts" + "exec": "bun telegram/bot.ts" } \ No newline at end of file diff --git a/package.json b/package.json old mode 100644 new mode 100755 index 7172307..b5c33cc --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "scripts": { - "start": "nodemon src/bot.ts", + "start": "nodemon telegram/bot.ts", "docs": "bunx typedoc", "serve:docs": "bun run serve-docs.ts" }, @@ -10,6 +10,7 @@ "axios": "^1.10.0", "dotenv": "^17.0.0", "drizzle-orm": "^0.44.2", + "express": "^5.1.0", "node-html-parser": "^7.0.1", "nodemon": "^3.1.10", "pg": "^8.16.3", diff --git a/src/commands/ai.ts b/src/commands/ai.ts deleted file mode 100644 index 3acdc0b..0000000 --- a/src/commands/ai.ts +++ /dev/null @@ -1,627 +0,0 @@ -// AI.TS -// by ihatenodejs/Aidan -// -// ----------------------------------------------------------------------- -// -// This is free and unencumbered software released into the public domain. -// -// Anyone is free to copy, modify, publish, use, compile, sell, or -// distribute this software, either in source code form or as a compiled -// binary, for any purpose, commercial or non-commercial, and by any -// means. -// -// In jurisdictions that recognize copyright laws, the author or authors -// of this software dedicate any and all copyright interest in the -// software to the public domain. We make this dedication for the benefit -// of the public at large and to the detriment of our heirs and -// successors. We intend this dedication to be an overt act of -// relinquishment in perpetuity of all present and future rights to this -// software under copyright law. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. 
-// -// For more information, please refer to - -import { isOnSpamWatch } from "../spamwatch/spamwatch" -import spamwatchMiddlewareModule from "../spamwatch/Middleware" -import { Telegraf, Context } from "telegraf" -import type { Message } from "telegraf/types" -import { replyToMessageId } from "../utils/reply-to-message-id" -import { getStrings } from "../plugins/checklang" -import axios from "axios" -import { rateLimiter } from "../utils/rate-limiter" -import { logger } from "../utils/log" -import { ensureUserInDb } from "../utils/ensure-user" -import * as schema from '../db/schema' -import type { NodePgDatabase } from "drizzle-orm/node-postgres" -import { eq, sql } from 'drizzle-orm' -import { models, unloadModelAfterB, maxUserQueueSize } from "../../config/ai" - -const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch) -export const flash_model = process.env.flashModel || "gemma3:4b" -export const thinking_model = process.env.thinkingModel || "qwen3:4b" - -type TextContext = Context & { message: Message.TextMessage } - -type User = typeof schema.usersTable.$inferSelect - -export interface ModelInfo { - name: string; - label: string; - descriptionEn: string; - descriptionPt: string; - models: Array<{ - name: string; - label: string; - parameterSize: string; - }>; -} - -interface OllamaResponse { - response: string; -} - -async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase, botName: string, message: string): Promise { - const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); - if (user.length === 0) await ensureUserInDb(ctx, db); - const userData = user[0]; - const lang = userData?.languageCode || "en"; - const Strings = getStrings(lang); - const utcDate = new Date().toISOString(); - const prompt = Strings.ai.systemPrompt - .replace("{botName}", botName) - .replace("{date}", utcDate) - .replace("{message}", message); - return prompt; -} - -export function sanitizeForJson(text: string): string { - return text - .replace(/\\/g, '\\\\') - .replace(/"/g, '\\"') - .replace(/\n/g, '\\n') - .replace(/\r/g, '\\r') - .replace(/\t/g, '\\t') -} - -function sanitizeMarkdownForTelegram(text: string): string { - let sanitizedText = text; - - const replacements: string[] = []; - const addReplacement = (match: string): string => { - replacements.push(match); - return `___PLACEHOLDER_${replacements.length - 1}___`; - }; - - sanitizedText = sanitizedText.replace(/```([\s\S]*?)```/g, addReplacement); - sanitizedText = sanitizedText.replace(/`([^`]+)`/g, addReplacement); - sanitizedText = sanitizedText.replace(/\[([^\]]+)\]\(([^)]+)\)/g, addReplacement); - - const parts = sanitizedText.split(/(___PLACEHOLDER_\d+___)/g); - const processedParts = parts.map(part => { - if (part.match(/___PLACEHOLDER_\d+___/)) { - return part; - } else { - let processedPart = part; - processedPart = processedPart.replace(/^(#{1,6})\s+(.+)/gm, '*$2*'); - processedPart = processedPart.replace(/^(\s*)[-*]\s+/gm, '$1- '); - processedPart = processedPart.replace(/\*\*(.*?)\*\*/g, '*$1*'); - processedPart = processedPart.replace(/__(.*?)__/g, '*$1*'); - processedPart = processedPart.replace(/(^|\s)\*(?!\*)([^*]+?)\*(?!\*)/g, '$1_$2_'); - processedPart = processedPart.replace(/(^|\s)_(?!_)([^_]+?)_(?!_)/g, '$1_$2_'); - processedPart = processedPart.replace(/~~(.*?)~~/g, '~$1~'); - processedPart = processedPart.replace(/^\s*┃/gm, '>'); - processedPart = processedPart.replace(/^>\s?/gm, '> '); - - return processedPart; - } - }); 
- - sanitizedText = processedParts.join(''); - - sanitizedText = sanitizedText.replace(/___PLACEHOLDER_(\d+)___/g, (_, idx) => replacements[Number(idx)]); - - const codeBlockCount = (sanitizedText.match(/```/g) || []).length; - if (codeBlockCount % 2 !== 0) { - sanitizedText += '\n```'; - } - - return sanitizedText; -} - -function processThinkingTags(text: string): string { - let processedText = text; - - const firstThinkIndex = processedText.indexOf(''); - if (firstThinkIndex === -1) { - return processedText.replace(/<\/think>/g, '___THINK_END___'); - } - - processedText = processedText.substring(0, firstThinkIndex) + '___THINK_START___' + processedText.substring(firstThinkIndex + ''.length); - const lastThinkEndIndex = processedText.lastIndexOf(''); - if (lastThinkEndIndex !== -1) { - processedText = processedText.substring(0, lastThinkEndIndex) + '___THEND___' + processedText.substring(lastThinkEndIndex + ''.length); - } - processedText = processedText.replace(//g, ''); - processedText = processedText.replace(/<\/think>/g, ''); - processedText = processedText.replace('___THEND___', '___THINK_END___'); - - return processedText; -} - -export async function preChecks() { - const envs = [ - "ollamaApi", - "flashModel", - "thinkingModel", - ]; - - for (const env of envs) { - if (!process.env[env]) { - console.error(`[✨ AI | !] ❌ ${env} not set!`); - return false; - } - } - - const ollamaApi = process.env.ollamaApi!; - let ollamaOk = false; - for (let i = 0; i < 10; i++) { - try { - const res = await axios.get(ollamaApi, { timeout: 2000 }); - if (res.status === 200) { - ollamaOk = true; - break; - } - } catch (err) { - if (i < 9) { - await new Promise((resolve) => setTimeout(resolve, 1000)); - } - } - } - - if (!ollamaOk) { - console.error(`[✨ AI | !] ❌ Ollama API is not responding at ${ollamaApi}`); - return false; - } - - console.log(`[✨ AI] Pre-checks passed.`); - const modelCount = models.reduce((acc, model) => acc + model.models.length, 0); - console.log(`[✨ AI] Found ${modelCount} models.`); - return true; -} - -function isAxiosError(error: unknown): error is { response?: { data?: { error?: string }, status?: number, statusText?: string }, request?: unknown, message?: string } { - return typeof error === 'object' && error !== null && ( - 'response' in error || 'request' in error || 'message' in error - ); -} - -function extractAxiosErrorMessage(error: unknown): string { - if (isAxiosError(error)) { - const err = error as { response?: { data?: { error?: string }, status?: number, statusText?: string }, request?: unknown, message?: string }; - if (err.response && typeof err.response === 'object') { - const resp = err.response; - if (resp.data && typeof resp.data === 'object' && 'error' in resp.data) { - return String(resp.data.error); - } - if ('status' in resp && 'statusText' in resp) { - return `HTTP ${resp.status}: ${resp.statusText}`; - } - return JSON.stringify(resp.data ?? 
resp); - } - if (err.request) { - return 'No response received from server.'; - } - if (typeof err.message === 'string') { - return err.message; - } - } - return 'An unexpected error occurred.'; -} - -function containsUrls(text: string): boolean { - return text.includes('http://') || text.includes('https://') || text.includes('.com') || text.includes('.net') || text.includes('.org') || text.includes('.io') || text.includes('.ai') || text.includes('.dev') -} - -async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase, userId: string, Strings: ReturnType, showThinking: boolean): Promise<{ success: boolean; response?: string; error?: string, messageType?: 'generation' | 'system' }> { - if (!ctx.chat) { - return { - success: false, - error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound), - }; - } - const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model; - let status = Strings.ai.statusWaitingRender; - let modelHeader = Strings.ai.modelHeader - .replace("{model}", `\`${cleanedModelName}\``) - .replace("{temperature}", String(aiTemperature)) - .replace("{status}", status) + "\n\n"; - - const promptCharCount = originalMessage.length; - await db.update(schema.usersTable) - .set({ aiCharacters: sql`${schema.usersTable.aiCharacters} + ${promptCharCount}` }) - .where(eq(schema.usersTable.telegramId, userId)); - const paramSizeStr = models.find(m => m.name === model)?.models.find(m => m.name === model)?.parameterSize?.replace('B', ''); - const shouldKeepAlive = paramSizeStr ? Number(paramSizeStr) > unloadModelAfterB : false; - - try { - const aiResponse = await axios.post( - `${process.env.ollamaApi}/api/generate`, - { - model, - prompt, - stream: true, - keep_alive: shouldKeepAlive ? '1' : '0', - options: { - temperature: aiTemperature - } - }, - { - responseType: "stream", - } - ); - let fullResponse = ""; - let lastUpdateCharCount = 0; - let sentHeader = false; - let firstChunk = true; - const stream: NodeJS.ReadableStream = aiResponse.data as any; - - const formatThinkingMessage = (text: string) => { - const withPlaceholders = text - .replace(/___THINK_START___/g, `${Strings.ai.thinking}`) - .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`); - return sanitizeMarkdownForTelegram(withPlaceholders); - }; - - for await (const chunk of stream) { - const lines = chunk.toString().split('\n'); - for (const line of lines) { - if (!line.trim()) continue; - let ln: OllamaResponse; - try { - ln = JSON.parse(line); - } catch (e) { - console.error("[✨ AI | !] 
Error parsing chunk"); - continue; - } - - if (ln.response) { - if (ln.response.includes('')) { - const thinkMatch = ln.response.match(/([\s\S]*?)<\/think>/); - if (thinkMatch && thinkMatch[1].trim().length > 0) { - logger.logThinking(ctx.chat.id, replyGenerating.message_id, true); - } else if (!thinkMatch) { - logger.logThinking(ctx.chat.id, replyGenerating.message_id, true); - } - } else if (ln.response.includes('')) { - logger.logThinking(ctx.chat.id, replyGenerating.message_id, false); - } - fullResponse += ln.response; - if (showThinking) { - let displayResponse = processThinkingTags(fullResponse); - - if (firstChunk) { - status = Strings.ai.statusWaitingRender; - modelHeader = Strings.ai.modelHeader - .replace("{model}", `\`${cleanedModelName}\``) - .replace("{temperature}", aiTemperature) - .replace("{status}", status) + "\n\n"; - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - modelHeader + formatThinkingMessage(displayResponse), - { parse_mode: 'Markdown' } - ); - lastUpdateCharCount = displayResponse.length; - sentHeader = true; - firstChunk = false; - continue; - } - const updateEveryChars = Number(process.env.updateEveryChars) || 100; - if (displayResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) { - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - modelHeader + formatThinkingMessage(displayResponse), - { parse_mode: 'Markdown' } - ); - lastUpdateCharCount = displayResponse.length; - sentHeader = true; - } - } - } - } - } - - status = Strings.ai.statusRendering; - modelHeader = Strings.ai.modelHeader - .replace("{model}", `\`${cleanedModelName}\``) - .replace("{temperature}", aiTemperature) - .replace("{status}", status) + "\n\n"; - - if (showThinking) { - let displayResponse = processThinkingTags(fullResponse); - - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - modelHeader + formatThinkingMessage(displayResponse), - { parse_mode: 'Markdown' } - ); - } - - const responseCharCount = fullResponse.length; - await db.update(schema.usersTable) - .set({ - aiCharacters: sql`${schema.usersTable.aiCharacters} + ${responseCharCount}`, - aiRequests: sql`${schema.usersTable.aiRequests} + 1` - }) - .where(eq(schema.usersTable.telegramId, userId)); - - const patchedResponse = processThinkingTags(fullResponse); - - return { - success: true, - response: patchedResponse, - messageType: 'generation' - }; - } catch (error: unknown) { - const errorMsg = extractAxiosErrorMessage(error); - console.error("[✨ AI | !] Error:", errorMsg); - if (isAxiosError(error) && error.response && typeof error.response === 'object') { - const resp = error.response as { data?: { error?: string }, status?: number }; - const errData = resp.data && typeof resp.data === 'object' && 'error' in resp.data ? (resp.data as { error?: string }).error : undefined; - const errStatus = 'status' in resp ? 
resp.status : undefined; - if ((typeof errData === 'string' && errData.includes(`model '${model}' not found`)) || errStatus === 404) { - await ctx.telegram.editMessageText( - ctx.chat!.id, - replyGenerating.message_id, - undefined, - Strings.ai.pulling.replace("{model}", `\`${cleanedModelName}\``), - { parse_mode: 'Markdown' } - ); - console.log(`[✨ AI] Pulling ${model} from ollama...`); - try { - await axios.post( - `${process.env.ollamaApi}/api/pull`, - { - model, - stream: false, - timeout: Number(process.env.ollamaApiTimeout) || 10000, - } - ); - } catch (e: unknown) { - const pullMsg = extractAxiosErrorMessage(e); - console.error("[✨ AI | !] Pull error:", pullMsg); - return { - success: false, - error: `❌ Something went wrong while pulling \`${model}\`: ${pullMsg}`, - messageType: 'system' - }; - } - console.log(`[✨ AI] ${model} pulled successfully`); - return { - success: true, - response: Strings.ai.pulled.replace("{model}", `\`${cleanedModelName}\``), - messageType: 'system' - }; - } - } - return { - success: false, - error: errorMsg, - }; - } -} - -async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase, userId: string, Strings: ReturnType, showThinking: boolean) { - const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId, Strings, showThinking); - if (!aiResponse) return; - if (!ctx.chat) return; - if (aiResponse.success && aiResponse.response) { - if (aiResponse.messageType === 'system') { - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - aiResponse.response, - { parse_mode: 'Markdown' } - ); - return; - } - - const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model; - const status = Strings.ai.statusComplete; - const modelHeader = Strings.ai.modelHeader - .replace("{model}", `\`${cleanedModelName}\``) - .replace("{temperature}", String(aiTemperature)) - .replace("{status}", status) + "\n\n"; - const urlWarning = containsUrls(originalMessage) ? 
Strings.ai.urlWarning : ''; - let finalResponse = aiResponse.response; - if (showThinking) { - finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`) - .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`); - } else { - finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim(); - finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*/g, '').trim(); - } - - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - modelHeader + sanitizeMarkdownForTelegram(finalResponse) + urlWarning, - { parse_mode: 'Markdown' } - ); - return; - } - const error = Strings.unexpectedErr.replace("{error}", aiResponse.error); - await rateLimiter.editMessageWithRetry( - ctx, - ctx.chat.id, - replyGenerating.message_id, - error, - { parse_mode: 'Markdown' } - ); -} - -async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase): Promise<{ user: User; Strings: ReturnType; languageCode: string; customAiModel: string; aiTemperature: number, showThinking: boolean }> { - const userArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); - let user = userArr[0]; - if (!user) { - await ensureUserInDb(ctx, db); - const newUserArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); - user = newUserArr[0]; - const Strings = getStrings(user.languageCode); - return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking }; - } - const Strings = getStrings(user.languageCode); - return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking }; -} - -export function getModelLabelByName(name: string): string { - for (const series of models) { - const found = series.models.find(m => m.name === name); - if (found) return found.label; - } - return name; -} - -export default (bot: Telegraf, db: NodePgDatabase) => { - const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" - - interface AiRequest { - task: () => Promise; - ctx: TextContext; - wasQueued: boolean; - userId: number; - } - - const requestQueue: AiRequest[] = []; - let isProcessing = false; - - async function processQueue() { - if (isProcessing || requestQueue.length === 0) { - return; - } - - isProcessing = true; - const { task, ctx, wasQueued } = requestQueue.shift()!; - const { Strings } = await getUserWithStringsAndModel(ctx, db); - const reply_to_message_id = replyToMessageId(ctx); - - try { - if (wasQueued) { - await ctx.reply(Strings.ai.startingProcessing, { - ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), - parse_mode: 'Markdown' - }); - } - await task(); - } catch (error) { - console.error("[✨ AI | !] Error processing task:", error); - const errorMessage = error instanceof Error ? 
error.message : String(error); - await ctx.reply(Strings.unexpectedErr.replace("{error}", errorMessage), { - ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), - parse_mode: 'Markdown' - }); - } finally { - isProcessing = false; - processQueue(); - } - } - - async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') { - const reply_to_message_id = replyToMessageId(ctx); - const { user, Strings, customAiModel, aiTemperature, showThinking } = await getUserWithStringsAndModel(ctx, db); - const message = ctx.message.text; - const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown"; - - const model = command === 'ai' - ? (customAiModel || flash_model) - : (command === 'ask' ? flash_model : thinking_model); - - const fixedMsg = message.replace(new RegExp(`^/${command}(@\\w+)?\\s*`), "").trim(); - logger.logCmdStart(author, command, model); - - if (!process.env.ollamaApi) { - await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); - return; - } - - if (!user.aiEnabled) { - await ctx.reply(Strings.ai.disabledForUser, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); - return; - } - - if (fixedMsg.length < 1) { - await ctx.reply(Strings.ai.askNoMessage, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); - return; - } - - const userId = ctx.from!.id; - const userQueueSize = requestQueue.filter(req => req.userId === userId).length; - - if (userQueueSize >= maxUserQueueSize) { - await ctx.reply(Strings.ai.queueFull, { - parse_mode: 'Markdown', - ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) - }); - return; - } - - const task = async () => { - const modelLabel = getModelLabelByName(model); - const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", `\`${modelLabel}\``), { - parse_mode: 'Markdown', - ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) - }); - const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg)); - await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId, Strings, showThinking); - }; - - if (isProcessing) { - requestQueue.push({ task, ctx, wasQueued: true, userId: ctx.from!.id }); - const position = requestQueue.length; - await ctx.reply(Strings.ai.inQueue.replace("{position}", String(position)), { - parse_mode: 'Markdown', - ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) - }); - } else { - requestQueue.push({ task, ctx, wasQueued: false, userId: ctx.from!.id }); - processQueue(); - } - } - - bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => { - if (!ctx.message || !('text' in ctx.message)) return; - const command = ctx.message.text.startsWith('/ask') ? 
'ask' : 'think'; - await aiCommandHandler(ctx as TextContext, command); - }); - - bot.command(["ai"], spamwatchMiddleware, async (ctx) => { - if (!ctx.message || !('text' in ctx.message)) return; - await aiCommandHandler(ctx as TextContext, 'ai'); - }); - - bot.command(["aistats"], spamwatchMiddleware, async (ctx) => { - const { user, Strings } = await getUserWithStringsAndModel(ctx, db); - if (!user) { - await ctx.reply(Strings.userNotFound || "User not found."); - return; - } - const bookCount = Math.max(1, Math.round(user.aiCharacters / 500000)); - const bookWord = bookCount === 1 ? 'book' : 'books'; - const msg = `${Strings.aiStats.header}\n\n${Strings.aiStats.requests.replace('{aiRequests}', user.aiRequests)}\n${Strings.aiStats.characters.replace('{aiCharacters}', user.aiCharacters).replace('{bookCount}', bookCount).replace('books', bookWord)}`; - await ctx.reply(msg, { parse_mode: 'Markdown' }); - }); -} diff --git a/src/db/schema.ts b/src/db/schema.ts deleted file mode 100644 index 09fdb72..0000000 --- a/src/db/schema.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { - integer, - pgTable, - varchar, - timestamp, - boolean, - real -} from "drizzle-orm/pg-core"; - -export const usersTable = pgTable("users", { - telegramId: varchar({ length: 255 }).notNull().primaryKey(), - username: varchar({ length: 255 }).notNull(), - firstName: varchar({ length: 255 }).notNull(), - lastName: varchar({ length: 255 }).notNull(), - aiEnabled: boolean().notNull().default(false), - showThinking: boolean().notNull().default(false), - customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"), - aiTemperature: real().notNull().default(0.9), - aiRequests: integer().notNull().default(0), - aiCharacters: integer().notNull().default(0), - languageCode: varchar({ length: 255 }).notNull(), - createdAt: timestamp().notNull().defaultNow(), - updatedAt: timestamp().notNull().defaultNow(), -}); diff --git a/src/spamwatch b/src/spamwatch deleted file mode 160000 index cee30dc..0000000 --- a/src/spamwatch +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cee30dc64217e7ec235635f5bf5066eac56eec87 diff --git a/start-services.sh b/start-services.sh new file mode 100644 index 0000000..91243dc --- /dev/null +++ b/start-services.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +echo "Starting BOT..." +cd /usr/src/app +bun start 2>&1 | sed "s/^/[BOT] /" & +BOT_PID=$! +echo "BOT started with PID $BOT_PID" + +echo "Starting WEBUI..." +cd /usr/src/app/webui +bun run start 2>&1 | sed "s/^/[WEBUI] /" & +WEBUI_PID=$! 
+echo "WEBUI started with PID $WEBUI_PID" + +echo "Services started:" +echo " Bot PID: $BOT_PID" +echo " WebUI PID: $WEBUI_PID" + +wait $BOT_PID $WEBUI_PID \ No newline at end of file diff --git a/supervisord.conf b/supervisord.conf new file mode 100644 index 0000000..ca1704e --- /dev/null +++ b/supervisord.conf @@ -0,0 +1,31 @@ +[supervisord] +nodaemon=true +user=root +logfile=/dev/stdout +logfile_maxbytes=0 +pidfile=/var/run/supervisord.pid +loglevel=info + +[program:telegram-bot] +command=bun start +directory=/usr/src/app +autostart=true +autorestart=true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +stdout_logfile_backups=0 +stderr_logfile_backups=0 + +[program:webui] +command=bun run start +directory=/usr/src/app/webui +autostart=true +autorestart=true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +stdout_logfile_backups=0 +stderr_logfile_backups=0 \ No newline at end of file diff --git a/telegram/api/server.ts b/telegram/api/server.ts new file mode 100755 index 0000000..3bf1154 --- /dev/null +++ b/telegram/api/server.ts @@ -0,0 +1,102 @@ +import express from "express"; +import { drizzle } from "drizzle-orm/node-postgres"; +import { Client } from "pg"; +import * as schema from "../../database/schema"; +import { eq } from "drizzle-orm"; +import { twoFactorTable, usersTable } from "../../database/schema"; +import { Telegraf } from "telegraf"; +import { getStrings } from "../plugins/checklang"; + +const client = new Client({ connectionString: process.env.databaseUrl }); +const db = drizzle(client, { schema }); + +const bot = new Telegraf(process.env.botToken!); +const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? 
`${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" + +function shouldLogLonger() { + return process.env.longerLogs === 'true'; +} + +export async function startServer() { + await client.connect(); + + const app = express(); + + app.use(express.json()); + + app.get("/health", (req, res) => { + res.send("OK"); + }); + + app.post("/2fa/get", async (req, res) => { + try { + const { userId } = req.body; + + if (!userId) { + console.log("[🌐 API] Missing userId in request"); + return res.status(400).json({ generated: false, error: "User ID is required" }); + } + + if (shouldLogLonger()) { + console.log("[🌐 API] Looking up user:", userId); + } + const user = await db.query.usersTable.findFirst({ + where: eq(usersTable.telegramId, userId), + columns: { + languageCode: true, + }, + }); + + if (!user) { + console.log("[🌐 API] User not found:", userId); + return res.status(404).json({ generated: false, error: "User not found" }); + } + + const code = Math.floor(100000 + Math.random() * 900000).toString(); + + console.log("[🌐 API] Inserting 2FA record"); + + await db.insert(twoFactorTable).values({ + userId, + currentCode: code, + codeAttempts: 0, + codeExpiresAt: new Date(Date.now() + 1000 * 60 * 5), + }).onConflictDoUpdate({ + target: twoFactorTable.userId, + set: { + currentCode: code, + codeAttempts: 0, + codeExpiresAt: new Date(Date.now() + 1000 * 60 * 5), + } + }); + + if (shouldLogLonger()) { + console.log("[🌐 API] Sending 2FA message"); + } + + try { + const Strings = getStrings(user.languageCode); + const message = Strings.twoFactor.codeMessage + .replace("{botName}", botName) + .replace("{code}", code); + await bot.telegram.sendMessage(userId, message, { parse_mode: "MarkdownV2" }); + if (shouldLogLonger()) { + console.log("[🌐 API] Message sent successfully"); + } + } catch (error) { + console.error("[🌐 API] Error sending 2FA code to user", error); + return res.status(500).json({ generated: false, error: "Error sending 2FA message" }); + } + + res.json({ generated: true }); + + } catch (error) { + console.error("[🌐 API] Unexpected error in 2FA endpoint:", error); + return res.status(500).json({ generated: false, error: "Internal server error" }); + } + }); + + app.listen(3030, () => { + console.log("[🌐 API] Running on port 3030\n"); + }); +} \ No newline at end of file diff --git a/src/bot.ts b/telegram/bot.ts old mode 100644 new mode 100755 similarity index 94% rename from src/bot.ts rename to telegram/bot.ts index ba9176f..c75d33a --- a/src/bot.ts +++ b/telegram/bot.ts @@ -8,9 +8,10 @@ import './plugins/ytDlpWrapper'; import { preChecks } from './commands/ai'; import { drizzle } from 'drizzle-orm/node-postgres'; import { Client } from 'pg'; -import * as schema from './db/schema'; +import * as schema from '../database/schema'; import { ensureUserInDb } from './utils/ensure-user'; import { getSpamwatchBlockedCount } from './spamwatch/spamwatch'; +import { startServer } from './api/server'; (async function main() { const { botToken, handlerTimeout, maxRetries, databaseUrl, ollamaEnabled } = process.env; @@ -46,7 +47,7 @@ import { getSpamwatchBlockedCount } from './spamwatch/spamwatch'; let loadedCount = 0; try { const files = fs.readdirSync(commandsPath) - .filter(file => file.endsWith('.ts') || file.endsWith('.js')); + .filter(file => file.endsWith('.ts')); files.forEach((file) => { try { const commandPath = path.join(commandsPath, file); @@ -59,7 +60,7 @@ import { getSpamwatchBlockedCount } from './spamwatch/spamwatch'; console.error(`Failed to load command file ${file}: 
${error.message}`); } }); - console.log(`[🤖 BOT] Loaded ${loadedCount} commands.\n`); + console.log(`[🤖 BOT] Loaded ${loadedCount} commands.`); } catch (error) { console.error(`Failed to read commands directory: ${error.message}`); } @@ -125,5 +126,6 @@ import { getSpamwatchBlockedCount } from './spamwatch/spamwatch'; } loadCommands(); + startServer(); startBot(); })(); diff --git a/telegram/commands/ai.ts b/telegram/commands/ai.ts new file mode 100755 index 0000000..244678e --- /dev/null +++ b/telegram/commands/ai.ts @@ -0,0 +1,1291 @@ +// AI.TS +// by ihatenodejs/Aidan +// +// ----------------------------------------------------------------------- +// +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// +// For more information, please refer to + +import { isOnSpamWatch } from "../spamwatch/spamwatch" +import spamwatchMiddlewareModule from "../spamwatch/Middleware" +import { Telegraf, Context } from "telegraf" +import type { Message } from "telegraf/types" +import { replyToMessageId } from "../utils/reply-to-message-id" +import { getStrings } from "../plugins/checklang" +import axios from "axios" +import { rateLimiter } from "../utils/rate-limiter" +import { logger } from "../utils/log" +import { ensureUserInDb } from "../utils/ensure-user" +import * as schema from '../../database/schema' +import type { NodePgDatabase } from "drizzle-orm/node-postgres" +import { eq, sql, and, gt, isNotNull } from 'drizzle-orm' +import { models, unloadModelAfterB, maxUserQueueSize } from "../../config/ai" +import { isCommandDisabled } from "../utils/check-command-disabled" + +const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch) +export const flash_model = process.env.flashModel || "gemma3:4b" +export const thinking_model = process.env.thinkingModel || "qwen3:4b" + +function isAdmin(ctx: Context): boolean { + const userId = ctx.from?.id; + if (!userId) return false; + const adminArray = process.env.botAdmins ? 
process.env.botAdmins.split(',').map(id => parseInt(id.trim())) : [];
+  return adminArray.includes(userId);
+}
+
+function parseDuration(duration: string): number {
+  const match = duration.match(/^(\d+)([smhdw])$/);
+  if (!match) return -1;
+
+  const value = parseInt(match[1]);
+  const unit = match[2];
+
+  switch (unit) {
+    case 's': return value;
+    case 'm': return value * 60;
+    case 'h': return value * 60 * 60;
+    case 'd': return value * 60 * 60 * 24;
+    case 'w': return value * 60 * 60 * 24 * 7;
+    default: return -1;
+  }
+}
+
+function formatDuration(seconds: number): string {
+  if (seconds < 60) return `${seconds}s`;
+  if (seconds < 3600) return `${Math.floor(seconds / 60)}m`;
+  if (seconds < 86400) return `${Math.floor(seconds / 3600)}h`;
+  if (seconds < 604800) return `${Math.floor(seconds / 86400)}d`;
+  return `${Math.floor(seconds / 604800)}w`;
+}
+
+async function checkUserTimeout(ctx: Context, db: NodePgDatabase, userId: string, Strings: ReturnType<typeof getStrings>): Promise<boolean> {
+  const user = await db.query.usersTable.findFirst({ where: (fields, { eq }) => eq(fields.telegramId, userId) });
+  if (!user) return false;
+
+  if (user.aiTimeoutUntil && user.aiTimeoutUntil > new Date()) {
+    const timeoutEnd = user.aiTimeoutUntil.toISOString();
+    const reply_to_message_id = replyToMessageId(ctx);
+    await ctx.reply(Strings.ai.userTimedOutFromAI.replace("{timeoutEnd}", timeoutEnd), {
+      parse_mode: 'Markdown',
+      ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } })
+    });
+    return true;
+  }
+
+  return false;
+}
+
+type TextContext = Context & { message: Message.TextMessage }
+
+type User = typeof schema.usersTable.$inferSelect
+
+interface OllamaResponse {
+  response: string;
+}
+
+async function usingSystemPrompt(ctx: TextContext, db: NodePgDatabase, botName: string, message: string): Promise<string> {
+  const user = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 });
+  if (user.length === 0) await ensureUserInDb(ctx, db);
+  const userData = user[0];
+  const lang = userData?.languageCode || "en";
+  const Strings = getStrings(lang);
+  const utcDate = new Date().toISOString();
+  const prompt = Strings.ai.systemPrompt
+    .replace("{botName}", botName)
+    .replace("{date}", utcDate)
+    .replace("{message}", message);
+  return prompt;
+}
+
+export function sanitizeForJson(text: string): string {
+  return text
+    .replace(/\\/g, '\\\\')
+    .replace(/"/g, '\\"')
+    .replace(/\n/g, '\\n')
+    .replace(/\r/g, '\\r')
+    .replace(/\t/g, '\\t')
+}
+
+function sanitizeMarkdownForTelegram(text: string): string {
+  let sanitizedText = text;
+
+  const replacements: string[] = [];
+  const addReplacement = (match: string): string => {
+    replacements.push(match);
+    return `___PLACEHOLDER_${replacements.length - 1}___`;
+  };
+
+  sanitizedText = sanitizedText.replace(/```([\s\S]*?)```/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/`([^`]+)`/g, addReplacement);
+  sanitizedText = sanitizedText.replace(/\[([^\]]+)\]\(([^)]+)\)/g, addReplacement);
+
+  const parts = sanitizedText.split(/(___PLACEHOLDER_\d+___)/g);
+  const processedParts = parts.map(part => {
+    if (part.match(/___PLACEHOLDER_\d+___/)) {
+      return part;
+    } else {
+      let processedPart = part;
+      processedPart = processedPart.replace(/^(#{1,6})\s+(.+)/gm, '*$2*');
+      processedPart = processedPart.replace(/^(\s*)[-*]\s+/gm, '$1- ');
+      processedPart = processedPart.replace(/\*\*(.*?)\*\*/g, '*$1*');
+      processedPart = processedPart.replace(/__(.*?)__/g, '*$1*');
+      processedPart = processedPart.replace(/(^|\s)\*(?!\*)([^*]+?)\*(?!\*)/g, '$1_$2_');
+      processedPart = processedPart.replace(/(^|\s)_(?!_)([^_]+?)_(?!_)/g, '$1_$2_');
+      processedPart = processedPart.replace(/~~(.*?)~~/g, '~$1~');
+      processedPart = processedPart.replace(/^\s*┃/gm, '>');
+      processedPart = processedPart.replace(/^>\s?/gm, '> ');
+
+      return processedPart;
+    }
+  });
+
+  sanitizedText = processedParts.join('');
+
+  sanitizedText = sanitizedText.replace(/___PLACEHOLDER_(\d+)___/g, (_, idx) => replacements[Number(idx)]);
+
+  const codeBlockCount = (sanitizedText.match(/```/g) || []).length;
+  if (codeBlockCount % 2 !== 0) {
+    sanitizedText += '\n```';
+  }
+
+  return sanitizedText;
+}
+
+function processThinkingTags(text: string): string {
+  let processedText = text;
+
+  const firstThinkIndex = processedText.indexOf('<think>');
+  if (firstThinkIndex === -1) {
+    return processedText.replace(/<\/think>/g, '___THINK_END___');
+  }
+
+  processedText = processedText.substring(0, firstThinkIndex) + '___THINK_START___' + processedText.substring(firstThinkIndex + '<think>'.length);
+  const lastThinkEndIndex = processedText.lastIndexOf('</think>');
+  if (lastThinkEndIndex !== -1) {
+    processedText = processedText.substring(0, lastThinkEndIndex) + '___THEND___' + processedText.substring(lastThinkEndIndex + '</think>'.length);
+  }
+  processedText = processedText.replace(/<think>/g, '');
+  processedText = processedText.replace(/<\/think>/g, '');
+  processedText = processedText.replace('___THEND___', '___THINK_END___');
+
+  return processedText;
+}
+
+export async function preChecks() {
+  const envs = [
+    "ollamaApi",
+    "flashModel",
+    "thinkingModel",
+  ];
+
+  for (const env of envs) {
+    if (!process.env[env]) {
+      console.error(`[✨ AI | !] ❌ ${env} not set!`);
+      return false;
+    }
+  }
+
+  const ollamaApi = process.env.ollamaApi!;
+  let ollamaOk = false;
+  for (let i = 0; i < 10; i++) {
+    try {
+      const res = await axios.get(ollamaApi, { timeout: 2000 });
+      if (res.status === 200) {
+        ollamaOk = true;
+        break;
+      }
+    } catch (err) {
+      if (i < 9) {
+        await new Promise((resolve) => setTimeout(resolve, 1000));
+      }
+    }
+  }
+
+  if (!ollamaOk) {
+    console.error(`[✨ AI | !] ❌ Ollama API is not responding at ${ollamaApi}`);
+    return false;
+  }
+
+  console.log(`[✨ AI] Pre-checks passed.`);
+  const modelCount = models.reduce((acc, model) => acc + model.models.length, 0);
+  console.log(`[✨ AI] Found ${modelCount} models.`);
+  return true;
+}
+
+function isAxiosError(error: unknown): error is { response?: { data?: { error?: string }, status?: number, statusText?: string }, request?: unknown, message?: string } {
+  return typeof error === 'object' && error !== null && (
+    'response' in error || 'request' in error || 'message' in error
+  );
+}
+
+function extractAxiosErrorMessage(error: unknown): string {
+  if (isAxiosError(error)) {
+    const err = error as { response?: { data?: { error?: string }, status?: number, statusText?: string }, request?: unknown, message?: string };
+    if (err.response && typeof err.response === 'object') {
+      const resp = err.response;
+      if (resp.data && typeof resp.data === 'object' && 'error' in resp.data) {
+        return String(resp.data.error);
+      }
+      if ('status' in resp && 'statusText' in resp) {
+        return `HTTP ${resp.status}: ${resp.statusText}`;
+      }
+      return JSON.stringify(resp.data ??
resp); + } + if (err.request) { + return 'No response received from server.'; + } + if (typeof err.message === 'string') { + return err.message; + } + } + return 'An unexpected error occurred.'; +} + +function containsUrls(text: string): boolean { + return text.includes('http://') || text.includes('https://') || text.includes('.com') || text.includes('.net') || text.includes('.org') || text.includes('.io') || text.includes('.ai') || text.includes('.dev') +} + +async function getResponse(prompt: string, ctx: TextContext, replyGenerating: Message, model: string, aiTemperature: number, originalMessage: string, db: NodePgDatabase, userId: string, Strings: ReturnType, showThinking: boolean, abortController?: AbortController): Promise<{ success: boolean; response?: string; error?: string, messageType?: 'generation' | 'system', executionTimeoutReached?: boolean }> { + if (!ctx.chat) { + return { + success: false, + error: Strings.unexpectedErr.replace("{error}", Strings.ai.noChatFound), + }; + } + const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model; + let status = Strings.ai.statusWaitingRender; + let modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + + const promptCharCount = originalMessage.length; + await db.update(schema.usersTable) + .set({ aiCharacters: sql`${schema.usersTable.aiCharacters} + ${promptCharCount}` }) + .where(eq(schema.usersTable.telegramId, userId)); + const paramSizeStr = models.find(m => m.name === model)?.models.find(m => m.name === model)?.parameterSize?.replace('B', ''); + const shouldKeepAlive = paramSizeStr ? Number(paramSizeStr) > unloadModelAfterB : false; + const user = await db.query.usersTable.findFirst({ where: (fields, { eq }) => eq(fields.telegramId, userId) }); + const maxExecutionTime = user?.aiMaxExecutionTime || 0; + const timeout = maxExecutionTime > 0 ? maxExecutionTime * 1000 : 300000; // 5m + + let executionTimeout: NodeJS.Timeout | null = null; + let executionTimeoutReached = false; + let fullResponse = ""; + if (timeout < 300000) { // 5m + executionTimeout = setTimeout(() => { + if (abortController && !abortController.signal.aborted) { + executionTimeoutReached = true; + abortController.abort(); + } + }, timeout); + } + + try { + const aiResponse = await axios.post( + `${process.env.ollamaApi}/api/generate`, + { + model, + prompt, + stream: true, + keep_alive: shouldKeepAlive ? '1' : '0', + options: { + temperature: aiTemperature + } + }, + { + responseType: "stream", + timeout: 60000, //1m + signal: abortController?.signal, + } + ); + let lastUpdateCharCount = 0; + let sentHeader = false; + let firstChunk = true; + const stream: NodeJS.ReadableStream = aiResponse.data as any; + + const formatThinkingMessage = (text: string) => { + const withPlaceholders = text + .replace(/___THINK_START___/g, `${Strings.ai.thinking}`) + .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`); + return sanitizeMarkdownForTelegram(withPlaceholders); + }; + + let isThinking = false; + let hasStartedThinking = false; + let hasFinishedThinking = false; + + for await (const chunk of stream) { + const lines = chunk.toString().split('\n'); + for (const line of lines) { + if (!line.trim()) continue; + let ln: OllamaResponse; + try { + ln = JSON.parse(line); + } catch (e) { + console.error("[✨ AI | !] 
Error parsing chunk"); + continue; + } + + if (ln.response) { + if (ln.response.includes('')) { + const thinkMatch = ln.response.match(/([\s\S]*?)<\/think>/); + if (thinkMatch && thinkMatch[1].trim().length > 0) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, true); + } else if (!thinkMatch) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, true); + } + if (!hasStartedThinking) { + isThinking = true; + hasStartedThinking = true; + } + } else if (ln.response.includes('')) { + logger.logThinking(ctx.chat.id, replyGenerating.message_id, false); + if (isThinking && !hasFinishedThinking) { + isThinking = false; + hasFinishedThinking = true; + } + } + fullResponse += ln.response; + if (showThinking) { + let displayResponse = processThinkingTags(fullResponse); + + if (firstChunk) { + status = Strings.ai.statusWaitingRender; + modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + formatThinkingMessage(displayResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = displayResponse.length; + sentHeader = true; + firstChunk = false; + continue; + } + const updateEveryChars = Number(process.env.updateEveryChars) || 100; + if (displayResponse.length - lastUpdateCharCount >= updateEveryChars || !sentHeader) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + formatThinkingMessage(displayResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = displayResponse.length; + sentHeader = true; + } + } else { + if (hasStartedThinking && !hasFinishedThinking && isThinking) { + if (firstChunk) { + status = Strings.ai.statusWaitingRender; + modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + Strings.ai.thinking, + { parse_mode: 'Markdown' } + ); + sentHeader = true; + firstChunk = false; + } + } else if (hasFinishedThinking) { + let processedResponse = processThinkingTags(fullResponse); + let displayResponse = processedResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim(); + displayResponse = displayResponse.replace(/___THINK_START___[\s\S]*/g, '').trim(); + + if (firstChunk) { + status = Strings.ai.statusWaitingRender; + modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + Strings.ai.finishedThinking + "\n\n" + sanitizeMarkdownForTelegram(displayResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = displayResponse.length; + sentHeader = true; + firstChunk = false; + continue; + } + + const updateEveryChars = Number(process.env.updateEveryChars) || 100; + if (displayResponse.length - lastUpdateCharCount >= updateEveryChars) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + Strings.ai.finishedThinking + "\n\n" + sanitizeMarkdownForTelegram(displayResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = 
displayResponse.length; + } + } else if (!hasStartedThinking) { + if (firstChunk) { + status = Strings.ai.statusWaitingRender; + modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + sanitizeMarkdownForTelegram(fullResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = fullResponse.length; + sentHeader = true; + firstChunk = false; + continue; + } + + const updateEveryChars = Number(process.env.updateEveryChars) || 100; + if (fullResponse.length - lastUpdateCharCount >= updateEveryChars) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + sanitizeMarkdownForTelegram(fullResponse), + { parse_mode: 'Markdown' } + ); + lastUpdateCharCount = fullResponse.length; + } + } + } + } + } + } + + if (executionTimeout) { + clearTimeout(executionTimeout); + } + + status = Strings.ai.statusRendering; + modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + + if (showThinking) { + let displayResponse = processThinkingTags(fullResponse); + + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + formatThinkingMessage(displayResponse), + { parse_mode: 'Markdown' } + ); + } else { + let processedResponse = processThinkingTags(fullResponse); + let displayResponse = processedResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim(); + displayResponse = displayResponse.replace(/___THINK_START___[\s\S]*/g, '').trim(); + if (hasStartedThinking) { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + Strings.ai.finishedThinking + "\n\n" + sanitizeMarkdownForTelegram(displayResponse), + { parse_mode: 'Markdown' } + ); + } else { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + sanitizeMarkdownForTelegram(displayResponse), + { parse_mode: 'Markdown' } + ); + } + } + + const responseCharCount = fullResponse.length; + await db.update(schema.usersTable) + .set({ + aiCharacters: sql`${schema.usersTable.aiCharacters} + ${responseCharCount}`, + aiRequests: sql`${schema.usersTable.aiRequests} + 1` + }) + .where(eq(schema.usersTable.telegramId, userId)); + + const patchedResponse = processThinkingTags(fullResponse); + + return { + success: true, + response: patchedResponse, + messageType: 'generation', + executionTimeoutReached + }; + } catch (error: unknown) { + if (executionTimeout) { + clearTimeout(executionTimeout); + } + + if (error instanceof Error && (error.name === 'AbortError' || error.message.toLowerCase().includes('aborted'))) { + if (executionTimeoutReached) { + console.log("[✨ AI] Request was aborted due to execution timeout"); + const patchedResponse = processThinkingTags(fullResponse); + return { + success: true, + response: patchedResponse, + messageType: 'generation', + executionTimeoutReached: true + }; + } else { + console.log("[✨ AI] Request was aborted by user"); + return { + success: false, + error: 'Request was aborted' + }; + } + } + + const errorMsg = extractAxiosErrorMessage(error); + console.error("[✨ AI | !] 
Error:", errorMsg); + if (isAxiosError(error) && error.response && typeof error.response === 'object') { + const resp = error.response as { data?: { error?: string }, status?: number }; + const errData = resp.data && typeof resp.data === 'object' && 'error' in resp.data ? (resp.data as { error?: string }).error : undefined; + const errStatus = 'status' in resp ? resp.status : undefined; + if ((typeof errData === 'string' && errData.includes(`model '${model}' not found`)) || errStatus === 404) { + await ctx.telegram.editMessageText( + ctx.chat!.id, + replyGenerating.message_id, + undefined, + Strings.ai.pulling.replace("{model}", `${cleanedModelName}`), + { parse_mode: 'Markdown' } + ); + console.log(`[✨ AI] Pulling ${model} from ollama...`); + try { + await axios.post( + `${process.env.ollamaApi}/api/pull`, + { + model, + stream: false, + timeout: Number(process.env.ollamaApiTimeout) || 10000, + } + ); + } catch (e: unknown) { + const pullMsg = extractAxiosErrorMessage(e); + console.error("[✨ AI | !] Pull error:", pullMsg); + return { + success: false, + error: `❌ Something went wrong while pulling \`${model}\`: ${pullMsg}`, + messageType: 'system' + }; + } + console.log(`[✨ AI] ${model} pulled successfully`); + return { + success: true, + response: Strings.ai.pulled.replace("{model}", `\`${cleanedModelName}\``), + messageType: 'system' + }; + } + } + return { + success: false, + error: errorMsg, + }; + } +} + +async function handleAiReply(ctx: TextContext, model: string, prompt: string, replyGenerating: Message, aiTemperature: number, originalMessage: string, db: NodePgDatabase, userId: string, Strings: ReturnType, showThinking: boolean, abortController?: AbortController) { + const aiResponse = await getResponse(prompt, ctx, replyGenerating, model, aiTemperature, originalMessage, db, userId, Strings, showThinking, abortController); + if (!aiResponse) return; + if (!ctx.chat) return; + if (!aiResponse.success && aiResponse.error === 'Request was aborted') { + return; + } + if (aiResponse.success && aiResponse.response) { + if (aiResponse.messageType === 'system') { + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + aiResponse.response, + { parse_mode: 'Markdown' } + ); + return; + } + + const cleanedModelName = model.includes('/') ? model.split('/').pop()! : model; + const status = Strings.ai.statusComplete; + const modelHeader = Strings.ai.modelHeader + .replace("{model}", `${cleanedModelName}`) + .replace("{temperature}", String(aiTemperature)) + .replace("{status}", status) + "\n\n"; + const urlWarning = containsUrls(originalMessage) ? Strings.ai.urlWarning : ''; + let finalResponse = aiResponse.response; + const hasThinkingContent = finalResponse.includes('___THINK_START___'); + + if (showThinking) { + finalResponse = finalResponse.replace(/___THINK_START___/g, `${Strings.ai.thinking}`) + .replace(/___THINK_END___/g, `${Strings.ai.finishedThinking}`); + } else { + finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*?___THINK_END___/g, '').trim(); + finalResponse = finalResponse.replace(/___THINK_START___[\s\S]*/g, '').trim(); + } + + const thinkingPrefix = (!showThinking && hasThinkingContent) ? `${Strings.ai.finishedThinking}\n\n` : ''; + const timeoutSuffix = aiResponse.executionTimeoutReached ? 
Strings.ai.executionTimeoutReached : ''; + + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + modelHeader + thinkingPrefix + sanitizeMarkdownForTelegram(finalResponse) + urlWarning + timeoutSuffix, + { parse_mode: 'Markdown' } + ); + return; + } + const error = Strings.unexpectedErr.replace("{error}", aiResponse.error); + await rateLimiter.editMessageWithRetry( + ctx, + ctx.chat.id, + replyGenerating.message_id, + error, + { parse_mode: 'Markdown' } + ); +} + +async function getUserWithStringsAndModel(ctx: Context, db: NodePgDatabase): Promise<{ user: User; Strings: ReturnType; languageCode: string; customAiModel: string; aiTemperature: number, showThinking: boolean }> { + const userArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); + let user = userArr[0]; + if (!user) { + await ensureUserInDb(ctx, db); + const newUserArr = await db.query.usersTable.findMany({ where: (fields, { eq }) => eq(fields.telegramId, String(ctx.from!.id)), limit: 1 }); + user = newUserArr[0]; + const Strings = getStrings(user.languageCode); + return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking }; + } + const Strings = getStrings(user.languageCode); + return { user, Strings, languageCode: user.languageCode, customAiModel: user.customAiModel, aiTemperature: user.aiTemperature, showThinking: user.showThinking }; +} + +export function getModelLabelByName(name: string): string { + for (const series of models) { + const found = series.models.find(m => m.name === name); + if (found) return found.label; + } + return name; +} + +export default (bot: Telegraf, db: NodePgDatabase) => { + const botName = bot.botInfo?.first_name && bot.botInfo?.last_name ? `${bot.botInfo.first_name} ${bot.botInfo.last_name}` : "Kowalski" + + interface AiRequest { + task: () => Promise; + ctx: TextContext; + wasQueued: boolean; + userId: number; + model: string; + abortController?: AbortController; + } + + const requestQueue: AiRequest[] = []; + let isProcessing = false; + let lastProcessedUserId: number | null = null; + let currentRequest: AiRequest | null = null; + + async function processQueue() { + if (isProcessing || requestQueue.length === 0) { + return; + } + + isProcessing = true; + + let nextRequestIndex = 0; + if (lastProcessedUserId !== null && requestQueue.length > 1) { + const differentUserIndex = requestQueue.findIndex(req => req.userId !== lastProcessedUserId); + if (differentUserIndex !== -1) { + nextRequestIndex = differentUserIndex; + } + } + + const selectedRequest = requestQueue.splice(nextRequestIndex, 1)[0]; + const { task, ctx, wasQueued, userId } = selectedRequest; + currentRequest = selectedRequest; + + lastProcessedUserId = userId; + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + + try { + if (wasQueued) { + await ctx.reply(Strings.ai.startingProcessing, { + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), + parse_mode: 'Markdown' + }); + } + await task(); + } catch (error) { + console.error("[✨ AI | !] Error processing task:", error); + if (error.name === 'AbortError' || (error instanceof Error && error.message.toLowerCase().includes('aborted'))) { + console.log("[✨ AI] Request was cancelled by user"); + } else { + const errorMessage = error instanceof Error ? 
error.message : String(error); + await ctx.reply(Strings.unexpectedErr.replace("{error}", errorMessage), { + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }), + parse_mode: 'Markdown' + }); + } + } finally { + currentRequest = null; + isProcessing = false; + processQueue(); + } + } + + async function aiCommandHandler(ctx: TextContext, command: 'ask' | 'think' | 'ai') { + const commandId = command === 'ask' || command === 'think' ? 'ai-ask-think' : 'ai-custom'; + if (await isCommandDisabled(ctx, db, commandId)) { + return; + } + + const reply_to_message_id = replyToMessageId(ctx); + const { user, Strings, customAiModel, aiTemperature, showThinking } = await getUserWithStringsAndModel(ctx, db); + const message = ctx.message.text; + const author = ("@" + ctx.from?.username) || ctx.from?.first_name || "Unknown"; + + if (await checkUserTimeout(ctx, db, user.telegramId, Strings)) { + return; + } + + const model = command === 'ai' + ? (customAiModel || flash_model) + : (command === 'ask' ? flash_model : thinking_model); + + const fixedMsg = message.replace(new RegExp(`^/${command}(@\\w+)?\\s*`), "").trim(); + logger.logCmdStart(author, command, model); + + if (!process.env.ollamaApi) { + await ctx.reply(Strings.ai.disabled, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; + } + + if (!user.aiEnabled) { + await ctx.reply(Strings.ai.disabledForUser, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; + } + + if (fixedMsg.length < 1) { + await ctx.reply(Strings.ai.askNoMessage, { parse_mode: 'Markdown', ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) }); + return; + } + + const userId = ctx.from!.id; + const userQueueSize = requestQueue.filter(req => req.userId === userId).length; + + if (userQueueSize >= maxUserQueueSize) { + await ctx.reply(Strings.ai.queueFull, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const abortController = new AbortController(); + const task = async () => { + const modelLabel = getModelLabelByName(model); + const replyGenerating = await ctx.reply(Strings.ai.askGenerating.replace("{model}", `\`${modelLabel}\``), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + const prompt = sanitizeForJson(await usingSystemPrompt(ctx, db, botName, fixedMsg)); + await handleAiReply(ctx, model, prompt, replyGenerating, aiTemperature, fixedMsg, db, user.telegramId, Strings, showThinking, abortController); + }; + + if (isProcessing) { + requestQueue.push({ task, ctx, wasQueued: true, userId: ctx.from!.id, model, abortController }); + const position = requestQueue.length; + await ctx.reply(Strings.ai.inQueue.replace("{position}", String(position)), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } else { + requestQueue.push({ task, ctx, wasQueued: false, userId: ctx.from!.id, model, abortController }); + processQueue(); + } + } + + bot.command(["ask", "think"], spamwatchMiddleware, async (ctx) => { + if (!ctx.message || !('text' in ctx.message)) return; + const command = ctx.message.text.startsWith('/ask') ? 
'ask' : 'think'; + await aiCommandHandler(ctx as TextContext, command); + }); + + bot.command(["ai"], spamwatchMiddleware, async (ctx) => { + if (!ctx.message || !('text' in ctx.message)) return; + await aiCommandHandler(ctx as TextContext, 'ai'); + }); + + bot.command(["aistop"], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'ai-stop')) { + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + const userId = ctx.from!.id; + + if (currentRequest && currentRequest.userId === userId) { + currentRequest.abortController?.abort(); + + try { + await axios.post(`${process.env.ollamaApi}/api/generate`, { + model: currentRequest.model, + keep_alive: 0, + }, { timeout: 5000 }); + } catch (error) { + console.log("[✨ AI] Could not unload model after cancellation:", error.message); + } + + await ctx.reply(Strings.ai.requestStopped, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const queuedRequestIndex = requestQueue.findIndex(req => req.userId === userId); + if (queuedRequestIndex !== -1) { + const removedRequest = requestQueue.splice(queuedRequestIndex, 1)[0]; + removedRequest.abortController?.abort(); + await ctx.reply(Strings.ai.requestRemovedFromQueue, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + await ctx.reply(Strings.ai.noActiveRequest, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + }); + + bot.command(["aistats"], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'ai-stats')) { + return; + } + + const { user, Strings } = await getUserWithStringsAndModel(ctx, db); + if (!user) { + await ctx.reply(Strings.userNotFound || "User not found."); + return; + } + const bookCount = Math.max(1, Math.round(user.aiCharacters / 500000)); + const bookWord = bookCount === 1 ? 'book' : 'books'; + const msg = `${Strings.aiStats.header}\n\n${Strings.aiStats.requests.replace('{aiRequests}', user.aiRequests)}\n${Strings.aiStats.characters.replace('{aiCharacters}', user.aiCharacters).replace('{bookCount}', bookCount).replace('books', bookWord)}`; + await ctx.reply(msg, { parse_mode: 'Markdown' }); + }); + + bot.command("queue", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + + if (requestQueue.length === 0) { + await ctx.reply(Strings.ai.queueEmpty, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + let queueItems = ""; + for (let i = 0; i < requestQueue.length; i++) { + const item = requestQueue[i]; + const username = item.ctx.from?.username || item.ctx.from?.first_name || "Unknown"; + const status = i === 0 && isProcessing ? 
"Processing" : "Queued"; + const modelLabel = getModelLabelByName(item.model); + queueItems += Strings.ai.queueItem + .replace("{username}", username) + .replace("{userId}", String(item.userId)) + .replace("{model}", modelLabel) + .replace("{status}", status); + } + + const queueMsg = Strings.ai.queueList + .replace("{queueItems}", queueItems) + .replace("{totalItems}", String(requestQueue.length)); + + await ctx.reply(queueMsg, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + }); + + bot.command("qdel", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + const args = ctx.message.text.split(' '); + + if (args.length < 2) { + await ctx.reply(Strings.ai.invalidUserId, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const targetUserId = parseInt(args[1]); + if (isNaN(targetUserId)) { + await ctx.reply(Strings.ai.invalidUserId, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const initialLength = requestQueue.length; + const filteredQueue = requestQueue.filter(item => item.userId !== targetUserId); + const removedCount = initialLength - filteredQueue.length; + + if (removedCount === 0) { + await ctx.reply(Strings.ai.noQueueItems.replace("{userId}", String(targetUserId)), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + requestQueue.length = 0; + requestQueue.push(...filteredQueue); + + await ctx.reply(Strings.ai.queueCleared.replace("{count}", String(removedCount)).replace("{userId}", String(targetUserId)), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + }); + + bot.command("qlimit", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + const args = ctx.message.text.split(' '); + + if (args.length < 3) { + await ctx.reply("Usage: /qlimit \nExample: /qlimit 123456789 1h", { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const targetUserId = args[1]; + const durationStr = args[2]; + + if (!/^\d+$/.test(targetUserId)) { + await ctx.reply(Strings.ai.invalidUserId, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const durationSeconds = parseDuration(durationStr); + if (durationSeconds === -1) { + await ctx.reply(Strings.ai.invalidDuration, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + try { + const user = await db.query.usersTable.findFirst({ where: (fields, { eq }) => eq(fields.telegramId, targetUserId) }); + if (!user) { + await ctx.reply(Strings.ai.userNotFound.replace("{userId}", targetUserId), { + parse_mode: 
'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const timeoutEnd = new Date(Date.now() + (durationSeconds * 1000)); + await db.update(schema.usersTable) + .set({ aiTimeoutUntil: timeoutEnd }) + .where(eq(schema.usersTable.telegramId, targetUserId)); + + const filteredQueue = requestQueue.filter(item => item.userId !== parseInt(targetUserId)); + requestQueue.length = 0; + requestQueue.push(...filteredQueue); + + await ctx.reply(Strings.ai.userTimedOut.replace("{userId}", targetUserId).replace("{timeoutEnd}", timeoutEnd.toISOString()), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } catch (error) { + await ctx.reply(Strings.ai.userTimeoutError.replace("{userId}", targetUserId).replace("{error}", error.message), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } + }); + + bot.command("setexec", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + const args = ctx.message.text.split(' '); + + if (args.length < 3) { + await ctx.reply("Usage: /setexec \nExample: /setexec 123456789 5m\nUse 'unlimited' to remove limit.", { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const targetUserId = args[1]; + const durationStr = args[2]; + + if (!/^\d+$/.test(targetUserId)) { + await ctx.reply(Strings.ai.invalidUserId, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + let durationSeconds = 0; + if (durationStr.toLowerCase() !== 'unlimited') { + durationSeconds = parseDuration(durationStr); + if (durationSeconds === -1) { + await ctx.reply(Strings.ai.invalidDuration, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + } + + try { + const user = await db.query.usersTable.findFirst({ where: (fields, { eq }) => eq(fields.telegramId, targetUserId) }); + if (!user) { + await ctx.reply(Strings.ai.userNotFound.replace("{userId}", targetUserId), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + await db.update(schema.usersTable) + .set({ aiMaxExecutionTime: durationSeconds }) + .where(eq(schema.usersTable.telegramId, targetUserId)); + + if (durationSeconds === 0) { + await ctx.reply(Strings.ai.userExecTimeRemoved.replace("{userId}", targetUserId), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } else { + await ctx.reply(Strings.ai.userExecTimeSet.replace("{duration}", formatDuration(durationSeconds)).replace("{userId}", targetUserId), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } + } catch (error) { + await ctx.reply(Strings.ai.userExecTimeError.replace("{userId}", targetUserId).replace("{error}", error.message), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } + 
}); + + bot.command("rlimit", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + const args = ctx.message.text.split(' '); + + if (args.length < 2) { + await ctx.reply("Usage: /rlimit \nExample: /rlimit 123456789", { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + const targetUserId = args[1]; + + if (!/^\d+$/.test(targetUserId)) { + await ctx.reply(Strings.ai.invalidUserId, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + try { + const user = await db.query.usersTable.findFirst({ where: (fields, { eq }) => eq(fields.telegramId, targetUserId) }); + if (!user) { + await ctx.reply(Strings.ai.userNotFound.replace("{userId}", targetUserId), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + await db.update(schema.usersTable) + .set({ + aiTimeoutUntil: null, + aiMaxExecutionTime: 0 + }) + .where(eq(schema.usersTable.telegramId, targetUserId)); + + await ctx.reply(Strings.ai.userLimitsRemoved.replace("{userId}", targetUserId), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } catch (error) { + await ctx.reply(Strings.ai.userLimitRemoveError.replace("{userId}", targetUserId).replace("{error}", error.message), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } + }); + + bot.command("limits", spamwatchMiddleware, async (ctx) => { + if (!isAdmin(ctx)) { + const { Strings } = await getUserWithStringsAndModel(ctx, db); + await ctx.reply(Strings.noPermission); + return; + } + + const { Strings } = await getUserWithStringsAndModel(ctx, db); + const reply_to_message_id = replyToMessageId(ctx); + + try { + const usersWithTimeouts = await db.query.usersTable.findMany({ + where: and( + isNotNull(schema.usersTable.aiTimeoutUntil), + gt(schema.usersTable.aiTimeoutUntil, new Date()) + ), + columns: { + telegramId: true, + username: true, + firstName: true, + aiTimeoutUntil: true + } + }); + + const usersWithExecLimits = await db.query.usersTable.findMany({ + where: gt(schema.usersTable.aiMaxExecutionTime, 0), + columns: { + telegramId: true, + username: true, + firstName: true, + aiMaxExecutionTime: true + } + }); + + if (usersWithTimeouts.length === 0 && usersWithExecLimits.length === 0) { + await ctx.reply(Strings.ai.noLimitsSet, { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + return; + } + + let limitsText = Strings.ai.limitsHeader + "\n\n"; + + if (usersWithTimeouts.length > 0) { + limitsText += Strings.ai.timeoutLimitsHeader + "\n"; + for (const user of usersWithTimeouts) { + const displayName = user.username || user.firstName || "Unknown"; + const timeoutEnd = user.aiTimeoutUntil!.toISOString(); + limitsText += Strings.ai.timeoutLimitItem + .replace("{displayName}", displayName) + .replace("{userId}", user.telegramId) + .replace("{timeoutEnd}", timeoutEnd) + "\n"; + } + limitsText += "\n"; + } + + if (usersWithExecLimits.length > 0) { + limitsText += 
Strings.ai.execLimitsHeader + "\n"; + for (const user of usersWithExecLimits) { + const displayName = user.username || user.firstName || "Unknown"; + const execTime = formatDuration(user.aiMaxExecutionTime!); + limitsText += Strings.ai.execLimitItem + .replace("{displayName}", displayName) + .replace("{userId}", user.telegramId) + .replace("{execTime}", execTime) + "\n"; + } + } + + await ctx.reply(limitsText.trim(), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } catch (error) { + await ctx.reply(Strings.ai.limitsListError.replace("{error}", error.message), { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + }); + } + }); +} diff --git a/src/commands/animal.ts b/telegram/commands/animal.ts old mode 100644 new mode 100755 similarity index 83% rename from src/commands/animal.ts rename to telegram/commands/animal.ts index 09c7ba2..a409e75 --- a/src/commands/animal.ts +++ b/telegram/commands/animal.ts @@ -6,6 +6,7 @@ import axios from 'axios'; import { Context, Telegraf } from 'telegraf'; import { replyToMessageId } from '../utils/reply-to-message-id'; import { languageCode } from '../utils/language-code'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -130,10 +131,29 @@ export const soggyHandler = async (ctx: Context & { message: { text: string } }) }; }; -export default (bot: Telegraf) => { - bot.command("duck", spamwatchMiddleware, duckHandler); - bot.command("fox", spamwatchMiddleware, foxHandler); - bot.command("dog", spamwatchMiddleware, dogHandler); - bot.command("cat", spamwatchMiddleware, catHandler); - bot.command(['soggy', 'soggycat'], spamwatchMiddleware, soggyHandler); -} \ No newline at end of file +export default (bot: Telegraf, db: any) => { + bot.command("duck", spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'animals-basic')) return; + await duckHandler(ctx); + }); + + bot.command("fox", spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'animals-basic')) return; + await foxHandler(ctx); + }); + + bot.command("dog", spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'animals-basic')) return; + await dogHandler(ctx); + }); + + bot.command("cat", spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'animals-basic')) return; + await catHandler(ctx); + }); + + bot.command(['soggy', 'soggycat'], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'soggy-cat')) return; + await soggyHandler(ctx); + }); +} diff --git a/src/commands/codename.ts b/telegram/commands/codename.ts old mode 100644 new mode 100755 similarity index 93% rename from src/commands/codename.ts rename to telegram/commands/codename.ts index a7d668c..cb2734d --- a/src/commands/codename.ts +++ b/telegram/commands/codename.ts @@ -6,8 +6,9 @@ import axios from 'axios'; import verifyInput from '../plugins/verifyInput'; import { Context, Telegraf } from 'telegraf'; import { replyToMessageId } from '../utils/reply-to-message-id'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -53,6 +54,8 @@ async function 
getUserAndStrings(ctx: Context, db?: NodePgDatabase, db) => { bot.command(['codename', 'whatis'], spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'codename-lookup')) return; + const userInput = ctx.message.text.split(" ").slice(1).join(" "); const { Strings } = await getUserAndStrings(ctx, db); const { noCodename } = Strings.codenameCheck; diff --git a/src/commands/crew.ts b/telegram/commands/crew.ts old mode 100644 new mode 100755 similarity index 99% rename from src/commands/crew.ts rename to telegram/commands/crew.ts index 762bb93..f32950a --- a/src/commands/crew.ts +++ b/telegram/commands/crew.ts @@ -5,7 +5,7 @@ import os from 'os'; import { exec } from 'child_process'; import { error } from 'console'; import { Context, Telegraf } from 'telegraf'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); diff --git a/src/commands/fun.ts b/telegram/commands/fun.ts old mode 100644 new mode 100755 similarity index 87% rename from src/commands/fun.ts rename to telegram/commands/fun.ts index e045dea..1d8f6e7 --- a/src/commands/fun.ts +++ b/telegram/commands/fun.ts @@ -3,8 +3,9 @@ import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { Context, Telegraf } from 'telegraf'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -78,6 +79,8 @@ function getRandomInt(max: number) { export default (bot: Telegraf, db) => { bot.command('random', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'fun-random')) return; + const { Strings } = await getUserAndStrings(ctx, db); const randomValue = getRandomInt(10); const randomVStr = Strings.randomNum.replace('{number}', randomValue); @@ -91,26 +94,33 @@ export default (bot: Telegraf, db) => { // TODO: maybe send custom stickers to match result of the roll? 
i think there are pre-existing ones bot.command('dice', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'games-dice')) return; await handleDiceCommand(ctx, '🎲', 4000, db); }); bot.command('slot', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { - await handleDiceCommand(ctx, '��', 3000, db); + if (await isCommandDisabled(ctx, db, 'games-dice')) return; + await handleDiceCommand(ctx, '🎰', 3000, db); }); bot.command('ball', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'games-dice')) return; await handleDiceCommand(ctx, '⚽', 3000, db); }); bot.command('dart', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'games-dice')) return; await handleDiceCommand(ctx, '🎯', 3000, db); }); bot.command('bowling', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'games-dice')) return; await handleDiceCommand(ctx, '🎳', 3000, db); }); bot.command('idice', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'infinite-dice')) return; + const { Strings } = await getUserAndStrings(ctx, db); ctx.replyWithSticker( Resources.infiniteDice, { @@ -119,10 +129,12 @@ export default (bot: Telegraf, db) => { }); bot.command('furry', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'fun-random')) return; sendRandomReply(ctx, Resources.furryGif, 'furryAmount', db); }); bot.command('gay', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'fun-random')) return; sendRandomReply(ctx, Resources.gayFlag, 'gayAmount', db); }); }; \ No newline at end of file diff --git a/src/commands/gsmarena.ts b/telegram/commands/gsmarena.ts old mode 100644 new mode 100755 similarity index 98% rename from src/commands/gsmarena.ts rename to telegram/commands/gsmarena.ts index b4014a1..b345a00 --- a/src/commands/gsmarena.ts +++ b/telegram/commands/gsmarena.ts @@ -11,6 +11,7 @@ import { parse } from 'node-html-parser'; import { getDeviceByCodename } from './codename'; import { getStrings } from '../plugins/checklang'; import { languageCode } from '../utils/language-code'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -212,8 +213,10 @@ function getUsername(ctx){ const deviceSelectionCache: Record = {}; const lastSelectionMessageId: Record = {}; -export default (bot) => { +export default (bot, db) => { bot.command(['d', 'device'], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'device-specs')) return; + const userId = ctx.from.id; const userName = getUsername(ctx); const Strings = getStrings(languageCode(ctx)); diff --git a/src/commands/help.ts b/telegram/commands/help.ts old mode 100644 new mode 100755 similarity index 93% rename from src/commands/help.ts rename to telegram/commands/help.ts index f01f5e5..9937bab --- a/src/commands/help.ts +++ b/telegram/commands/help.ts @@ -22,6 +22,13 @@ async function getUserAndStrings(ctx: Context, db?: any): Promise<{ Strings: any return { Strings, languageCode }; } +function isAdmin(ctx: Context): boolean { + const userId = ctx.from?.id; + if (!userId) return false; + const 
adminArray = process.env.botAdmins ? process.env.botAdmins.split(',').map(id => parseInt(id.trim())) : []; + return adminArray.includes(userId); +} + interface MessageOptions { parse_mode: string; disable_web_page_preview: boolean; @@ -136,7 +143,8 @@ export default (bot, db) => { }); bot.action('helpAi', async (ctx) => { const { Strings } = await getUserAndStrings(ctx, db); - await ctx.editMessageText(Strings.ai.helpDesc, options(Strings)); + const helpText = isAdmin(ctx) ? Strings.ai.helpDescAdmin : Strings.ai.helpDesc; + await ctx.editMessageText(helpText, options(Strings)); await ctx.answerCbQuery(); }); bot.action('helpBack', async (ctx) => { diff --git a/src/commands/http.ts b/telegram/commands/http.ts old mode 100644 new mode 100755 similarity index 94% rename from src/commands/http.ts rename to telegram/commands/http.ts index 9ef0fdb..5fd4ef9 --- a/src/commands/http.ts +++ b/telegram/commands/http.ts @@ -5,9 +5,10 @@ import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import axios from 'axios'; import verifyInput from '../plugins/verifyInput'; import { Context, Telegraf } from 'telegraf'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import { languageCode } from '../utils/language-code'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -34,6 +35,8 @@ async function getUserAndStrings(ctx: Context, db?: NodePgDatabase, db) => { bot.command("http", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'http-status')) return; + const reply_to_message_id = ctx.message.message_id; const { Strings } = await getUserAndStrings(ctx, db); const userInput = ctx.message.text.split(' ')[1]; @@ -75,6 +78,8 @@ export default (bot: Telegraf, db) => { }); bot.command("httpcat", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'animals-basic')) return; + const Strings = getStrings(languageCode(ctx)); const reply_to_message_id = ctx.message.message_id; const userInput = ctx.message.text.split(' ').slice(1).join(' ').replace(/\s+/g, ''); diff --git a/src/commands/info.ts b/telegram/commands/info.ts old mode 100644 new mode 100755 similarity index 93% rename from src/commands/info.ts rename to telegram/commands/info.ts index c9f8042..597660b --- a/src/commands/info.ts +++ b/telegram/commands/info.ts @@ -2,8 +2,9 @@ import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { Context, Telegraf } from 'telegraf'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -62,6 +63,8 @@ async function getChatInfo(ctx: Context & { message: { text: string } }, db: any export default (bot: Telegraf, db) => { bot.command('chatinfo', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'info-commands')) return; + const chatInfo = await getChatInfo(ctx, db); ctx.reply( chatInfo, { @@ -72,6 +75,8 @@ export default (bot: Telegraf, db) => { }); 
bot.command('userinfo', spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'info-commands')) return; + const userInfo = await getUserInfo(ctx, db); ctx.reply( userInfo, { diff --git a/src/commands/lastfm.ts b/telegram/commands/lastfm.ts old mode 100644 new mode 100755 similarity index 95% rename from src/commands/lastfm.ts rename to telegram/commands/lastfm.ts index d51ca25..2ccecbf --- a/src/commands/lastfm.ts +++ b/telegram/commands/lastfm.ts @@ -4,13 +4,14 @@ import axios from 'axios'; import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); const scrobbler_url = Resources.lastFmApi; const api_key = process.env.lastKey; -const dbFile = 'src/props/lastfm.json'; +const dbFile = 'telegram/props/lastfm.json'; let users = {}; function loadUsers() { @@ -60,10 +61,12 @@ function getFromLast(track) { return imageUrl; } -export default (bot) => { +export default (bot, db) => { loadUsers(); - bot.command('setuser', (ctx) => { + bot.command('setuser', async (ctx) => { + if (await isCommandDisabled(ctx, db, 'lastfm')) return; + const userId = ctx.from.id; const Strings = getStrings(ctx.from.language_code); const lastUser = ctx.message.text.split(' ')[1]; @@ -89,6 +92,8 @@ export default (bot) => { }); bot.command(['lt', 'lmu', 'last', 'lfm'], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'lastfm')) return; + const userId = ctx.from.id; const Strings = getStrings(ctx.from.language_code); const lastfmUser = users[userId]; diff --git a/src/commands/main.ts b/telegram/commands/main.ts old mode 100644 new mode 100755 similarity index 99% rename from src/commands/main.ts rename to telegram/commands/main.ts index f86ddea..9de0b69 --- a/src/commands/main.ts +++ b/telegram/commands/main.ts @@ -3,7 +3,7 @@ import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { Context, Telegraf } from 'telegraf'; import { replyToMessageId } from '../utils/reply-to-message-id'; -import * as schema from '../db/schema'; +import * as schema from '../../database/schema'; import { eq } from 'drizzle-orm'; import { ensureUserInDb } from '../utils/ensure-user'; import type { NodePgDatabase } from 'drizzle-orm/node-postgres'; diff --git a/src/commands/modarchive.ts b/telegram/commands/modarchive.ts old mode 100644 new mode 100755 similarity index 90% rename from src/commands/modarchive.ts rename to telegram/commands/modarchive.ts index 5f7333b..5d451a6 --- a/src/commands/modarchive.ts +++ b/telegram/commands/modarchive.ts @@ -8,6 +8,7 @@ import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { languageCode } from '../utils/language-code'; import { Context, Telegraf } from 'telegraf'; import { replyToMessageId } from '../utils/reply-to-message-id'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -79,6 +80,9 @@ export const modarchiveHandler = async (ctx: Context) => { }); }; -export default (bot: Telegraf) => { - bot.command(['modarchive', 'tma'], spamwatchMiddleware, modarchiveHandler); +export default (bot: Telegraf, db) => { + bot.command(['modarchive', 'tma'], spamwatchMiddleware, async (ctx) => { + 
if (await isCommandDisabled(ctx, db, 'modarchive')) return; + await modarchiveHandler(ctx); + }); }; diff --git a/src/commands/ponyapi.ts b/telegram/commands/ponyapi.ts old mode 100644 new mode 100755 similarity index 96% rename from src/commands/ponyapi.ts rename to telegram/commands/ponyapi.ts index 7f6320c..2bbc841 --- a/src/commands/ponyapi.ts +++ b/telegram/commands/ponyapi.ts @@ -7,6 +7,7 @@ import verifyInput from '../plugins/verifyInput'; import { Telegraf, Context } from 'telegraf'; import { languageCode } from '../utils/language-code'; import { replyToMessageId } from '../utils/reply-to-message-id'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -68,14 +69,18 @@ function sendPhoto(ctx: Context, photo: string, caption: string, reply_to_messag }); } -export default (bot: Telegraf) => { +export default (bot: Telegraf, db) => { bot.command("mlp", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'mlp-content')) return; + const Strings = getStrings(languageCode(ctx)); const reply_to_message_id = replyToMessageId(ctx); sendReply(ctx, Strings.ponyApi.helpDesc, reply_to_message_id); }); bot.command("mlpchar", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'mlp-content')) return; + const { message } = ctx; const reply_to_message_id = replyToMessageId(ctx); const Strings = getStrings(languageCode(ctx) || 'en'); @@ -118,6 +123,8 @@ export default (bot: Telegraf) => { }); bot.command("mlpep", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'mlp-content')) return; + const Strings = getStrings(languageCode(ctx) || 'en'); const userInput = ctx.message.text.split(' ').slice(1).join(' ').replace(" ", "+"); const reply_to_message_id = replyToMessageId(ctx); @@ -194,6 +201,8 @@ export default (bot: Telegraf) => { }); bot.command("mlpcomic", spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'mlp-content')) return; + const Strings = getStrings(languageCode(ctx) || 'en'); const userInput = ctx.message.text.split(' ').slice(1).join(' ').replace(" ", "+"); const reply_to_message_id = replyToMessageId(ctx); diff --git a/src/commands/quotes.ts b/telegram/commands/quotes.ts old mode 100644 new mode 100755 similarity index 100% rename from src/commands/quotes.ts rename to telegram/commands/quotes.ts diff --git a/src/commands/randompony.ts b/telegram/commands/randompony.ts old mode 100644 new mode 100755 similarity index 88% rename from src/commands/randompony.ts rename to telegram/commands/randompony.ts index de24016..4ace245 --- a/src/commands/randompony.ts +++ b/telegram/commands/randompony.ts @@ -6,6 +6,7 @@ import axios from 'axios'; import { Telegraf, Context } from 'telegraf'; import { languageCode } from '../utils/language-code'; import { replyToMessageId } from '../utils/reply-to-message-id'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -43,6 +44,9 @@ export const randomponyHandler = async (ctx: Context & { message: { text: string } }; -export default (bot: Telegraf) => { - bot.command(["rpony", "randompony", "mlpart"], spamwatchMiddleware, randomponyHandler); +export default (bot: Telegraf, db) => { + bot.command(["rpony", "randompony", 
"mlpart"], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'random-pony')) return; + await randomponyHandler(ctx); + }); } \ No newline at end of file diff --git a/src/commands/weather.ts b/telegram/commands/weather.ts old mode 100644 new mode 100755 similarity index 92% rename from src/commands/weather.ts rename to telegram/commands/weather.ts index f72c343..26a1b04 --- a/src/commands/weather.ts +++ b/telegram/commands/weather.ts @@ -9,6 +9,7 @@ import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import verifyInput from '../plugins/verifyInput'; import { Context, Telegraf } from 'telegraf'; +import { isCommandDisabled } from '../utils/check-command-disabled'; const spamwatchMiddleware = spamwatchMiddlewareModule(isOnSpamWatch); @@ -34,10 +35,12 @@ function getLocaleUnit(countryCode: string) { } } -export default (bot: Telegraf) => { - bot.command(['clima', 'weather'], spamwatchMiddleware, async (ctx) => { +export default (bot: Telegraf, db: any) => { + bot.command(['weather', 'clima'], spamwatchMiddleware, async (ctx: Context & { message: { text: string } }) => { + if (await isCommandDisabled(ctx, db, 'weather')) return; + const reply_to_message_id = ctx.message.message_id; - const userLang = ctx.from.language_code || "en-US"; + const userLang = ctx.from?.language_code || "en-US"; const Strings = getStrings(userLang); const userInput = ctx.message.text.split(' ').slice(1).join(' '); const { provideLocation } = Strings.weatherStatus diff --git a/src/commands/wiki.ts b/telegram/commands/wiki.ts old mode 100644 new mode 100755 similarity index 100% rename from src/commands/wiki.ts rename to telegram/commands/wiki.ts diff --git a/src/commands/youtube.ts b/telegram/commands/youtube.ts old mode 100644 new mode 100755 similarity index 96% rename from src/commands/youtube.ts rename to telegram/commands/youtube.ts index 96d5d80..5b20029 --- a/src/commands/youtube.ts +++ b/telegram/commands/youtube.ts @@ -2,6 +2,7 @@ import { getStrings } from '../plugins/checklang'; import { isOnSpamWatch } from '../spamwatch/spamwatch'; import spamwatchMiddlewareModule from '../spamwatch/Middleware'; import { execFile } from 'child_process'; +import { isCommandDisabled } from '../utils/check-command-disabled'; import os from 'os'; import fs from 'fs'; import path from 'path'; @@ -72,8 +73,10 @@ const isValidUrl = (url: string): boolean => { } }; -export default (bot) => { +export default (bot, db) => { bot.command(['yt', 'ytdl', 'sdl', 'video', 'dl'], spamwatchMiddleware, async (ctx) => { + if (await isCommandDisabled(ctx, db, 'youtube-download')) return; + const Strings = getStrings(ctx.from.language_code); const ytDlpPath = getYtDlpPath(); const userId: number = ctx.from.id; @@ -113,7 +116,7 @@ export default (bot) => { console.log(`\nDownload Request:\nURL: ${videoUrl}\nYOUTUBE: ${videoIsYoutube}\n`) if (fs.existsSync(path.resolve(__dirname, "../props/cookies.txt"))) { - cmdArgs = "--max-filesize 2G --no-playlist --cookies src/props/cookies.txt --merge-output-format mp4 -o"; + cmdArgs = "--max-filesize 2G --no-playlist --cookies telegram/props/cookies.txt --merge-output-format mp4 -o"; } else { cmdArgs = `--max-filesize 2G --no-playlist --merge-output-format mp4 -o`; } diff --git a/src/locales/config.ts b/telegram/locales/config.ts old mode 100644 new mode 100755 similarity index 100% rename from src/locales/config.ts rename to telegram/locales/config.ts diff --git a/src/locales/english.json 
b/telegram/locales/english.json old mode 100644 new mode 100755 similarity index 82% rename from src/locales/english.json rename to telegram/locales/english.json index b20e2ec..76e630d --- a/src/locales/english.json +++ b/telegram/locales/english.json @@ -19,6 +19,7 @@ }, "unexpectedErr": "An unexpected error occurred: {error}", "errInvalidOption": "Whoops! Invalid option!", + "commandDisabled": "🚫 This command is currently disabled for your account.\n\nYou can enable it in the web interface: {frontUrl}", "kickingMyself": "*Since you don't need me, I'll leave.*", "kickingMyselfErr": "Error leaving the chat.", "noPermission": "You don't have permission to run this command.", @@ -67,7 +68,8 @@ "animalCommandsDesc": "🐱 *Animals*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Sends the [Soggy cat meme](https://knowyourmeme.com/memes/soggy-cat)\n- /cat: Sends a random picture of a cat.\n- /fox: Sends a random picture of a fox.\n- /duck: Sends a random picture of a duck.\n- /dog: Sends a random picture of a dog.\n- /httpcat ``: Send cat memes from http.cat with your specified HTTP code. Example: `/httpcat 404`", "ai": { "helpEntry": "✨ AI Commands", - "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI model\n- /think ``: Ask a thinking model about a question\n- /ai ``: Ask your custom-set AI model a question\n- /aistats: Show your AI usage stats", + "helpDesc": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI model\n- /think ``: Ask a thinking model about a question\n- /ai ``: Ask your custom-set AI model a question\n- /aistop: Stop your current AI request\n- /aistats: Show your AI usage stats", + "helpDescAdmin": "✨ *AI Commands*\n\n- /ask ``: Ask a question to an AI model\n- /think ``: Ask a thinking model about a question\n- /ai ``: Ask your custom-set AI model a question\n- /aistop: Stop your current AI request\n- /aistats: Show your AI usage stats\n\n*Admin Commands:*\n- /queue: List current AI queue\n- /qdel ``: Clear queue items for a user\n- /qlimit `` ``: Timeout user from AI commands\n- /setexec `` ``: Set max execution time for user\n- /rlimit ``: Remove all AI limits for user\n- /limits: List all current AI limits", "disabled": "✨ AI features are currently disabled globally.", "disabledForUser": "✨ AI features are disabled for your account. You can enable them with the /settings command.", "pulling": "🔄 Model {model} not found locally, pulling...", @@ -88,7 +90,37 @@ "noChatFound": "No chat found", "pulled": "✅ Pulled {model} successfully, please retry the command.", "selectTemperature": "*Please select a temperature:*", - "temperatureExplanation": "Temperature controls the randomness of the AI's responses. Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random." + "temperatureExplanation": "Temperature controls the randomness of the AI's responses. 
Lower values (e.g., 0.2) make the model more focused and deterministic, while higher values (e.g., 1.2 or above) make it more creative and random.", + "queueEmpty": "✅ The AI queue is currently empty.", + "queueList": "📋 *AI Queue Status*\n\n{queueItems}\n\n*Total items:* {totalItems}", + "queueItem": "• User: {username} ({userId})\n Model: {model}\n Status: {status}\n", + "queueCleared": "✅ Cleared {count} queue items for user {userId}.", + "queueClearError": "❌ Error clearing queue for user {userId}: {error}", + "noQueueItems": "ℹ️ No queue items found for user {userId}.", + "userTimedOut": "⏱️ User {userId} has been timed out from AI commands until {timeoutEnd}.", + "userTimeoutRemoved": "✅ AI timeout removed for user {userId}.", + "userTimeoutError": "❌ Error setting timeout for user {userId}: {error}", + "invalidDuration": "❌ Invalid duration format. Use: 1m, 1h, 1d, 1w, etc.", + "userExecTimeSet": "⏱️ Max execution time set to {duration} for user {userId}.", + "userExecTimeRemoved": "✅ Max execution time limit removed for user {userId}.", + "userExecTimeError": "❌ Error setting execution time for user {userId}: {error}", + "invalidUserId": "❌ Invalid user ID. Please provide a valid Telegram user ID.", + "userNotFound": "❌ User {userId} not found in database.", + "userTimedOutFromAI": "⏱️ You are currently timed out from AI commands until {timeoutEnd}.", + "requestTooLong": "⏱️ Your request is taking too long. It has been cancelled to prevent system overload.", + "userLimitsRemoved": "✅ All AI limits removed for user {userId}.", + "userLimitRemoveError": "❌ Error removing limits for user {userId}: {error}", + "limitsHeader": "📋 *Current AI Limits*", + "noLimitsSet": "✅ No AI limits are currently set.", + "timeoutLimitsHeader": "*🔒 Users with AI Timeouts:*", + "timeoutLimitItem": "• {displayName} ({userId}) - Until: {timeoutEnd}", + "execLimitsHeader": "*⏱️ Users with Execution Time Limits:*", + "execLimitItem": "• {displayName} ({userId}) - Max: {execTime}", + "limitsListError": "❌ Error retrieving limits: {error}", + "requestStopped": "🛑 Your AI request has been stopped.", + "requestRemovedFromQueue": "🛑 Your AI request has been removed from the queue.", + "noActiveRequest": "ℹ️ You don't have any active AI requests to stop.", + "executionTimeoutReached": "\n\n⏱️ Max execution time limit reached!" }, "maInvalidModule": "Please provide a valid module ID from The Mod Archive.\nExample: `/modarchive 81574`", "maDownloadError": "Error downloading the file. Check the module ID and try again.", @@ -196,5 +228,10 @@ "header": "✨ *Your AI Usage Stats*", "requests": "*Total AI Requests:* {aiRequests}", "characters": "*Total AI Characters:* {aiCharacters}\n_That's around {bookCount} books of text!_" + }, + "twoFactor": { + "helpEntry": "🔒 2FA", + "helpDesc": "🔒 *2FA*\n\n- /2fa: Show your 2FA settings", + "codeMessage": "🔒 *{botName} 2FA*\n\nYour 2FA code is: `{code}`" } } diff --git a/src/locales/portuguese.json b/telegram/locales/portuguese.json old mode 100644 new mode 100755 similarity index 81% rename from src/locales/portuguese.json rename to telegram/locales/portuguese.json index eda8cf4..9ef22ed --- a/src/locales/portuguese.json +++ b/telegram/locales/portuguese.json @@ -18,6 +18,7 @@ }, "unexpectedErr": "Ocorreu um erro inesperado: {error}", "errInvalidOption": "Ops! 
Opção inválida!", + "commandDisabled": "🚫 Este comando está atualmente desativado para sua conta.\n\nVocê pode habilitá-lo na interface web: {frontUrl}", "kickingMyself": "*Já que você não precisa de mim, vou sair daqui.*", "kickingMyselfErr": "Erro ao sair do chat.", "noPermission": "Você não tem permissão para executar este comando.", @@ -66,7 +67,8 @@ "animalCommandsDesc": "🐱 *Animais*\n\n- /soggy | /soggycat `<1 | 2 | 3 | 4 | orig | thumb | sticker | alt>`: Envia o [meme do gato encharcado](https://knowyourmeme.com/memes/soggy-cat)\n- /cat - Envia uma foto aleatória de um gato.\n- /fox - Envia uma foto aleatória de uma raposa.\n- /duck - Envia uma foto aleatória de um pato.\n- /dog - Envia uma imagem aleatória de um cachorro.\n- /httpcat ``: Envia memes de gato do http.cat com o código HTTP especificado. Exemplo: `/httpcat 404`", "ai": { "helpEntry": "✨ Comandos de IA", - "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento\n- /ai ``: Fazer uma pergunta a um modelo de IA personalizado\n- /aistats: Mostra suas estatísticas de uso de IA", + "helpDesc": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento\n- /ai ``: Fazer uma pergunta a um modelo de IA personalizado\n- /aistop: Parar sua solicitação de IA atual\n- /aistats: Mostra suas estatísticas de uso de IA", + "helpDescAdmin": "✨ *Comandos de IA*\n\n- /ask ``: Fazer uma pergunta a uma IA\n- /think ``: Fazer uma pergunta a um modelo de pensamento\n- /ai ``: Fazer uma pergunta a um modelo de IA personalizado\n- /aistop: Parar sua solicitação de IA atual\n- /aistats: Mostra suas estatísticas de uso de IA\n\n*Comandos de Admin:*\n- /queue: Listar fila atual de IA\n- /qdel ``: Limpar itens da fila para um usuário\n- /qlimit `` ``: Timeout de usuário dos comandos de IA\n- /setexec `` ``: Definir tempo máximo de execução para usuário\n- /rlimit ``: Remover todos os limites de IA para usuário\n- /limits: Listar todos os limites atuais de IA", "disabled": "A AIApi foi desativada\\.", "disabledForUser": "As funções de IA estão desativadas para a sua conta. Você pode ativá-las com o comando /settings.", "pulling": "🔄 Modelo {model} não encontrado localmente, baixando...", @@ -91,7 +93,37 @@ "statusComplete": "✅ Completo!", "modelHeader": "🤖 *{model}* 🌡️ *{temperature}* {status}", "noChatFound": "Nenhum chat encontrado", - "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente." + "pulled": "✅ {model} baixado com sucesso, por favor tente o comando novamente.", + "queueEmpty": "✅ A fila de IA está atualmente vazia.", + "queueList": "📋 *Status da Fila de IA*\n\n{queueItems}\n\n*Total de itens:* {totalItems}", + "queueItem": "• Usuário: {username} ({userId})\n Modelo: {model}\n Status: {status}\n", + "queueCleared": "✅ Limpos {count} itens da fila para o usuário {userId}.", + "queueClearError": "❌ Erro ao limpar fila para o usuário {userId}: {error}", + "noQueueItems": "ℹ️ Nenhum item da fila encontrado para o usuário {userId}.", + "userTimedOut": "⏱️ Usuário {userId} foi suspenso dos comandos de IA até {timeoutEnd}.", + "userTimeoutRemoved": "✅ Timeout de IA removido para o usuário {userId}.", + "userTimeoutError": "❌ Erro ao definir timeout para o usuário {userId}: {error}", + "invalidDuration": "❌ Formato de duração inválido. 
Use: 1m, 1h, 1d, 1w, etc.", + "userExecTimeSet": "⏱️ Tempo máximo de execução definido para {duration} para o usuário {userId}.", + "userExecTimeRemoved": "✅ Limite de tempo máximo de execução removido para o usuário {userId}.", + "userExecTimeError": "❌ Erro ao definir tempo de execução para o usuário {userId}: {error}", + "invalidUserId": "❌ ID de usuário inválido. Por favor, forneça um ID de usuário válido do Telegram.", + "userNotFound": "❌ Usuário {userId} não encontrado na base de dados.", + "userTimedOutFromAI": "⏱️ Você está atualmente suspenso dos comandos de IA até {timeoutEnd}.", + "requestTooLong": "⏱️ Sua solicitação está demorando muito. Foi cancelada para evitar sobrecarga do sistema.", + "userLimitsRemoved": "✅ Todos os limites de IA removidos para o usuário {userId}.", + "userLimitRemoveError": "❌ Erro ao remover limites para o usuário {userId}: {error}", + "limitsHeader": "📋 *Limites Atuais de IA*", + "noLimitsSet": "✅ Nenhum limite de IA está atualmente definido.", + "timeoutLimitsHeader": "*🔒 Usuários com Timeouts de IA:*", + "timeoutLimitItem": "• {displayName} ({userId}) - Até: {timeoutEnd}", + "execLimitsHeader": "*⏱️ Usuários com Limites de Tempo de Execução:*", + "execLimitItem": "• {displayName} ({userId}) - Máx: {execTime}", + "limitsListError": "❌ Erro ao recuperar limites: {error}", + "requestStopped": "🛑 Sua solicitação de IA foi interrompida.", + "requestRemovedFromQueue": "🛑 Sua solicitação de IA foi removida da fila.", + "noActiveRequest": "ℹ️ Você não tem nenhuma solicitação ativa de IA para parar.", + "executionTimeoutReached": "\n\n⏱️ Limite máximo de tempo de execução atingido!" }, "maInvalidModule": "Por favor, forneça um ID de módulo válido do The Mod Archive.\nExemplo: `/modarchive 81574`", "maDownloadError": "Erro ao baixar o arquivo. 
Verifique o ID do módulo e tente novamente.", @@ -191,5 +223,10 @@ "header": "✨ *Suas estatísticas de uso de IA*", "requests": "*Total de requisições de IA:* {aiRequests}", "characters": "*Total de caracteres de IA:* {aiCharacters}\n_Isso é cerca de {bookCount} livros de texto!_" + }, + "twoFactor": { + "helpEntry": "🔒 2FA", + "helpDesc": "🔒 *2FA*\n\n- /2fa: Mostra suas configurações de 2FA", + "codeMessage": "🔒 *{botName} 2FA*\n\nSeu código de 2FA é: `{code}`" } } \ No newline at end of file diff --git a/src/plugins/checklang.ts b/telegram/plugins/checklang.ts old mode 100644 new mode 100755 similarity index 100% rename from src/plugins/checklang.ts rename to telegram/plugins/checklang.ts diff --git a/src/plugins/verifyInput.ts b/telegram/plugins/verifyInput.ts old mode 100644 new mode 100755 similarity index 100% rename from src/plugins/verifyInput.ts rename to telegram/plugins/verifyInput.ts diff --git a/src/plugins/ytDlpWrapper.ts b/telegram/plugins/ytDlpWrapper.ts old mode 100644 new mode 100755 similarity index 100% rename from src/plugins/ytDlpWrapper.ts rename to telegram/plugins/ytDlpWrapper.ts diff --git a/src/props/resources.json b/telegram/props/resources.json old mode 100644 new mode 100755 similarity index 100% rename from src/props/resources.json rename to telegram/props/resources.json diff --git a/telegram/utils/check-command-disabled.ts b/telegram/utils/check-command-disabled.ts new file mode 100755 index 0000000..2346afd --- /dev/null +++ b/telegram/utils/check-command-disabled.ts @@ -0,0 +1,72 @@ +// CHECK-COMMAND-DISABLED.TS +// by ihatenodejs/Aidan +// +// ----------------------------------------------------------------------- +// +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +import { Context } from 'telegraf'; +import { getStrings } from '../plugins/checklang'; +import { replyToMessageId } from './reply-to-message-id'; + +export async function isCommandDisabled(ctx: Context, db: any, commandId: string): Promise { + if (!ctx.from) return false; + + const telegramId = String(ctx.from.id); + + try { + const user = await db.query.usersTable.findFirst({ + where: (fields, { eq }) => eq(fields.telegramId, telegramId), + columns: { + disabledCommands: true, + languageCode: true, + }, + }); + + if (!user) return false; + + const isDisabled = user.disabledCommands?.includes(commandId) || false; + + if (isDisabled) { + const Strings = getStrings(user.languageCode); + const frontUrl = process.env.frontUrl || 'https://kowalski.social'; + const reply_to_message_id = replyToMessageId(ctx); + + await ctx.reply( + Strings.commandDisabled.replace('{frontUrl}', frontUrl), + { + parse_mode: 'Markdown', + ...(reply_to_message_id && { reply_parameters: { message_id: reply_to_message_id } }) + } + ); + } + + return isDisabled; + } catch (error) { + console.error('[💽 DB] Error checking disabled commands:', error); + return false; + } +} diff --git a/src/utils/ensure-user.ts b/telegram/utils/ensure-user.ts old mode 100644 new mode 100755 similarity index 94% rename from src/utils/ensure-user.ts rename to telegram/utils/ensure-user.ts index 5322992..9de1e01 --- a/src/utils/ensure-user.ts +++ b/telegram/utils/ensure-user.ts @@ -28,7 +28,7 @@ // // For more information, please refer to -import { usersTable } from '../db/schema'; +import { usersTable } from '../../database/schema'; export async function ensureUserInDb(ctx, db) { if (!ctx.from) return; @@ -52,6 +52,9 @@ export async function ensureUserInDb(ctx, db) { aiTemperature: 0.9, aiRequests: 0, aiCharacters: 0, + disabledCommands: [], + aiTimeoutUntil: null, + aiMaxExecutionTime: 0, }; try { await db.insert(usersTable).values(userToInsert); diff --git a/src/utils/language-code.ts b/telegram/utils/language-code.ts old mode 100644 new mode 100755 similarity index 100% rename from src/utils/language-code.ts rename to telegram/utils/language-code.ts diff --git a/src/utils/log.ts b/telegram/utils/log.ts old mode 100644 new mode 100755 similarity index 100% rename from src/utils/log.ts rename to telegram/utils/log.ts diff --git a/src/utils/rate-limiter.ts b/telegram/utils/rate-limiter.ts old mode 100644 new mode 100755 similarity index 100% rename from src/utils/rate-limiter.ts rename to telegram/utils/rate-limiter.ts diff --git a/src/utils/reply-to-message-id.ts b/telegram/utils/reply-to-message-id.ts old mode 100644 new mode 100755 similarity index 100% rename from src/utils/reply-to-message-id.ts rename to telegram/utils/reply-to-message-id.ts diff --git a/webui/.gitignore b/webui/.gitignore new file mode 100755 index 0000000..5ef6a52 --- /dev/null +++ b/webui/.gitignore @@ -0,0 +1,41 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +/node_modules +/.pnp +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files (can opt-in for committing if needed) +.env* + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts diff --git a/webui/LICENSE b/webui/LICENSE new file mode 100644 index 0000000..c32dd18 --- /dev/null +++ b/webui/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to \ No newline at end of file diff --git a/webui/app/about/page.tsx b/webui/app/about/page.tsx new file mode 100755 index 0000000..ab15a1c --- /dev/null +++ b/webui/app/about/page.tsx @@ -0,0 +1,549 @@ +import { Button } from "@/components/ui/button" +import { + Sparkles, + Users, + Download, + Brain, + Shield, + Zap, + Tv, + Heart, + Code, + Globe, + MessageSquare, + Layers, + Network, + Lock, + UserCheck, + BarChart3, + Languages, + Trash2, + FileText, + Headphones, + CloudSun, + Smartphone, + Dices, + Cat, + Music, + Bot +} from "lucide-react"; +import { SiTypescript, SiPostgresql, SiDocker, SiNextdotjs, SiBun, SiForgejo } from "react-icons/si"; +import { RiTelegram2Line } from "react-icons/ri"; +import { BsInfoLg } from "react-icons/bs"; +import { TbRocket, TbSparkles } from "react-icons/tb"; +import Link from "next/link"; +import { TbPalette } from "react-icons/tb"; +import Footer from "@/components/footer"; + +export default function About() { + return ( +
+
+
+
+
+ +
+
+

+ About Kowalski +

+ +

+ Kowalski is an open-source, feature-rich Telegram bot built with modern web technologies. + From AI-powered conversations to video downloads, user management, and community features — + it's designed to enhance your Telegram experience while respecting your privacy. +

+ +
+ + + +
+
+
+ +
+
+
+

Architecture

+

+ We've built Kowalski with modern technologies and best practices for reliability and maintainability. +

+
+ +
+
+
+
+ +
+

Tech Stack

+
+ +

+ Kowalski is built completely in TypeScript with Node.js and Telegraf. + The web interface uses Next.js with Tailwind CSS, while data persistence is handled by PostgreSQL with Drizzle ORM. +

+ +
+
+ +
+
TypeScript + Node.js
+
Type-safe backend w/ Telegraf
+
+
+
+ +
+
Next.js WebUI
+
Modern, responsive admin and user panel
+
+
+
+ +
+
PostgreSQL + Drizzle ORM
+
Reliable data persistence
+
+
+
+
+ +
+
+
+ +
+

Deployment

+
+ +

+ Kowalski is built to be deployed anywhere, and has been tested on multiple platforms. + We prioritize support for Docker and Bun for easy deployment. +

+ +
+
+ +
+
Docker Support
+
Easy containerized deployment w/ Docker Compose
+
+
+
+ +
+
Bun
+
A fast JavaScript runtime for best performance
+
+
+
+ +
{/* some ppl probably don't know what af means :( */} +
Modular AF Backend
+
Command-based structure for easy feature addition
+
+
+
+
+
+
+
+ +
+
+
+

AI Integrations

+

+ Powered by Ollama, Kowalski has support for 50+ AI models, with customizable + options for users and admins. +

+
+ +
+
+
+
+ +
+

Vast Model Support

+
+ +

+ Kowalski has support for 50+ models, both thinking and non-thinking. We have + good Markdown parsing, with customizable options for both users and admins. +

+ +
+
+ +
+
/ask - Quick Responses
+
Fast answers using smaller non-thinking models
+
+
+
+ +
+
/think - Deep Reasoning
+
Advanced thinking models with togglable reasoning visibility
+
+
+
+ +
+
/ai - Your Custom Model!
+
Use your personally configured AI model
+
+
+
+
+ +
+
+
+ +
+

Kowalski's Powerful

+
+ +

+ We have amazing Markdown V2 parsing, queue management, and usage statistics tracking. + It's hella private, too. AI is disabled by default for the best user experience. +

+ +
+
+ +
+
Streaming
+
Real-time Markdown V2 message updates as the model generates
+
+
+
+ +
+
Usage Stats
+
Track your AI requests and usage with /aistats
+
+
+
+ +
+
Queues
+
High usage limits with intelligent request queuing
+
+
+
+
+
+
+
+ +
+
+
+

We're User-First

+

+ Kowalski has privacy-focused user management with customizable settings, + multilingual support, and transparent data handling. +

+
+ +
+
+
+
+ +
+

Privacy

+
+ +

+ User data is minimized and linked only by Telegram ID. No personal information + is shared with third parties, and users maintain full control over their data + with easy account deletion options. +

+ +
+
+ +
+
Limited Data Collection
+
Only essential data is stored, linked by Telegram ID
+
+
+
+ +
+
Transparent Policies
+
Clear privacy policy accessible via /privacy
+
+
+
+ +
+
Easy Account Deletion
+
You can delete your data at any time
+
+
+
+
+ +
+
+
+ +
+

Customization

+
+ +

+ Personalize your experience with custom AI preferences, + temperature settings, language selection, and detailed usage statistics. +

+ +
+
+ +
+
AI Preferences
+
Choose default models and configure temperature
+
+
+
+ +
+
Multilingual Support
+
English and Portuguese language options
+
+
+
+ +
+
Usage Analytics
+
Personal statistics and usage tracking
+
+
+
+
+
+
+
+ +
+
+
+

There's WAYYYYY more!

+

+ Beyond AI, Kowalski has a ton of entertainment, utility, fun, configuration, and information + commands. +

+
+ +
+
+
+
+ +
+

Media Downloads

+
+

+ Download videos from YouTube and 1000s of other platforms using yt-dlp. + Featuring automatic size checking for Telegram. +

+
+
+ + /yt [URL] - Video downloads +
+
+ + Automatic size limit handling +
+
+
+ +
+
+
+ +
+

Information & Utilities

+
+

+ Access real-world information like weather reports, device specifications, + HTTP status codes, and a Last.fm music integration. +

+
+
+ + /weather - Weather reports +
+
+ + /device - GSMArena specs +
+
+ + /last - Last.fm integration +
+
+
+ +
+
+
+ +
+

Entertainment

+
+

+ Interactive emojis, random animal pictures, My Little Pony content, + and fun commands to engage you and your community.

+
+
+ + /dice, /slot - Interactive games +
+
+ + /cat, /dog - Random animals +
+
+ + /mlp - My Little Pony DB +
+
+
+
+
+
+ +
+
+
+

Our Community

+

+ Kowalski is built by developers, for developers. We use open licenses and + take input from our development communities. +

+
+ +
+
+
+
+ +
+

Open Development

+
+ +

+ Kowalski is licensed under BSD-3-Clause with components under Unlicense. Our + codebase is available on our Forgejo and GitHub, with lots of documentation. +

+ +
+
+ +
+
Public Code
+
Feel free to contribute or review our code
+
+
+
+ +
+
Documentation
+
We have documentation to help contributors, users, and admins
+
+
+
+ +
+
Contributor Friendly
+
Our communities are welcoming to new contributors
+
+
+
+
+ +
+
+
+ +
+

Community Centric

+
+ +

+ Kowalski was created by Lucas Gabriel (lucmsilva). It is now also maintained by ihatenodejs, + givfnz2, and other contributors. Thank you to all of our contributors! +

+ +
+
+ +
+
Active Maintenance
+
Regular updates and fixes w/ room for input and feedback
+
+
+
+ +
+
Quality Code
+
We use TypeScript, linting, and modern standards
+
+
+
+ +
+
Focus on New Features
+
We are always looking for new features to add
+
+
+
+
+
+ +
+
+
+ + Ready to contribute? +
+ +
+
+
+
+
+
+ ); +} diff --git a/webui/app/account/delete/page.tsx b/webui/app/account/delete/page.tsx new file mode 100755 index 0000000..9541167 --- /dev/null +++ b/webui/app/account/delete/page.tsx @@ -0,0 +1,204 @@ +"use client"; + +import { useState } from "react"; +import { useRouter } from "next/navigation"; +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, + DialogTrigger, +} from "@/components/ui/dialog"; +import { Trash2, ArrowLeft, AlertTriangle } from "lucide-react"; +import Link from "next/link"; +import { useAuth } from "@/contexts/auth-context"; +import { motion } from "framer-motion"; + +export default function DeleteAccountPage() { + const [isDeleting, setIsDeleting] = useState(false); + const [dialogOpen, setDialogOpen] = useState(false); + const { user, isAuthenticated, loading } = useAuth(); + const router = useRouter(); + + const handleDeleteAccount = async () => { + if (!user) return; + + setIsDeleting(true); + + try { + const response = await fetch('/api/user/delete', { + method: 'DELETE', + credentials: 'include' + }); + + if (response.ok) { + alert('Your account has been deleted. You will now be redirected to the home page. Thanks for using Kowalski!'); + window.location.href = '/'; + } else { + const error = await response.json(); + alert(`Failed to delete account: ${error.message || 'Unknown error'}`); + } + } catch (error) { + console.error('Error deleting account:', error); + alert('An error occurred while deleting your account. Please try again.'); + } finally { + setIsDeleting(false); + setDialogOpen(false); + } + }; + + if (loading) { + return ( +
+
+
+ ); + } + + if (!isAuthenticated) { + router.push('/login'); + return null; + } + + return ( +
+
+ +
+ +
+ +
+
+
+ +
+
+

Delete Account

+

Permanently remove your account and data

+
+
+ +
+
+ +
+

+ This action cannot be undone +

+

+ Deleting your account will permanently remove all your data, including: +

+
    +
+ • Your user profile and settings
+ • AI usage statistics and request history
+ • Custom AI model preferences
+ • Command configuration and disabled commands
+ • All associated sessions and authentication data
+
+
+
+ +
+

Account Information

+
+
+ Username: + @{user?.username} +
+
+ Name: + {user?.firstName} {user?.lastName} +
+
+ Telegram ID: + {user?.telegramId} +
+
+ AI Requests: + {user?.aiRequests.toLocaleString()} +
+
+
+ +
+
+
+

Ready to delete your account?

+

+ This will immediately and permanently delete your account. +

+
+ + + + + + + + + + Confirm Account Deletion + + +

+ Are you absolutely sure you want to delete your account? This action cannot be undone. +

+

+ Your account @{user?.username} and all associated data will be permanently removed. +

+
+
+ + + + +
+
+
+
+
+
+
+
+ ); +} diff --git a/webui/app/account/page.tsx b/webui/app/account/page.tsx new file mode 100755 index 0000000..4960369 --- /dev/null +++ b/webui/app/account/page.tsx @@ -0,0 +1,725 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Tabs, TabsList, TabsTrigger, TabsContent } from "@/components/ui/tabs"; +import { + User, + Bot, + Brain, + Settings, + CloudSun, + Smartphone, + Heart, + Cat, + Dices, + Thermometer, + BarChart3, + LogOut, + Edit3, + Save, + X, + Network, + Cpu, + Languages, + Bug, + Lightbulb, + ExternalLink, + Quote, + Info, + Shuffle, + Rainbow, + Database, + Hash, + Download, + Archive +} from "lucide-react"; +import { RiTelegram2Line } from "react-icons/ri"; +import { motion } from "framer-motion"; +import { ModelPicker } from "@/components/account/model-picker"; +import { useAuth } from "@/contexts/auth-context"; +import { FaLastfm } from "react-icons/fa"; +import { TiInfinity } from "react-icons/ti"; + +interface CommandCard { + id: string; + icon: React.ComponentType>; + title: string; + description: string; + commands: string[]; + category: "ai" | "entertainment" | "utility" | "media" | "admin" | "animals"; + gradient: string; + enabled: boolean; +} + +const allCommands: CommandCard[] = [ + { + id: "ai-ask-think", + icon: Brain, + title: "AI Chats", + description: "Chat with AI models and use deep thinking", + commands: ["/ask", "/think"], + category: "ai", + gradient: "from-purple-500 to-pink-500", + enabled: true + }, + { + id: "ai-custom", + icon: Bot, + title: "Custom AI Model", + description: "Use your personally configured AI model", + commands: ["/ai"], + category: "ai", + gradient: "from-indigo-500 to-purple-500", + enabled: true + }, + { + id: "ai-stats", + icon: BarChart3, + title: "AI Statistics", + description: "View your AI usage statistics", + commands: ["/aistats"], + category: "ai", + gradient: "from-purple-600 to-indigo-600", + enabled: true + }, + { + id: "games-dice", + icon: Dices, + title: "Interactive Emojis", + description: "Roll dice, play slots, and other interactive emojis", + commands: ["/dice", "/slot", "/ball", "/dart", "/bowling"], + category: "entertainment", + gradient: "from-green-500 to-teal-500", + enabled: true + }, + { + id: "fun-random", + icon: Shuffle, + title: "Fun Commands", + description: "Random numbers and fun responses", + commands: ["/random", "/furry", "/gay"], + category: "entertainment", + gradient: "from-pink-500 to-rose-500", + enabled: true + }, + { + id: "infinite-dice", + icon: TiInfinity, + title: "Infinite Dice", + description: "Sends an infinite dice sticker", + commands: ["/idice"], + category: "entertainment", + gradient: "from-yellow-500 to-orange-500", + enabled: true + }, + { + id: "animals-basic", + icon: Cat, + title: "Animal Pictures", + description: "Get random cute animal pictures", + commands: ["/cat", "/dog", "/duck", "/fox"], + category: "animals", + gradient: "from-orange-500 to-red-500", + enabled: true + }, + { + id: "soggy-cat", + icon: Heart, + title: "Soggy Cat", + description: "Wet cats!", + commands: ["/soggy", "/soggycat"], + category: "animals", + gradient: "from-blue-500 to-purple-500", + enabled: true + }, + { + id: "weather", + icon: CloudSun, + title: "Weather", + description: "Get current weather for any location", + commands: ["/weather", "/clima"], + category: "utility", + gradient: "from-blue-500 to-cyan-500", + enabled: true + }, + { + id: "device-specs", + 
icon: Smartphone, + title: "Device Specifications", + description: "Look up phone specifications via GSMArena", + commands: ["/device", "/d"], + category: "utility", + gradient: "from-slate-500 to-gray-500", + enabled: true + }, + { + id: "http-status", + icon: Network, + title: "HTTP Status Codes", + description: "Look up HTTP status codes and meanings", + commands: ["/http", "/httpcat"], + category: "utility", + gradient: "from-emerald-500 to-green-500", + enabled: true + }, + { + id: "codename-lookup", + icon: Hash, + title: "Codename Lookup", + description: "Look up codenames and meanings", + commands: ["/codename", "/whatis"], + category: "utility", + gradient: "from-teal-500 to-cyan-500", + enabled: true + }, + { + id: "info-commands", + icon: Info, + title: "Information", + description: "Get chat and user information", + commands: ["/chatinfo", "/userinfo"], + category: "utility", + gradient: "from-indigo-500 to-blue-500", + enabled: true + }, + { + id: "quotes", + icon: Quote, + title: "Random Quotes", + description: "Get random quotes", + commands: ["/quote"], + category: "utility", + gradient: "from-amber-500 to-yellow-500", + enabled: true + }, + { + id: "youtube-download", + icon: Download, + title: "Video Downloads", + description: "Download videos from YouTube and 1000+ platforms", + commands: ["/yt", "/ytdl", "/video", "/dl"], + category: "media", + gradient: "from-red-500 to-pink-500", + enabled: true + }, + { + id: "lastfm", + icon: FaLastfm, + title: "Last.fm Integration", + description: "Connect your music listening history", + commands: ["/last", "/lfm", "/setuser"], + category: "media", + gradient: "from-violet-500 to-purple-500", + enabled: true + }, + { + id: "mlp-content", + icon: Database, + title: "MLP Database", + description: "My Little Pony content and information", + commands: ["/mlp", "/mlpchar", "/mlpep", "/mlpcomic"], + category: "media", + gradient: "from-fuchsia-500 to-pink-500", + enabled: true + }, + { + id: "modarchive", + icon: Archive, + title: "Mod Archive", + description: "Access classic tracker music files", + commands: ["/modarchive", "/tma"], + category: "media", + gradient: "from-cyan-500 to-blue-500", + enabled: true + }, + { + id: "random-pony", + icon: Rainbow, + title: "Random Pony Art", + description: "Get random My Little Pony artwork", + commands: ["/rpony", "/randompony", "/mlpart"], + category: "media", + gradient: "from-pink-500 to-purple-500", + enabled: true + }, +]; + +const categoryColors = { + ai: "bg-purple-500/10 text-purple-600 border-purple-200 dark:border-purple-800", + entertainment: "bg-green-500/10 text-green-600 border-green-200 dark:border-green-800", + utility: "bg-blue-500/10 text-blue-600 border-blue-200 dark:border-blue-800", + media: "bg-red-500/10 text-red-600 border-red-200 dark:border-red-800", + admin: "bg-orange-500/10 text-orange-600 border-orange-200 dark:border-orange-800", + animals: "bg-emerald-500/10 text-emerald-600 border-emerald-200 dark:border-emerald-800" +}; + +const languageOptions = [ + { code: 'en', name: 'English', flag: '🇺🇸' }, + { code: 'pt', name: 'Português', flag: '🇧🇷' }, +]; + +export default function AccountPage() { + const [editingTemp, setEditingTemp] = useState(false); + const [tempValue, setTempValue] = useState(""); + const [selectedCategory, setSelectedCategory] = useState(null); + const [reportTab, setReportTab] = useState("bug"); + const [commands, setCommands] = useState(allCommands); + + const { user, loading, logout, refreshUser } = useAuth(); + + useEffect(() => { + if (user) 
{ + setTempValue(user.aiTemperature.toString()); + setCommands(allCommands.map(cmd => ({ + ...cmd, + enabled: !user.disabledCommands.includes(cmd.id) + }))); + } + }, [user]); + + const updateSetting = async (setting: string, value: boolean | number | string) => { + try { + const response = await fetch('/api/user/settings', { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ [setting]: value }), + credentials: 'include' + }); + + if (response.ok) { + await refreshUser(); + } + } catch (error) { + console.error('Error updating setting:', error); + } + }; + + const saveTemperature = () => { + const temp = parseFloat(tempValue); + if (temp >= 0.1 && temp <= 2.0) { + updateSetting('aiTemperature', temp); + setEditingTemp(false); + } + }; + + const toggleCommand = async (commandId: string) => { + if (!user) return; + + const commandToToggle = commands.find(cmd => cmd.id === commandId); + if (!commandToToggle) return; + + const newEnabledState = !commandToToggle.enabled; + + setCommands(prev => prev.map(cmd => + cmd.id === commandId ? { ...cmd, enabled: newEnabledState } : cmd + )); + + try { + let newDisabledCommands: string[]; + + if (newEnabledState) { + newDisabledCommands = user.disabledCommands.filter(id => id !== commandId); + } else { + newDisabledCommands = [...user.disabledCommands, commandId]; + } + + const response = await fetch('/api/user/settings', { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ disabledCommands: newDisabledCommands }), + credentials: 'include' + }); + + if (response.ok) { + await refreshUser(); + } else { + setCommands(prev => prev.map(cmd => + cmd.id === commandId ? { ...cmd, enabled: !newEnabledState } : cmd + )); + console.error('Failed to update command state'); + } + } catch (error) { + setCommands(prev => prev.map(cmd => + cmd.id === commandId ? { ...cmd, enabled: !newEnabledState } : cmd + )); + console.error('Error updating command state:', error); + } + }; + + const filteredCommands = selectedCategory + ? commands.filter(cmd => cmd.category === selectedCategory) + : commands; + + if (loading) { + return ( +
+
+
+ ); + } + + if (!user) { + return ( +
+
+

Authentication Required

+ +
+
+ ); + } + + return ( +
+
+
+
+
+ +
+
+

Welcome back, {user.firstName}!

+

@{user.username}

+
+
+ +
+ +
+ +
+ +

AI Usage

+
+
+

{user.aiRequests}

+

Total AI Requests

+

{user.aiCharacters.toLocaleString()}

+

Characters Generated

+
+
+ + +
+ +

AI Settings

+
+
+
+ AI Enabled + +
+
+ Show Thinking + +
+
+
+ + +
+ +

Temperature

+
+
+
+ {editingTemp ? ( + <> + setTempValue(e.target.value)} + className="h-8 w-20" + /> + + + + ) : ( + <> + {user.aiTemperature} + + + )} +
+

Controls randomness in AI responses. Lower values (0.1-0.5) = more focused, higher values (0.7-2.0) = more creative.

+
+
+ + +
+ +

Language Options

+
+
+
+ {languageOptions.map((lang) => ( + + ))} +
+

Choose your preferred language for bot responses and interface text.

+
+
+ + +
+ +

My Model

+
+
+ updateSetting('customAiModel', newModel)} + className="w-full" + /> +

Your selected AI model for custom /ai commands. Different models have varying capabilities, speeds, and response styles.

+
+
+ + +
+ +

Report An Issue

+
+
+ + + + + Bug Report + + + + Feature Request + + +
+ +

Found a bug or issue? Report it to help us improve Kowalski.

+ +
+ +

Have an idea for a new feature? Let us know what you'd like to see!

+ +
+
+
+
+
+ +
+ + +

Command Management

+
+ + {Object.entries(categoryColors).map(([category, colorClass]) => ( + + ))} +
+
+ + + {filteredCommands.map((command) => ( +
+
+
+ +
+
+
toggleCommand(command.id)} + > +
+
+
+
+ +

+ {command.title} +

+

+ {command.description} +

+ +
+
+ {command.commands.slice(0, 2).map((cmd, idx) => ( + + {cmd} + + ))} + {command.commands.length > 2 && ( + + +{command.commands.length - 2} + + )} +
+
+ {command.category === "ai" ? "AI" : command.category} +
+
+
+ ))} + + + +
+ Ready to start using Kowalski? + +
+
+
+
+ ); +} diff --git a/webui/app/api/auth/logout/route.ts b/webui/app/api/auth/logout/route.ts new file mode 100755 index 0000000..d30c5ad --- /dev/null +++ b/webui/app/api/auth/logout/route.ts @@ -0,0 +1,34 @@ +import { NextRequest, NextResponse } from "next/server"; +import { invalidateSession } from "@/lib/auth"; +import { SESSION_COOKIE_NAME } from "@/lib/auth-constants"; + +export async function POST(request: NextRequest) { + try { + const cookieToken = request.cookies.get(SESSION_COOKIE_NAME)?.value; + const authHeader = request.headers.get('authorization'); + const bearerToken = authHeader?.startsWith('Bearer ') ? authHeader.slice(7) : null; + const sessionToken = bearerToken || cookieToken; + + if (sessionToken) { + await invalidateSession(sessionToken); + } + + const response = NextResponse.json({ success: true }); + + response.cookies.set(SESSION_COOKIE_NAME, '', { + httpOnly: true, + secure: process.env.NODE_ENV === "production", + sameSite: "lax", + expires: new Date(0), + path: "/", + }); + + return response; + + } catch (error) { + console.error("Error in logout API:", error); + return NextResponse.json({ + error: "Internal server error" + }, { status: 500 }); + } +} diff --git a/webui/app/api/auth/username/route.ts b/webui/app/api/auth/username/route.ts new file mode 100755 index 0000000..00baa61 --- /dev/null +++ b/webui/app/api/auth/username/route.ts @@ -0,0 +1,91 @@ +import { NextRequest, NextResponse } from "next/server"; +import { eq } from "drizzle-orm"; +import * as schema from "@/lib/schema"; +import { db } from "@/lib/db"; + +export async function POST(request: NextRequest) { + try { + const requestContentType = request.headers.get('content-type'); + if (!requestContentType || !requestContentType.includes('application/json')) { + return NextResponse.json({ success: false, error: "Invalid content type" }, { status: 400 }); + } + + const body = await request.json(); + const { username } = body; + + if (!username) { + return NextResponse.json({ success: false, error: "Username is required" }, { status: 400 }); + } + + if (typeof username !== 'string' || username.length < 3 || username.length > 32) { + return NextResponse.json({ success: false, error: "Invalid username format" }, { status: 400 }); + } + + const cleanUsername = username.replace('@', ''); + + const user = await db.query.usersTable.findFirst({ + where: eq(schema.usersTable.username, cleanUsername), + columns: { + telegramId: true, + username: true, + }, + }); + + if (!user) { + const botUsername = process.env.botUsername || "KowalskiNodeBot"; + return NextResponse.json({ success: false, error: `Please DM @${botUsername} before signing in.` }, { status: 404 }); + } + + const botApiUrl = process.env.botApiUrl || "http://kowalski:3030"; + const fullUrl = `${botApiUrl}/2fa/get`; + + const botApiResponse = await fetch(fullUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ userId: user.telegramId }), + }); + + if (!botApiResponse.ok) { + const errorText = await botApiResponse.text(); + console.error("Bot API error response:", errorText); + return NextResponse.json({ + success: false, + error: `Bot API error: ${botApiResponse.status} - ${errorText.slice(0, 200)}` + }, { status: 500 }); + } + + const contentType = botApiResponse.headers.get("content-type"); + if (!contentType || !contentType.includes("application/json")) { + const errorText = await botApiResponse.text(); + console.error("Bot API returned non-JSON:", errorText.slice(0, 200)); + return 
NextResponse.json({ + success: false, + error: "Bot API returned invalid response format" + }, { status: 500 }); + } + + const botApiResult = await botApiResponse.json(); + + if (!botApiResult.generated) { + return NextResponse.json({ + success: false, + error: botApiResult.error || "Failed to send 2FA code" + }, { status: 500 }); + } + + return NextResponse.json({ + success: true, + message: "2FA code sent successfully", + userId: user.telegramId + }); + + } catch (error) { + console.error("Error in username API:", error); + return NextResponse.json({ + success: false, + error: "Internal server error" + }, { status: 500 }); + } +} \ No newline at end of file diff --git a/webui/app/api/auth/verify/route.ts b/webui/app/api/auth/verify/route.ts new file mode 100755 index 0000000..3728e72 --- /dev/null +++ b/webui/app/api/auth/verify/route.ts @@ -0,0 +1,107 @@ +import { NextRequest, NextResponse } from "next/server"; +import { eq, and, gt } from "drizzle-orm"; +import * as schema from "@/lib/schema"; +import { db } from "@/lib/db"; +import { createSession, getSessionCookieOptions } from "@/lib/auth"; +import { SESSION_COOKIE_NAME } from "@/lib/auth-constants"; + +export async function POST(request: NextRequest) { + try { + const contentType = request.headers.get('content-type'); + if (!contentType || !contentType.includes('application/json')) { + return NextResponse.json({ + success: false, + error: "Invalid content type" + }, { status: 400 }); + } + + const body = await request.json(); + const { userId, code } = body; + + if (!userId || !code) { + return NextResponse.json({ + success: false, + error: "User ID and code are required" + }, { status: 400 }); + } + + if (typeof userId !== 'string' || typeof code !== 'string') { + return NextResponse.json({ + success: false, + error: "Invalid input format" + }, { status: 400 }); + } + + if (!/^\d{6}$/.test(code)) { + return NextResponse.json({ + success: false, + error: "Invalid code format" + }, { status: 400 }); + } + + const twoFactorRecord = await db.query.twoFactorTable.findFirst({ + where: and( + eq(schema.twoFactorTable.userId, userId), + gt(schema.twoFactorTable.codeExpiresAt, new Date()) + ), + }); + + if (!twoFactorRecord) { + return NextResponse.json({ + success: false, + error: "No valid 2FA code found or code has expired" + }, { status: 404 }); + } + + if (twoFactorRecord.codeAttempts >= 5) { + await db.delete(schema.twoFactorTable) + .where(eq(schema.twoFactorTable.userId, userId)); + + return NextResponse.json({ + success: false, + error: "Too many failed attempts. Please request a new code." 
+ }, { status: 429 }); + } + + if (twoFactorRecord.currentCode !== code) { + await db.update(schema.twoFactorTable) + .set({ + codeAttempts: twoFactorRecord.codeAttempts + 1, + updatedAt: new Date() + }) + .where(eq(schema.twoFactorTable.userId, userId)); + + console.log(`2FA verification failed for user: ${userId}, attempts: ${twoFactorRecord.codeAttempts + 1}`); + return NextResponse.json({ + success: false, + error: "Invalid 2FA code" + }, { status: 401 }); + } + + const session = await createSession(userId); + + await db.delete(schema.twoFactorTable) + .where(eq(schema.twoFactorTable.userId, userId)); + + console.log("2FA verification successful for user:", userId); + + const response = NextResponse.json({ + success: true, + message: "2FA verification successful", + redirectTo: "/account", + sessionToken: session.sessionToken + }); + + const cookieOptions = getSessionCookieOptions(); + response.cookies.set(SESSION_COOKIE_NAME, session.sessionToken, cookieOptions); + + return response; + + } catch (error) { + console.error("Error in verify API:", error); + return NextResponse.json({ + success: false, + error: "Internal server error" + }, { status: 500 }); + } +} diff --git a/webui/app/api/user/delete/route.ts b/webui/app/api/user/delete/route.ts new file mode 100755 index 0000000..e1c4d93 --- /dev/null +++ b/webui/app/api/user/delete/route.ts @@ -0,0 +1,59 @@ +import { NextRequest, NextResponse } from "next/server"; +import { validateSession } from "@/lib/auth"; +import { SESSION_COOKIE_NAME } from "@/lib/auth-constants"; +import { db } from "@/lib/db"; +import { usersTable, sessionsTable, twoFactorTable } from "@/lib/schema"; +import { eq } from "drizzle-orm"; + +export async function DELETE(request: NextRequest) { + try { + const cookieToken = request.cookies.get(SESSION_COOKIE_NAME)?.value; + const authHeader = request.headers.get('authorization'); + const bearerToken = authHeader?.startsWith('Bearer ') ? 
authHeader.slice(7) : null; + const sessionToken = bearerToken || cookieToken; + + if (!sessionToken) { + return NextResponse.json({ error: "Authentication required" }, { status: 401 }); + } + + const sessionData = await validateSession(sessionToken); + + if (!sessionData || !sessionData.user) { + return NextResponse.json({ error: "Invalid or expired session" }, { status: 401 }); + } + + const userId = sessionData.user.telegramId; + + await db.transaction(async (tx) => { + await tx.delete(sessionsTable) + .where(eq(sessionsTable.userId, userId)); + + await tx.delete(twoFactorTable) + .where(eq(twoFactorTable.userId, userId)); + + await tx.delete(usersTable) + .where(eq(usersTable.telegramId, userId)); + }); + + const response = NextResponse.json({ + success: true, + message: "Account deleted successfully" + }); + + response.cookies.set(SESSION_COOKIE_NAME, '', { + httpOnly: true, + secure: process.env.NODE_ENV === "production", + sameSite: "lax", + expires: new Date(0), + path: "/", + }); + + return response; + + } catch (error) { + console.error("Error deleting account:", error); + return NextResponse.json({ + error: "Failed to delete account" + }, { status: 500 }); + } +} diff --git a/webui/app/api/user/profile/route.ts b/webui/app/api/user/profile/route.ts new file mode 100755 index 0000000..8a18ab8 --- /dev/null +++ b/webui/app/api/user/profile/route.ts @@ -0,0 +1,46 @@ +import { NextRequest, NextResponse } from "next/server"; +import { validateSession } from "@/lib/auth"; +import { SESSION_COOKIE_NAME } from "@/lib/auth-constants"; + +export async function GET(request: NextRequest) { + try { + const cookieToken = request.cookies.get(SESSION_COOKIE_NAME)?.value; + const authHeader = request.headers.get('authorization'); + const bearerToken = authHeader?.startsWith('Bearer ') ? 
authHeader.slice(7) : null; + const sessionToken = bearerToken || cookieToken; + + if (!sessionToken) { + return NextResponse.json({ error: "Authentication required" }, { status: 401 }); + } + + const sessionData = await validateSession(sessionToken); + + if (!sessionData || !sessionData.user) { + return NextResponse.json({ error: "Invalid or expired session" }, { status: 401 }); + } + + const { user } = sessionData; + const sanitizedUser = { + telegramId: user.telegramId, + username: user.username, + firstName: user.firstName, + lastName: user.lastName, + aiEnabled: user.aiEnabled, + showThinking: user.showThinking, + customAiModel: user.customAiModel, + aiTemperature: user.aiTemperature, + aiRequests: user.aiRequests, + aiCharacters: user.aiCharacters, + disabledCommands: user.disabledCommands, + languageCode: user.languageCode, + }; + + return NextResponse.json(sanitizedUser); + + } catch (error) { + console.error("Error in profile API:", error); + return NextResponse.json({ + error: "Internal server error" + }, { status: 500 }); + } +} diff --git a/webui/app/api/user/settings/route.ts b/webui/app/api/user/settings/route.ts new file mode 100755 index 0000000..bc5d7a2 --- /dev/null +++ b/webui/app/api/user/settings/route.ts @@ -0,0 +1,103 @@ +import { NextRequest, NextResponse } from "next/server"; +import { eq } from "drizzle-orm"; +import { validateSession } from "@/lib/auth"; +import { SESSION_COOKIE_NAME } from "@/lib/auth-constants"; +import { db } from "@/lib/db"; +import * as schema from "@/lib/schema"; + +interface UserUpdates { + aiEnabled?: boolean; + showThinking?: boolean; + customAiModel?: string; + aiTemperature?: number; + disabledCommands?: string[]; + languageCode?: string; + updatedAt?: Date; +} + +export async function PATCH(request: NextRequest) { + try { + const cookieToken = request.cookies.get(SESSION_COOKIE_NAME)?.value; + const authHeader = request.headers.get('authorization'); + const bearerToken = authHeader?.startsWith('Bearer ') ? 
authHeader.slice(7) : null; + const sessionToken = bearerToken || cookieToken; + + if (!sessionToken) { + return NextResponse.json({ error: "Authentication required" }, { status: 401 }); + } + + const sessionData = await validateSession(sessionToken); + + if (!sessionData || !sessionData.user) { + return NextResponse.json({ error: "Invalid or expired session" }, { status: 401 }); + } + + const contentType = request.headers.get('content-type'); + if (!contentType || !contentType.includes('application/json')) { + return NextResponse.json({ error: "Invalid content type" }, { status: 400 }); + } + + const updates = await request.json(); + const userId = sessionData.user.telegramId; + + if (!updates || typeof updates !== 'object') { + return NextResponse.json({ error: "Invalid request body" }, { status: 400 }); + } + + const allowedFields = [ + 'aiEnabled', + 'showThinking', + 'customAiModel', + 'aiTemperature', + 'disabledCommands', + 'languageCode' + ]; + + const filteredUpdates: UserUpdates = {}; + + for (const [key, value] of Object.entries(updates)) { + if (allowedFields.includes(key)) { + if (key === 'aiEnabled' || key === 'showThinking') { + filteredUpdates[key] = Boolean(value); + } else if (key === 'aiTemperature') { + const temp = Number(value); + if (temp >= 0.1 && temp <= 2.0) { + filteredUpdates[key] = temp; + } else { + return NextResponse.json({ error: "Temperature must be between 0.1 and 2.0" }, { status: 400 }); + } + } else if (key === 'customAiModel' || key === 'languageCode') { + if (typeof value === 'string' && value.length > 0 && value.length < 100) { + filteredUpdates[key] = value; + } else { + return NextResponse.json({ error: `Invalid ${key}` }, { status: 400 }); + } + } else if (key === 'disabledCommands') { + if (Array.isArray(value) && value.every(item => typeof item === 'string' && item.length < 50) && value.length < 100) { + filteredUpdates[key] = value; + } else { + return NextResponse.json({ error: "Invalid disabled commands" }, { status: 400 }); + } + } + } + } + + if (Object.keys(filteredUpdates).length === 0) { + return NextResponse.json({ error: "No valid updates provided" }, { status: 400 }); + } + + filteredUpdates.updatedAt = new Date(); + + await db.update(schema.usersTable) + .set(filteredUpdates) + .where(eq(schema.usersTable.telegramId, userId)); + + return NextResponse.json({ success: true }); + + } catch (error) { + console.error("Error in settings API:", error); + return NextResponse.json({ + error: "Internal server error" + }, { status: 500 }); + } +} diff --git a/webui/app/globals.css b/webui/app/globals.css new file mode 100755 index 0000000..9644468 --- /dev/null +++ b/webui/app/globals.css @@ -0,0 +1,126 @@ +@import "tailwindcss"; +@import "tw-animate-css"; + +@custom-variant dark (&:is(.dark *)); + +@theme inline { + --color-background: var(--background); + --color-foreground: var(--foreground); + --font-sans: var(--font-geist-sans); + --font-mono: var(--font-geist-mono); + --color-sidebar-ring: var(--sidebar-ring); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar: var(--sidebar); + --color-chart-5: var(--chart-5); + --color-chart-4: var(--chart-4); + --color-chart-3: var(--chart-3); + --color-chart-2: var(--chart-2); + 
--color-chart-1: var(--chart-1); + --color-ring: var(--ring); + --color-input: var(--input); + --color-border: var(--border); + --color-destructive: var(--destructive); + --color-accent-foreground: var(--accent-foreground); + --color-accent: var(--accent); + --color-muted-foreground: var(--muted-foreground); + --color-muted: var(--muted); + --color-secondary-foreground: var(--secondary-foreground); + --color-secondary: var(--secondary); + --color-primary-foreground: var(--primary-foreground); + --color-primary: var(--primary); + --color-popover-foreground: var(--popover-foreground); + --color-popover: var(--popover); + --color-card-foreground: var(--card-foreground); + --color-card: var(--card); + --radius-sm: calc(var(--radius) - 4px); + --radius-md: calc(var(--radius) - 2px); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) + 4px); +} + +body { + font-family: var(--font-sora); +} + +:root { + --radius: 0.625rem; + --background: oklch(1 0 0); + --foreground: oklch(0.145 0 0); + --card: oklch(1 0 0); + --card-foreground: oklch(0.145 0 0); + --popover: oklch(1 0 0); + --popover-foreground: oklch(0.145 0 0); + --primary: oklch(0.205 0 0); + --primary-foreground: oklch(0.985 0 0); + --secondary: oklch(0.97 0 0); + --secondary-foreground: oklch(0.205 0 0); + --muted: oklch(0.97 0 0); + --muted-foreground: oklch(0.556 0 0); + --accent: oklch(0.97 0 0); + --accent-foreground: oklch(0.205 0 0); + --destructive: oklch(0.577 0.245 27.325); + --border: oklch(0.922 0 0); + --input: oklch(0.922 0 0); + --ring: oklch(0.708 0 0); + --chart-1: oklch(0.646 0.222 41.116); + --chart-2: oklch(0.6 0.118 184.704); + --chart-3: oklch(0.398 0.07 227.392); + --chart-4: oklch(0.828 0.189 84.429); + --chart-5: oklch(0.769 0.188 70.08); + --sidebar: oklch(0.985 0 0); + --sidebar-foreground: oklch(0.145 0 0); + --sidebar-primary: oklch(0.205 0 0); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.97 0 0); + --sidebar-accent-foreground: oklch(0.205 0 0); + --sidebar-border: oklch(0.922 0 0); + --sidebar-ring: oklch(0.708 0 0); +} + +.dark { + --background: oklch(0.145 0 0); + --foreground: oklch(0.985 0 0); + --card: oklch(0.205 0 0); + --card-foreground: oklch(0.985 0 0); + --popover: oklch(0.205 0 0); + --popover-foreground: oklch(0.985 0 0); + --primary: oklch(0.922 0 0); + --primary-foreground: oklch(0.205 0 0); + --secondary: oklch(0.269 0 0); + --secondary-foreground: oklch(0.985 0 0); + --muted: oklch(0.269 0 0); + --muted-foreground: oklch(0.708 0 0); + --accent: oklch(0.269 0 0); + --accent-foreground: oklch(0.985 0 0); + --destructive: oklch(0.704 0.191 22.216); + --border: oklch(1 0 0 / 10%); + --input: oklch(1 0 0 / 15%); + --ring: oklch(0.556 0 0); + --chart-1: oklch(0.488 0.243 264.376); + --chart-2: oklch(0.696 0.17 162.48); + --chart-3: oklch(0.769 0.188 70.08); + --chart-4: oklch(0.627 0.265 303.9); + --chart-5: oklch(0.645 0.246 16.439); + --sidebar: oklch(0.205 0 0); + --sidebar-foreground: oklch(0.985 0 0); + --sidebar-primary: oklch(0.488 0.243 264.376); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.269 0 0); + --sidebar-accent-foreground: oklch(0.985 0 0); + --sidebar-border: oklch(1 0 0 / 10%); + --sidebar-ring: oklch(0.556 0 0); +} + +@layer base { + * { + @apply border-border outline-ring/50; + } + body { + @apply bg-background text-foreground; + } +} diff --git a/webui/app/layout.tsx b/webui/app/layout.tsx new file mode 100755 index 0000000..ac6e82e --- /dev/null +++ b/webui/app/layout.tsx @@ -0,0 +1,54 @@ +import type 
{ Metadata } from "next"; +import { Sora } from "next/font/google"; +import "./globals.css"; +import { ThemeProvider } from "@/components/providers"; +import { SidebarProvider, SidebarInset, SidebarTrigger } from "@/components/ui/sidebar"; +import { AppSidebar } from "@/components/app-sidebar"; +import { AuthProvider } from "@/contexts/auth-context"; +import { HeaderAuth } from "@/components/header-auth"; + +const sora = Sora({ + variable: "--font-sora", + subsets: ["latin"], +}); + +export const metadata: Metadata = { + title: "Kowalski", + description: "A powerful, multi-function Telegram bot", +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + + + + + +
+ +
+ +
+
+
+ {children} +
+
+
+
+
+ + + ); +} diff --git a/webui/app/login/page.tsx b/webui/app/login/page.tsx new file mode 100755 index 0000000..90daad2 --- /dev/null +++ b/webui/app/login/page.tsx @@ -0,0 +1,311 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { RiTelegram2Line } from "react-icons/ri"; +import { TbLoader } from "react-icons/tb"; +import { useState, Suspense } from "react"; +import { useSearchParams } from "next/navigation"; +import { motion, AnimatePresence } from "framer-motion"; + +export const dynamic = 'force-dynamic' + +type FormStep = "username" | "twofa"; + +type VerifyResponse = { + success: boolean; + message?: string; + redirectTo?: string; + sessionToken?: string; + error?: string; +}; + +const buttonVariants = { + initial: { scale: 1 }, + tap: { scale: 0.98 }, +}; + +function LoginForm() { + const [step, setStep] = useState("username"); + const [username, setUsername] = useState(""); + const [twoFaCode, setTwoFaCode] = useState(""); + const [userId, setUserId] = useState(""); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(""); + const searchParams = useSearchParams(); + const returnTo = searchParams.get('returnTo') || '/account'; + + const handleUsernameSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + if (!username.trim()) return; + + setIsLoading(true); + setError(""); + + try { + const response = await fetch("/api/auth/username", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ username: username.trim() }), + }); + + const result = await response.json(); + + if (result.success) { + setUserId(result.userId); + setStep("twofa"); + } else { + setError(result.error || "Failed to find user"); + } + } catch (err) { + console.error("Username submission error:", err); + setError("Network error. Please try again."); + } finally { + setIsLoading(false); + } + }; + + const handleTwoFaSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + if (!twoFaCode.trim() || twoFaCode.length !== 6) return; + + setIsLoading(true); + setError(""); + + try { + const response = await fetch("/api/auth/verify", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ userId, code: twoFaCode }), + }); + + const result: VerifyResponse = await response.json(); + + if (result.success) { + const redirectTo = result.redirectTo || returnTo; + if (result.sessionToken) { + try { + localStorage.setItem('kowalski-session', result.sessionToken); + } catch (storageError) { + console.error('localStorage error:', storageError); + } + } + + window.location.href = redirectTo; + } else { + setError(result.error || "Invalid 2FA code"); + } + } catch (err) { + console.error("2FA verification error:", err); + console.log("Error details:", { + message: err instanceof Error ? err.message : 'Unknown error', + stack: err instanceof Error ? err.stack : 'No stack trace', + name: err instanceof Error ? err.name : 'Unknown error type' + }); + const errorMessage = err instanceof Error ? + `Error: ${err.message}` : + "Network error. Please try again."; + setError(errorMessage); + } finally { + setIsLoading(false); + } + }; + + const resetForm = () => { + setStep("username"); + setUsername(""); + setTwoFaCode(""); + setUserId(""); + setError(""); + }; + + const LoadingSpinner = ({ text }: { text: string }) => ( +
+ + {text} +
+ ); + + return ( +
+
+
+
+
+ +
+
+ + + {step === "username" && ( + +

+ Login to Kowalski +

+ +

+ Please enter your Telegram username to continue. +

+ +
+
+ setUsername(e.target.value)} + disabled={isLoading} + className="text-center text-lg py-6" + autoFocus + /> +
+ + + {error && ( + + {error} + + )} + + + + + +
+
+ )} + + {step === "twofa" && ( + +

+ Enter 2FA Code +

+ +

+ We've sent a 6-digit code to your Telegram. Please enter it below. +

+ +
+
+ setTwoFaCode(e.target.value.replace(/\D/g, '').slice(0, 6))} + disabled={isLoading} + className="text-center text-2xl font-mono tracking-widest py-6" + maxLength={6} + autoFocus + /> +
+ + + {error && ( + + {error} + + )} + + +
+ + + + + + + +
+
+
+ )} +
+
+
+
+ ); +} + +export default function LoginPage() { + return ( + +
+
+ }> + + + ); +} diff --git a/webui/app/page.tsx b/webui/app/page.tsx new file mode 100755 index 0000000..d2de9d2 --- /dev/null +++ b/webui/app/page.tsx @@ -0,0 +1,251 @@ +import { Button } from "@/components/ui/button" +import { + Bot, + Sparkles, + Users, + Settings, + Download, + Brain, + Shield, + Zap, + Tv, + Trash, + Lock, +} from "lucide-react"; +import { SiYoutube, SiForgejo } from "react-icons/si"; +import { RiTelegram2Line } from "react-icons/ri"; +import { TbEyeSpark } from "react-icons/tb"; +import Image from "next/image"; +import Footer from "@/components/footer"; +import Link from "next/link"; + +export default function Home() { + return ( +
+
+
+
+
+ Kowalski Logo +
+
+

+ Kowalski +

+ +

+ A powerful, multi-function Telegram bot with AI capabilities, media downloading, + user management, and much more. Built for communities and power users. +

+ +
+ + + +
+
+
+ +
+
+
+

Features You'll Love

+

+ Powered by TypeScript, Telegraf, Next.js, and AI. +

+
+ +
+
+
+
+ +
+

AI Commands

+
+ +

+ Interact with over 50 AI models through simple commands. Get intelligent responses, + assistance, or problem-solving help right in Telegram. +

+ +
+
+ +
+
/ai
+
Ask questions to a custom AI model of your choice
+
+
+
+ +
+
/ask
+
Quick AI responses for everyday questions
+
+
+
+ +
+
/think
+
Deep reasoning with optional visible thinking
+
+
+
+
+ +
+
+
+ +
+

YouTube/Video Downloads

+
+ +

+ Download videos directly from YouTube and other platforms, then watch them in Telegram. + Supports thousands of sites with integrated yt-dlp. +

+ +
+
+ +
+
/yt [URL]
+
Quickly download videos up to 50MB
+
+
+
+ +
+
Automatic Rate Limit Detection
+
We'll notify you if something goes wrong
+
+
+
+ +
+
High Quality Downloads
+
Kowalski automatically chooses the best quality for you
+
+
+
+
+
+
+
+ +
+
+
+

+ Control and Fun +

+

+ Your user data is always minimized and under your control. That certainly + doesn't mean the experience is lacking! +

+
+ +
+
+
+
+ +
+

User Accounts

+
+ +

+ Your user data is linked only by your Telegram ID. No data is ever sent to third parties + or used for anything other than providing you with the best experience. +

+ +
+
+ +
+
Personal Settings
+
Custom AI models, temperature, and language preferences
+
+
+
+ +
+
Account Statistics
+
Track AI requests, characters processed, and more
+
+
+
+ +
+
Leave at Any Time
+
We make it easy to delete your account at any time
+
+
+
+
+ +
+
+
+ +
+

Web Interface

+
+ +

+ Kowalski includes a web interface, made with Next.js, to make it easier to manage your + bot, user account, and more. It's tailored to both users and admins. +

+ +
+
+ +
+
Everything's Clean
+
We don't clutter your view with ads or distractions.
+
+
+
+ +
+
Do Everything!
+
We aim to integrate every feature into the web interface.
+
+
+
+ +
+
Private
+
We don't use any analytics, tracking, or third-party scripts.
+
+
+
+
+
+
+
+
+
+ ); +} diff --git a/webui/components.json b/webui/components.json new file mode 100755 index 0000000..335484f --- /dev/null +++ b/webui/components.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "", + "css": "app/globals.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "iconLibrary": "lucide" +} \ No newline at end of file diff --git a/webui/components/account/ai.ts b/webui/components/account/ai.ts new file mode 100755 index 0000000..de8d402 --- /dev/null +++ b/webui/components/account/ai.ts @@ -0,0 +1,434 @@ +export interface ModelInfo { + name: string; + label: string; + descriptionEn: string; + descriptionPt: string; + models: Array<{ + name: string; + label: string; + parameterSize: string; + thinking: boolean; + uncensored: boolean; + }>; +} + +export const defaultFlashModel = "gemma3:4b" +export const defaultThinkingModel = "qwen3:4b" +export const unloadModelAfterB = 12 // how many billion params until model is auto-unloaded +export const maxUserQueueSize = 3 + +export const models: ModelInfo[] = [ + { + name: 'gemma3n', + label: 'gemma3n', + descriptionEn: 'Gemma3n is a family of open, light on-device models for general tasks.', + descriptionPt: 'Gemma3n é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.', + models: [ + { + name: 'gemma3n:e2b', + label: 'Gemma3n e2b', + parameterSize: '2B', + thinking: false, + uncensored: false + }, + { + name: 'gemma3n:e4b', + label: 'Gemma3n e4b', + parameterSize: '4B', + thinking: false, + uncensored: false + }, + ] + }, + { + name: 'gemma3', + label: 'gemma3 [ & Uncensored ]', + descriptionEn: 'Gemma3-abliterated is a family of open, uncensored models for general tasks.', + descriptionPt: 'Gemma3-abliterated é uma família de modelos abertos, não censurados, para tarefas gerais.', + models: [ + { + name: 'huihui_ai/gemma3-abliterated:1b', + label: 'Gemma3 Uncensored 1B', + parameterSize: '1B', + thinking: false, + uncensored: true + }, + { + name: 'huihui_ai/gemma3-abliterated:4b', + label: 'Gemma3 Uncensored 4B', + parameterSize: '4B', + thinking: false, + uncensored: true + }, + { + name: 'gemma3:1b', + label: 'Gemma3 1B', + parameterSize: '1B', + thinking: false, + uncensored: false + }, + { + name: 'gemma3:4b', + label: 'Gemma3 4B', + parameterSize: '4B', + thinking: false, + uncensored: false + }, + ] + }, + { + name: 'qwen3', + label: 'Qwen3', + descriptionEn: 'Qwen3 is a multilingual reasoning model series.', + descriptionPt: 'Qwen3 é uma série de modelos multilingues.', + models: [ + { + name: 'qwen3:0.6b', + label: 'Qwen3 0.6B', + parameterSize: '0.6B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:1.7b', + label: 'Qwen3 1.7B', + parameterSize: '1.7B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:4b', + label: 'Qwen3 4B', + parameterSize: '4B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:8b', + label: 'Qwen3 8B', + parameterSize: '8B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:14b', + label: 'Qwen3 14B', + parameterSize: '14B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:30b', + label: 'Qwen3 30B', + parameterSize: '30B', + thinking: true, + uncensored: false + }, + { + name: 'qwen3:32b', + label: 'Qwen3 32B', + parameterSize: '32B', 
+ thinking: true, + uncensored: false + }, + ] + }, + { + name: 'qwen3-abliterated', + label: 'Qwen3 [ Uncensored ]', + descriptionEn: 'Qwen3-abliterated is a multilingual reasoning model series.', + descriptionPt: 'Qwen3-abliterated é uma série de modelos multilingues.', + models: [ + { + name: 'huihui_ai/qwen3-abliterated:0.6b', + label: 'Qwen3 Uncensored 0.6B', + parameterSize: '0.6B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:1.7b', + label: 'Qwen3 Uncensored 1.7B', + parameterSize: '1.7B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:4b', + label: 'Qwen3 Uncensored 4B', + parameterSize: '4B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:8b', + label: 'Qwen3 Uncensored 8B', + parameterSize: '8B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:14b', + label: 'Qwen3 Uncensored 14B', + parameterSize: '14B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:30b', + label: 'Qwen3 Uncensored 30B', + parameterSize: '30B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/qwen3-abliterated:32b', + label: 'Qwen3 Uncensored 32B', + parameterSize: '32B', + thinking: true, + uncensored: true + }, + ] + }, + { + name: 'qwq', + label: 'QwQ', + descriptionEn: 'QwQ is the reasoning model of the Qwen series.', + descriptionPt: 'QwQ é o modelo de raciocínio da série Qwen.', + models: [ + { + name: 'qwq:32b', + label: 'QwQ 32B', + parameterSize: '32B', + thinking: true, + uncensored: false + }, + { + name: 'huihui_ai/qwq-abliterated:32b', + label: 'QwQ Uncensored 32B', + parameterSize: '32B', + thinking: true, + uncensored: true + }, + ] + }, + { + name: 'llama4', + label: 'Llama4', + descriptionEn: 'The latest collection of multimodal models from Meta.', + descriptionPt: 'A coleção mais recente de modelos multimodais da Meta.', + models: [ + { + name: 'llama4:scout', + label: 'Llama4 109B A17B', + parameterSize: '109B', + thinking: false, + uncensored: false + }, + ] + }, + { + name: 'deepseek', + label: 'DeepSeek [ & Uncensored ]', + descriptionEn: 'DeepSeek is a research model for reasoning tasks.', + descriptionPt: 'DeepSeek é um modelo de pesquisa para tarefas de raciocínio.', + models: [ + { + name: 'deepseek-r1:1.5b', + label: 'DeepSeek 1.5B', + parameterSize: '1.5B', + thinking: true, + uncensored: false + }, + { + name: 'deepseek-r1:7b', + label: 'DeepSeek 7B', + parameterSize: '7B', + thinking: true, + uncensored: false + }, + { + name: 'deepseek-r1:8b', + label: 'DeepSeek 8B', + parameterSize: '8B', + thinking: true, + uncensored: false + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:1.5b', + label: 'DeepSeek Uncensored 1.5B', + parameterSize: '1.5B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:7b', + label: 'DeepSeek Uncensored 7B', + parameterSize: '7B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:8b', + label: 'DeepSeek Uncensored 8B', + parameterSize: '8B', + thinking: true, + uncensored: true + }, + { + name: 'huihui_ai/deepseek-r1-abliterated:14b', + label: 'DeepSeek Uncensored 14B', + parameterSize: '14B', + thinking: true, + uncensored: true + }, + ] + }, + { + name: 'hermes3', + label: 'Hermes3', + descriptionEn: 'Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research.', + descriptionPt: 'Hermes 3 é a versão mais recente da série Hermes de LLMs da Nous Research.', 
+    models: [
+      {
+        name: 'hermes3:3b',
+        label: 'Hermes3 3B',
+        parameterSize: '3B',
+        thinking: false,
+        uncensored: false
+      },
+      {
+        name: 'hermes3:8b',
+        label: 'Hermes3 8B',
+        parameterSize: '8B',
+        thinking: false,
+        uncensored: false
+      },
+    ]
+  },
+  {
+    name: 'mistral',
+    label: 'Mistral',
+    descriptionEn: 'The 7B model released by Mistral AI, updated to version 0.3.',
+    descriptionPt: 'O modelo 7B lançado pela Mistral AI, atualizado para a versão 0.3.',
+    models: [
+      {
+        name: 'mistral:7b',
+        label: 'Mistral 7B',
+        parameterSize: '7B',
+        thinking: false,
+        uncensored: false
+      },
+    ]
+  },
+  {
+    name: 'phi4',
+    label: 'Phi4 [ & Uncensored ]',
+    descriptionEn: 'Phi-4 is a 14B parameter, state-of-the-art open model from Microsoft.',
+    descriptionPt: 'Phi-4 é um modelo de 14B de última geração, aberto pela Microsoft.',
+    models: [
+      {
+        name: 'hf.co/unsloth/Phi-4-mini-reasoning-GGUF',
+        label: 'Phi4 Mini Reasoning',
+        parameterSize: '4B',
+        thinking: true,
+        uncensored: false
+      },
+      {
+        name: 'phi4:14b',
+        label: 'Phi4 14B',
+        parameterSize: '14B',
+        thinking: false,
+        uncensored: false
+      },
+      {
+        name: 'hf.co/unsloth/Phi-4-reasoning-plus-GGUF',
+        label: 'Phi4 Reasoning Plus',
+        parameterSize: '14B',
+        thinking: true,
+        uncensored: false
+      },
+      {
+        name: 'huihui_ai/phi4-abliterated:14b',
+        label: 'Phi4 Uncensored 14B',
+        parameterSize: '14B',
+        thinking: false,
+        uncensored: true
+      },
+    ]
+  },
+  {
+    name: 'phi3',
+    label: 'Phi3',
+    descriptionEn: 'Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.',
+    descriptionPt: 'Phi-3 é uma família de modelos leves de 3B (Mini) e 14B (Médio) de última geração, abertos pela Microsoft.',
+    models: [
+      {
+        name: 'phi3:3.8b',
+        label: 'Phi3 3.8B',
+        parameterSize: '3.8B',
+        thinking: false,
+        uncensored: false
+      },
+    ]
+  },
+  {
+    name: 'llama3',
+    label: 'Llama3',
+    descriptionEn: 'Llama 3, a lightweight model from Meta.',
+    descriptionPt: 'Llama 3, um modelo leve da Meta.',
+    models: [
+      {
+        name: 'llama3:8b',
+        label: 'Llama3 8B',
+        parameterSize: '8B',
+        thinking: false,
+        uncensored: false
+      },
+    ]
+  },
+  {
+    name: 'llama3.1',
+    label: 'Llama3.1 [ Uncensored ]',
+    descriptionEn: 'Abliterated v3 Llama 3.1 8B with an uncensored prompt.',
+    descriptionPt: 'Llama3.1 é um modelo aberto, leve e para dispositivos locais, com prompt não censurado.',
+    models: [
+      {
+        name: 'mannix/llama3.1-8b-abliterated:latest',
+        label: 'Llama3.1 8B',
+        parameterSize: '8B',
+        thinking: false,
+        uncensored: true
+      },
+    ]
+  },
+  {
+    name: 'llama3.2',
+    label: 'Llama3.2 [ & Uncensored ]',
+    descriptionEn: 'Llama3.2 is a family of open, lightweight models for general tasks.',
+    descriptionPt: 'Llama3.2 é uma família de modelos abertos, leves e para dispositivos locais, para tarefas gerais.',
+    models: [
+      {
+        name: 'llama3.2:1b',
+        label: 'Llama3.2 1B',
+        parameterSize: '1B',
+        thinking: false,
+        uncensored: false
+      },
+      {
+        name: 'llama3.2:3b',
+        label: 'Llama3.2 3B',
+        parameterSize: '3B',
+        thinking: false,
+        uncensored: false
+      },
+      {
+        name: 'socialnetwooky/llama3.2-abliterated:3b_q8_0',
+        label: 'Llama3.2 Uncensored 3B',
+        parameterSize: '3B',
+        thinking: false,
+        uncensored: true
+      },
+    ]
+  },
+];
\ No newline at end of file
diff --git a/webui/components/account/model-picker.tsx b/webui/components/account/model-picker.tsx
new file mode 100755
index 0000000..319b889
--- /dev/null
+++ b/webui/components/account/model-picker.tsx
@@ -0,0 +1,155 @@
+"use client"
+
+/*
+Adapted from https://ui.shadcn.com/docs/components/combobox
+*/ + +import * as React from "react" +import { CheckIcon, ChevronsUpDownIcon, Cpu, Brain, ShieldOff } from "lucide-react" +import { cn } from "@/lib/utils" +import { Button } from "@/components/ui/button" +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, +} from "@/components/ui/command" +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover" +import { models } from "./ai" + +interface ModelPickerProps { + value?: string + onValueChange?: (value: string) => void + disabled?: boolean + className?: string +} + +export function ModelPicker({ value, onValueChange, disabled = false, className }: ModelPickerProps) { + const [open, setOpen] = React.useState(false) + + const currentModel = React.useMemo(() => { + for (const category of models) { + const model = category.models.find(m => m.name === value) + if (model) { + return { + model, + category: category.label, + categoryDescription: category.descriptionEn + } + } + } + return null + }, [value]) + + const handleSelect = (modelName: string) => { + onValueChange?.(modelName) + setOpen(false) + } + + return ( + + + + + + + + + No model found. + {models.map((category) => ( + +
+ {category.descriptionEn} +
+ {category.models.map((model) => ( + handleSelect(model.name)} + className="flex items-center gap-3 py-3" + > + +
+
{model.label}
+
+ + {model.parameterSize} + + {model.thinking && ( + + + Thinking + + )} + {model.uncensored && ( + + + Uncensored + + )} +
+
+
+ ))} +
+ ))} +
+
+
+
+ ) +} diff --git a/webui/components/app-sidebar.tsx b/webui/components/app-sidebar.tsx new file mode 100755 index 0000000..98aff08 --- /dev/null +++ b/webui/components/app-sidebar.tsx @@ -0,0 +1,232 @@ +"use client"; + +import * as React from "react" +import { + Home, + MessageSquare, + Users, + Sparkles, + User, + Trash2, + LogOut +} from "lucide-react" +import Link from "next/link" +import Image from "next/image" +import { + Sidebar, + SidebarContent, + SidebarFooter, + SidebarGroup, + SidebarGroupContent, + SidebarGroupLabel, + SidebarHeader, + SidebarMenu, + SidebarMenuButton, + SidebarMenuItem, + useSidebar, +} from "@/components/ui/sidebar" +import { ThemeToggle } from "@/components/theme-toggle" +import { SiYoutube } from "react-icons/si" +import { RiTelegram2Line } from "react-icons/ri" +import { useAuth } from "@/contexts/auth-context" +import { Badge } from "@/components/ui/badge" + +interface AccountItem { + title: string; + url: string; + icon: React.ComponentType>; + danger?: boolean; +} + +const navigation = [ + { + title: "Home", + url: "/", + icon: Home, + }, + { + title: "About", + url: "/about", + icon: MessageSquare, + }, +] + +const features = [ + { + title: "AI Commands", + url: "/#ai-features", + icon: Sparkles, + }, + { + title: "Video Download", + url: "/#youtube-features", + icon: SiYoutube, + }, + { + title: "User Accounts & UI", + url: "/#user-features", + icon: Users, + }, +] + +export function AppSidebar() { + const { isAuthenticated, loading, logout } = useAuth(); + const { setOpenMobile, isMobile } = useSidebar(); + + const handleMenuItemClick = () => { + if (isMobile) { + setOpenMobile(false); + } + }; + + const accountItems: AccountItem[] = React.useMemo(() => { + if (loading) { + return []; + } + + if (isAuthenticated) { + return [ + { + title: "My Account", + url: "/account", + icon: User, + }, + { + title: "Logout", + url: "#", + icon: LogOut, + danger: true, + }, + { + title: "Delete Account", + url: "/account/delete", + icon: Trash2, + danger: true, + }, + ]; + } else { + return [ + { + title: "Sign in with Telegram", + url: "/login", + icon: RiTelegram2Line, + }, + ]; + } + }, [isAuthenticated, loading]); + + return ( + + + + + +
+ +
+ Kowalski Logo +
+
+ Kowalski +
+ + Beta +
+
+
+
+
+ + + + Navigation + + + {navigation.map((item) => ( + + + + + {item.title} + + + + ))} + + + + + {!loading && ( + + Account + + + {accountItems.map((item) => ( + + {item.title === "Logout" ? ( + { + logout(); + handleMenuItemClick(); + }} + className={item.danger ? "text-red-600 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300" : ""} + > + + {item.title} + + ) : ( + + + + {item.title} + + + )} + + ))} + + + + )} + + + Features + + + {features.map((item) => ( + + + + + {item.title} + + + + ))} + + + + + + + + +
+ +
+
+
+
+
+ ) +} diff --git a/webui/components/footer.tsx b/webui/components/footer.tsx new file mode 100755 index 0000000..5daee38 --- /dev/null +++ b/webui/components/footer.tsx @@ -0,0 +1,24 @@ +import Link from "next/link"; +import Image from "next/image"; + +export default function Footer() { + return ( +
+
+
+ Kowalski + Kowalski +
+

+ Built with ❤️ by ABOCN and contributors under open source licenses. +

+
+
+ ); +} diff --git a/webui/components/header-auth.tsx b/webui/components/header-auth.tsx new file mode 100755 index 0000000..a265036 --- /dev/null +++ b/webui/components/header-auth.tsx @@ -0,0 +1,29 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { RiTelegram2Line } from "react-icons/ri"; +import Link from "next/link"; +import { useAuth } from "@/contexts/auth-context"; + +export function HeaderAuth() { + const { isAuthenticated, loading } = useAuth(); + + if (loading) { + return ( +
+ ); + } + + if (isAuthenticated) { + return null; + } + + return ( + + ); +} diff --git a/webui/components/providers.tsx b/webui/components/providers.tsx new file mode 100755 index 0000000..4be39c8 --- /dev/null +++ b/webui/components/providers.tsx @@ -0,0 +1,21 @@ +"use client" + +import * as React from "react" +import { ThemeProvider as NextThemesProvider } from "next-themes" + +export function ThemeProvider({ + children, + ...props +}: React.ComponentProps) { + return ( + + {children} + + ) +} \ No newline at end of file diff --git a/webui/components/theme-toggle.tsx b/webui/components/theme-toggle.tsx new file mode 100755 index 0000000..47aabff --- /dev/null +++ b/webui/components/theme-toggle.tsx @@ -0,0 +1,37 @@ +"use client" + +import * as React from "react" +import { Moon, Sun } from "lucide-react" +import { useTheme } from "next-themes" + +import { Button } from "@/components/ui/button" + +export function ThemeToggle() { + const { theme, setTheme } = useTheme() + const [mounted, setMounted] = React.useState(false) + + React.useEffect(() => { + setMounted(true) + }, []) + + if (!mounted) { + return ( + + ) + } + + return ( + + ) +} diff --git a/webui/components/ui/badge.tsx b/webui/components/ui/badge.tsx new file mode 100644 index 0000000..0205413 --- /dev/null +++ b/webui/components/ui/badge.tsx @@ -0,0 +1,46 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "inline-flex items-center justify-center rounded-md border px-2 py-0.5 text-xs font-medium w-fit whitespace-nowrap shrink-0 [&>svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden", + { + variants: { + variant: { + default: + "border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90", + secondary: + "border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90", + destructive: + "border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60", + outline: + "text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +function Badge({ + className, + variant, + asChild = false, + ...props +}: React.ComponentProps<"span"> & + VariantProps & { asChild?: boolean }) { + const Comp = asChild ? 
Slot : "span" + + return ( + + ) +} + +export { Badge, badgeVariants } diff --git a/webui/components/ui/button.tsx b/webui/components/ui/button.tsx new file mode 100755 index 0000000..a2df8dc --- /dev/null +++ b/webui/components/ui/button.tsx @@ -0,0 +1,59 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const buttonVariants = cva( + "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive", + { + variants: { + variant: { + default: + "bg-primary text-primary-foreground shadow-xs hover:bg-primary/90", + destructive: + "bg-destructive text-white shadow-xs hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60", + outline: + "border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50", + secondary: + "bg-secondary text-secondary-foreground shadow-xs hover:bg-secondary/80", + ghost: + "hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50", + link: "text-primary underline-offset-4 hover:underline", + }, + size: { + default: "h-9 px-4 py-2 has-[>svg]:px-3", + sm: "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5", + lg: "h-10 rounded-md px-6 has-[>svg]:px-4", + icon: "size-9", + }, + }, + defaultVariants: { + variant: "default", + size: "default", + }, + } +) + +function Button({ + className, + variant, + size, + asChild = false, + ...props +}: React.ComponentProps<"button"> & + VariantProps & { + asChild?: boolean + }) { + const Comp = asChild ? Slot : "button" + + return ( + + ) +} + +export { Button, buttonVariants } diff --git a/webui/components/ui/command.tsx b/webui/components/ui/command.tsx new file mode 100755 index 0000000..8cb4ca7 --- /dev/null +++ b/webui/components/ui/command.tsx @@ -0,0 +1,184 @@ +"use client" + +import * as React from "react" +import { Command as CommandPrimitive } from "cmdk" +import { SearchIcon } from "lucide-react" + +import { cn } from "@/lib/utils" +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog" + +function Command({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandDialog({ + title = "Command Palette", + description = "Search for a command to run...", + children, + className, + showCloseButton = true, + ...props +}: React.ComponentProps & { + title?: string + description?: string + className?: string + showCloseButton?: boolean +}) { + return ( + + + {title} + {description} + + + + {children} + + + + ) +} + +function CommandInput({ + className, + ...props +}: React.ComponentProps) { + return ( +
+ + +
+ ) +} + +function CommandList({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandEmpty({ + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandGroup({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandSeparator({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandItem({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function CommandShortcut({ + className, + ...props +}: React.ComponentProps<"span">) { + return ( + + ) +} + +export { + Command, + CommandDialog, + CommandInput, + CommandList, + CommandEmpty, + CommandGroup, + CommandItem, + CommandShortcut, + CommandSeparator, +} diff --git a/webui/components/ui/dialog.tsx b/webui/components/ui/dialog.tsx new file mode 100755 index 0000000..d9ccec9 --- /dev/null +++ b/webui/components/ui/dialog.tsx @@ -0,0 +1,143 @@ +"use client" + +import * as React from "react" +import * as DialogPrimitive from "@radix-ui/react-dialog" +import { XIcon } from "lucide-react" + +import { cn } from "@/lib/utils" + +function Dialog({ + ...props +}: React.ComponentProps) { + return +} + +function DialogTrigger({ + ...props +}: React.ComponentProps) { + return +} + +function DialogPortal({ + ...props +}: React.ComponentProps) { + return +} + +function DialogClose({ + ...props +}: React.ComponentProps) { + return +} + +function DialogOverlay({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function DialogContent({ + className, + children, + showCloseButton = true, + ...props +}: React.ComponentProps & { + showCloseButton?: boolean +}) { + return ( + + + + {children} + {showCloseButton && ( + + + Close + + )} + + + ) +} + +function DialogHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function DialogFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function DialogTitle({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function DialogDescription({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +export { + Dialog, + DialogClose, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogOverlay, + DialogPortal, + DialogTitle, + DialogTrigger, +} diff --git a/webui/components/ui/input.tsx b/webui/components/ui/input.tsx new file mode 100755 index 0000000..03295ca --- /dev/null +++ b/webui/components/ui/input.tsx @@ -0,0 +1,21 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +function Input({ className, type, ...props }: React.ComponentProps<"input">) { + return ( + + ) +} + +export { Input } diff --git a/webui/components/ui/popover.tsx b/webui/components/ui/popover.tsx new file mode 100755 index 0000000..01e468b --- /dev/null +++ b/webui/components/ui/popover.tsx @@ -0,0 +1,48 @@ +"use client" + +import * as React from "react" +import * as PopoverPrimitive from "@radix-ui/react-popover" + +import { cn } from "@/lib/utils" + +function Popover({ + ...props +}: React.ComponentProps) { + return +} + +function PopoverTrigger({ + ...props +}: React.ComponentProps) { + return +} + +function PopoverContent({ + className, + align = "center", + sideOffset = 4, + ...props +}: React.ComponentProps) { + return ( + + + + ) +} + +function PopoverAnchor({ + ...props +}: React.ComponentProps) { + return +} + +export { Popover, PopoverTrigger, PopoverContent, PopoverAnchor } diff --git a/webui/components/ui/separator.tsx b/webui/components/ui/separator.tsx new file mode 100755 index 0000000..275381c --- /dev/null +++ b/webui/components/ui/separator.tsx @@ -0,0 +1,28 @@ +"use client" + +import * as React from "react" +import * as SeparatorPrimitive from "@radix-ui/react-separator" + +import { cn } from "@/lib/utils" + +function Separator({ + className, + orientation = "horizontal", + decorative = true, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +export { Separator } diff --git a/webui/components/ui/sheet.tsx b/webui/components/ui/sheet.tsx new file mode 100755 index 0000000..84649ad --- /dev/null +++ b/webui/components/ui/sheet.tsx @@ -0,0 +1,139 @@ +"use client" + +import * as React from "react" +import * as SheetPrimitive from "@radix-ui/react-dialog" +import { XIcon } from "lucide-react" + +import { cn } from "@/lib/utils" + +function Sheet({ ...props }: React.ComponentProps) { + return +} + +function SheetTrigger({ + ...props +}: React.ComponentProps) { + return +} + +function SheetClose({ + ...props +}: React.ComponentProps) { + return +} + +function SheetPortal({ + ...props +}: React.ComponentProps) { + return +} + +function SheetOverlay({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function SheetContent({ + className, + children, + side = "right", + ...props +}: React.ComponentProps & { + side?: "top" | "right" | "bottom" | "left" +}) { + return ( + + + + {children} + + + Close + + + + ) +} + +function SheetHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function SheetFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function SheetTitle({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function SheetDescription({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +export { + Sheet, + SheetTrigger, + SheetClose, + SheetContent, + SheetHeader, + SheetFooter, + SheetTitle, + SheetDescription, +} diff --git a/webui/components/ui/sidebar.tsx b/webui/components/ui/sidebar.tsx new file mode 100755 index 0000000..1ee5a45 --- /dev/null +++ b/webui/components/ui/sidebar.tsx @@ -0,0 +1,726 @@ +"use client" + +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, VariantProps } from "class-variance-authority" +import { PanelLeftIcon } from "lucide-react" + +import { useIsMobile } from "@/hooks/use-mobile" +import { cn } from "@/lib/utils" +import { Button } from "@/components/ui/button" +import { Input } from "@/components/ui/input" +import { Separator } from "@/components/ui/separator" +import { + Sheet, + SheetContent, + SheetDescription, + SheetHeader, + SheetTitle, +} from "@/components/ui/sheet" +import { Skeleton } from "@/components/ui/skeleton" +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip" + +const SIDEBAR_COOKIE_NAME = "sidebar_state" +const SIDEBAR_COOKIE_MAX_AGE = 60 * 60 * 24 * 7 +const SIDEBAR_WIDTH = "16rem" +const SIDEBAR_WIDTH_MOBILE = "18rem" +const SIDEBAR_WIDTH_ICON = "3rem" +const SIDEBAR_KEYBOARD_SHORTCUT = "b" + +type SidebarContextProps = { + state: "expanded" | "collapsed" + open: boolean + setOpen: (open: boolean) => void + openMobile: boolean + setOpenMobile: (open: boolean) => void + isMobile: boolean + toggleSidebar: () => void +} + +const SidebarContext = React.createContext(null) + +function useSidebar() { + const context = React.useContext(SidebarContext) + if (!context) { + throw new Error("useSidebar must be used within a SidebarProvider.") + } + + return context +} + +function SidebarProvider({ + defaultOpen = true, + open: openProp, + onOpenChange: setOpenProp, + className, + style, + children, + ...props +}: React.ComponentProps<"div"> & { + defaultOpen?: boolean + open?: boolean + onOpenChange?: (open: boolean) => void +}) { + const isMobile = useIsMobile() + const [openMobile, setOpenMobile] = React.useState(false) + + // This is the internal state of the sidebar. + // We use openProp and setOpenProp for control from outside the component. + const [_open, _setOpen] = React.useState(defaultOpen) + const open = openProp ?? _open + const setOpen = React.useCallback( + (value: boolean | ((value: boolean) => boolean)) => { + const openState = typeof value === "function" ? value(open) : value + if (setOpenProp) { + setOpenProp(openState) + } else { + _setOpen(openState) + } + + // This sets the cookie to keep the sidebar state. + document.cookie = `${SIDEBAR_COOKIE_NAME}=${openState}; path=/; max-age=${SIDEBAR_COOKIE_MAX_AGE}` + }, + [setOpenProp, open] + ) + + // Helper to toggle the sidebar. + const toggleSidebar = React.useCallback(() => { + return isMobile ? setOpenMobile((open) => !open) : setOpen((open) => !open) + }, [isMobile, setOpen, setOpenMobile]) + + // Adds a keyboard shortcut to toggle the sidebar. 
+ React.useEffect(() => { + const handleKeyDown = (event: KeyboardEvent) => { + if ( + event.key === SIDEBAR_KEYBOARD_SHORTCUT && + (event.metaKey || event.ctrlKey) + ) { + event.preventDefault() + toggleSidebar() + } + } + + window.addEventListener("keydown", handleKeyDown) + return () => window.removeEventListener("keydown", handleKeyDown) + }, [toggleSidebar]) + + // We add a state so that we can do data-state="expanded" or "collapsed". + // This makes it easier to style the sidebar with Tailwind classes. + const state = open ? "expanded" : "collapsed" + + const contextValue = React.useMemo( + () => ({ + state, + open, + setOpen, + isMobile, + openMobile, + setOpenMobile, + toggleSidebar, + }), + [state, open, setOpen, isMobile, openMobile, setOpenMobile, toggleSidebar] + ) + + return ( + + +
+ {children} +
+
+
+ ) +} + +function Sidebar({ + side = "left", + variant = "sidebar", + collapsible = "offcanvas", + className, + children, + ...props +}: React.ComponentProps<"div"> & { + side?: "left" | "right" + variant?: "sidebar" | "floating" | "inset" + collapsible?: "offcanvas" | "icon" | "none" +}) { + const { isMobile, state, openMobile, setOpenMobile } = useSidebar() + + if (collapsible === "none") { + return ( +
+ {children} +
+ ) + } + + if (isMobile) { + return ( + + + + Sidebar + Displays the mobile sidebar. + +
{children}
+
+
+ ) + } + + return ( +
+ {/* This is what handles the sidebar gap on desktop */} +
+ +
+ ) +} + +function SidebarTrigger({ + className, + onClick, + ...props +}: React.ComponentProps) { + const { toggleSidebar } = useSidebar() + + return ( + + ) +} + +function SidebarRail({ className, ...props }: React.ComponentProps<"button">) { + const { toggleSidebar } = useSidebar() + + return ( + + ) + } +) +TabsTrigger.displayName = "TabsTrigger" + +interface TabsContentProps { + value: string + children: React.ReactNode + className?: string +} + +const TabsContent = React.forwardRef( + ({ className, children, value, ...props }, ref) => { + const context = React.useContext(TabsContext) + + if (!context) { + throw new Error("TabsContent must be used within Tabs") + } + + const { value: currentValue } = context + + if (currentValue !== value) { + return null + } + + return ( +
+ {children} +
+ ) + } +) +TabsContent.displayName = "TabsContent" + +export { Tabs, TabsList, TabsTrigger, TabsContent } \ No newline at end of file diff --git a/webui/components/ui/tooltip.tsx b/webui/components/ui/tooltip.tsx new file mode 100755 index 0000000..4ee26b3 --- /dev/null +++ b/webui/components/ui/tooltip.tsx @@ -0,0 +1,61 @@ +"use client" + +import * as React from "react" +import * as TooltipPrimitive from "@radix-ui/react-tooltip" + +import { cn } from "@/lib/utils" + +function TooltipProvider({ + delayDuration = 0, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function Tooltip({ + ...props +}: React.ComponentProps) { + return ( + + + + ) +} + +function TooltipTrigger({ + ...props +}: React.ComponentProps) { + return +} + +function TooltipContent({ + className, + sideOffset = 0, + children, + ...props +}: React.ComponentProps) { + return ( + + + {children} + + + + ) +} + +export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/webui/contexts/auth-context.tsx b/webui/contexts/auth-context.tsx new file mode 100755 index 0000000..503473b --- /dev/null +++ b/webui/contexts/auth-context.tsx @@ -0,0 +1,130 @@ +"use client"; + +import React, { createContext, useContext, useEffect, useState } from "react"; + +interface UserData { + telegramId: string; + username: string; + firstName: string; + lastName: string; + aiEnabled: boolean; + showThinking: boolean; + customAiModel: string; + aiTemperature: number; + aiRequests: number; + aiCharacters: number; + disabledCommands: string[]; + languageCode: string; +} + +interface AuthContextType { + user: UserData | null; + loading: boolean; + isAuthenticated: boolean; + logout: () => Promise; + refreshUser: () => Promise; +} + +const AuthContext = createContext(undefined); + +export function AuthProvider({ children }: { children: React.ReactNode }) { + const [user, setUser] = useState(null); + const [loading, setLoading] = useState(true); + + const isAuthenticated = !!user; + + const fetchUser = async () => { + try { + if (typeof window === 'undefined') { + setUser(null); + setLoading(false); + return; + } + + const sessionToken = localStorage.getItem('kowalski-session'); + + if (!sessionToken) { + setUser(null); + setLoading(false); + return; + } + + const response = await fetch('/api/user/profile', { + headers: { + 'Authorization': `Bearer ${sessionToken}` + } + }); + + if (response.ok) { + const userData = await response.json(); + setUser(userData); + } else { + setUser(null); + if (typeof window !== 'undefined') { + localStorage.removeItem('kowalski-session'); + } + } + } catch (error) { + console.error('Error fetching user data:', error); + setUser(null); + } finally { + setLoading(false); + } + }; + + const logout = async () => { + try { + if (typeof window !== 'undefined') { + const sessionToken = localStorage.getItem('kowalski-session'); + if (sessionToken) { + await fetch('/api/auth/logout', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${sessionToken}` + } + }); + } + localStorage.removeItem('kowalski-session'); + } + setUser(null); + window.location.href = '/login'; + } catch (error) { + console.error('Logout error:', error); + if (typeof window !== 'undefined') { + localStorage.removeItem('kowalski-session'); + } + setUser(null); + window.location.href = '/login'; + } + }; + + const refreshUser = async () => { + await fetchUser(); + }; + + useEffect(() => { + fetchUser(); + }, []); + + return ( + + {children} + + ); +} + +export function useAuth() { + const context = 
useContext(AuthContext); + if (context === undefined) { + throw new Error('useAuth must be used within an AuthProvider'); + } + return context; +} diff --git a/webui/drizzle.config.ts b/webui/drizzle.config.ts new file mode 100755 index 0000000..e63a91d --- /dev/null +++ b/webui/drizzle.config.ts @@ -0,0 +1,11 @@ +import 'dotenv/config'; +import { defineConfig } from 'drizzle-kit'; + +export default defineConfig({ + out: './drizzle', + schema: './lib/schema.ts', + dialect: 'postgresql', + dbCredentials: { + url: process.env.databaseUrl!, + }, +}); diff --git a/webui/eslint.config.mjs b/webui/eslint.config.mjs new file mode 100755 index 0000000..c85fb67 --- /dev/null +++ b/webui/eslint.config.mjs @@ -0,0 +1,16 @@ +import { dirname } from "path"; +import { fileURLToPath } from "url"; +import { FlatCompat } from "@eslint/eslintrc"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +const compat = new FlatCompat({ + baseDirectory: __dirname, +}); + +const eslintConfig = [ + ...compat.extends("next/core-web-vitals", "next/typescript"), +]; + +export default eslintConfig; diff --git a/webui/hooks/use-mobile.ts b/webui/hooks/use-mobile.ts new file mode 100755 index 0000000..2b0fe1d --- /dev/null +++ b/webui/hooks/use-mobile.ts @@ -0,0 +1,19 @@ +import * as React from "react" + +const MOBILE_BREAKPOINT = 768 + +export function useIsMobile() { + const [isMobile, setIsMobile] = React.useState(undefined) + + React.useEffect(() => { + const mql = window.matchMedia(`(max-width: ${MOBILE_BREAKPOINT - 1}px)`) + const onChange = () => { + setIsMobile(window.innerWidth < MOBILE_BREAKPOINT) + } + mql.addEventListener("change", onChange) + setIsMobile(window.innerWidth < MOBILE_BREAKPOINT) + return () => mql.removeEventListener("change", onChange) + }, []) + + return !!isMobile +} diff --git a/webui/lib/auth-constants.ts b/webui/lib/auth-constants.ts new file mode 100755 index 0000000..a670833 --- /dev/null +++ b/webui/lib/auth-constants.ts @@ -0,0 +1,2 @@ +export const SESSION_COOKIE_NAME = "kowalski-session"; +export const SESSION_DURATION = 7 * 24 * 60 * 60 * 1000; diff --git a/webui/lib/auth-helpers.ts b/webui/lib/auth-helpers.ts new file mode 100755 index 0000000..b04b3b2 --- /dev/null +++ b/webui/lib/auth-helpers.ts @@ -0,0 +1,135 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { validateSession } from './auth'; +import { SESSION_COOKIE_NAME } from './auth-constants'; + +export async function requireAuth(request: NextRequest) { + const sessionToken = request.cookies.get(SESSION_COOKIE_NAME)?.value; + + if (!sessionToken) { + throw NextResponse.json({ error: "Authentication required" }, { status: 401 }); + } + + const sessionData = await validateSession(sessionToken); + + if (!sessionData || !sessionData.user) { + throw NextResponse.json({ error: "Invalid or expired session" }, { status: 401 }); + } + + return sessionData; +} + +export async function validateJsonRequest(request: NextRequest) { + const contentType = request.headers.get('content-type'); + if (!contentType || !contentType.includes('application/json')) { + throw NextResponse.json({ error: "Invalid content type" }, { status: 400 }); + } + + try { + const body = await request.json(); + + if (!body || typeof body !== 'object') { + throw NextResponse.json({ error: "Invalid request body" }, { status: 400 }); + } + + return body; + } catch { + throw NextResponse.json({ error: "Invalid JSON" }, { status: 400 }); + } +} + +export function validateString(value: unknown, fieldName: 
string, minLength = 1, maxLength = 1000): string { + if (typeof value !== 'string') { + throw NextResponse.json({ error: `${fieldName} must be a string` }, { status: 400 }); + } + + if (value.length < minLength || value.length > maxLength) { + throw NextResponse.json({ + error: `${fieldName} must be between ${minLength} and ${maxLength} characters` + }, { status: 400 }); + } + + return value; +} + +export function validateArray(value: unknown, fieldName: string, maxLength = 100): unknown[] { + if (!Array.isArray(value)) { + throw NextResponse.json({ error: `${fieldName} must be an array` }, { status: 400 }); + } + + if (value.length > maxLength) { + throw NextResponse.json({ + error: `${fieldName} cannot have more than ${maxLength} items` + }, { status: 400 }); + } + + return value; +} + +export function validateNumber(value: unknown, fieldName: string, min?: number, max?: number): number { + const num = Number(value); + + if (isNaN(num)) { + throw NextResponse.json({ error: `${fieldName} must be a valid number` }, { status: 400 }); + } + + if (min !== undefined && num < min) { + throw NextResponse.json({ error: `${fieldName} must be at least ${min}` }, { status: 400 }); + } + + if (max !== undefined && num > max) { + throw NextResponse.json({ error: `${fieldName} must be at most ${max}` }, { status: 400 }); + } + + return num; +} + +export function handleApiError(error: unknown, operation: string) { + console.error(`Error in ${operation}:`, error); + + if (error instanceof NextResponse) { + return error; + } + + return NextResponse.json({ + error: "Internal server error" + }, { status: 500 }); +} + +const rateLimitMap = new Map(); + +export function rateLimit(identifier: string, maxAttempts = 5, windowMs = 15 * 60 * 1000) { + const now = Date.now(); + const key = identifier; + const record = rateLimitMap.get(key); + + if (!record) { + rateLimitMap.set(key, { count: 1, timestamp: now }); + return { allowed: true, remaining: maxAttempts - 1 }; + } + + if (now - record.timestamp > windowMs) { + rateLimitMap.set(key, { count: 1, timestamp: now }); + return { allowed: true, remaining: maxAttempts - 1 }; + } + + record.count++; + + if (record.count > maxAttempts) { + return { allowed: false, remaining: 0 }; + } + + return { allowed: true, remaining: maxAttempts - record.count }; +} + +export function cleanupRateLimit() { + const now = Date.now(); + const windowMs = 15 * 60 * 1000; // 15m + + for (const [key, record] of rateLimitMap.entries()) { + if (now - record.timestamp > windowMs) { + rateLimitMap.delete(key); + } + } +} + +setInterval(cleanupRateLimit, 10 * 60 * 1000); diff --git a/webui/lib/auth.ts b/webui/lib/auth.ts new file mode 100755 index 0000000..17679e0 --- /dev/null +++ b/webui/lib/auth.ts @@ -0,0 +1,148 @@ +import { eq, and, gt, lt } from "drizzle-orm"; +import { db } from "./db"; +import { sessionsTable, usersTable } from "./schema"; +import { randomBytes } from "crypto"; + +export interface SessionData { + id: string; + userId: string; + sessionToken: string; + expiresAt: Date; + user?: { + telegramId: string; + username: string; + firstName: string; + lastName: string; + aiEnabled: boolean; + showThinking: boolean; + customAiModel: string; + aiTemperature: number; + aiRequests: number; + aiCharacters: number; + disabledCommands: string[]; + languageCode: string; + }; +} + +import { SESSION_COOKIE_NAME, SESSION_DURATION } from "./auth-constants"; + +export { SESSION_COOKIE_NAME }; + +export function generateSessionToken(): string { + return randomBytes(32).toString("hex"); +} 
+
+export function generateSessionId(): string {
+  return randomBytes(16).toString("hex");
+}
+
+export async function createSession(userId: string): Promise<SessionData> {
+  const sessionId = generateSessionId();
+  const sessionToken = generateSessionToken();
+  const expiresAt = new Date(Date.now() + SESSION_DURATION);
+
+  await db.delete(sessionsTable)
+    .where(
+      and(
+        eq(sessionsTable.userId, userId),
+        lt(sessionsTable.expiresAt, new Date())
+      )
+    );
+
+  const [session] = await db.insert(sessionsTable)
+    .values({
+      id: sessionId,
+      userId,
+      sessionToken,
+      expiresAt,
+    })
+    .returning();
+
+  return session;
+}
+
+export async function validateSession(sessionToken: string): Promise<SessionData | null> {
+  if (!sessionToken || typeof sessionToken !== 'string' || sessionToken.length < 32) {
+    return null;
+  }
+
+  try {
+    const sessionWithUser = await db
+      .select({
+        session: sessionsTable,
+        user: usersTable,
+      })
+      .from(sessionsTable)
+      .innerJoin(usersTable, eq(sessionsTable.userId, usersTable.telegramId))
+      .where(
+        and(
+          eq(sessionsTable.sessionToken, sessionToken),
+          gt(sessionsTable.expiresAt, new Date())
+        )
+      )
+      .limit(1);
+
+    if (sessionWithUser.length === 0) {
+      await cleanupExpiredSessions();
+      return null;
+    }
+
+    const { session, user } = sessionWithUser[0];
+
+    const oneDay = 24 * 60 * 60 * 1000;
+    const timeUntilExpiry = session.expiresAt.getTime() - Date.now();
+
+    if (timeUntilExpiry < oneDay) {
+      const newExpiresAt = new Date(Date.now() + SESSION_DURATION);
+      await db.update(sessionsTable)
+        .set({ expiresAt: newExpiresAt })
+        .where(eq(sessionsTable.id, session.id));
+
+      session.expiresAt = newExpiresAt;
+    }
+
+    return {
+      id: session.id,
+      userId: session.userId,
+      sessionToken: session.sessionToken,
+      expiresAt: session.expiresAt,
+      user: {
+        telegramId: user.telegramId,
+        username: user.username,
+        firstName: user.firstName,
+        lastName: user.lastName,
+        aiEnabled: user.aiEnabled,
+        showThinking: user.showThinking,
+        customAiModel: user.customAiModel,
+        aiTemperature: user.aiTemperature,
+        aiRequests: user.aiRequests,
+        aiCharacters: user.aiCharacters,
+        disabledCommands: user.disabledCommands || [],
+        languageCode: user.languageCode,
+      },
+    };
+  } catch (error) {
+    console.error("Error validating session:", error);
+    return null;
+  }
+}
+
+export async function invalidateSession(sessionToken: string): Promise<void> {
+  await db.delete(sessionsTable)
+    .where(eq(sessionsTable.sessionToken, sessionToken));
+}
+
+export async function cleanupExpiredSessions(): Promise<void> {
+  await db.delete(sessionsTable)
+    .where(lt(sessionsTable.expiresAt, new Date()));
+}
+
+export function getSessionCookieOptions() {
+  return {
+    httpOnly: true,
+    secure: process.env.NODE_ENV === "production",
+    sameSite: "lax" as const,
+    maxAge: SESSION_DURATION / 1000,
+    path: "/",
+  };
+}
diff --git a/webui/lib/db.ts b/webui/lib/db.ts
new file mode 100755
index 0000000..f121f23
--- /dev/null
+++ b/webui/lib/db.ts
@@ -0,0 +1,13 @@
+import { drizzle } from "drizzle-orm/node-postgres";
+import { Pool } from "pg";
+import * as schema from "./schema";
+
+const pool = new Pool({
+  connectionString: process.env.databaseUrl,
+});
+
+export const db = drizzle(pool, { schema });
+
+pool.on('error', (err) => {
+  console.error('Unexpected error on idle client', err);
+});
diff --git a/webui/lib/schema.ts b/webui/lib/schema.ts
new file mode 100755
index 0000000..ce9a8ed
--- /dev/null
+++ b/webui/lib/schema.ts
@@ -0,0 +1,52 @@
+import {
+  integer,
+  pgTable,
+  varchar,
+  timestamp,
+  boolean,
+  real,
+  index
+} from "drizzle-orm/pg-core";
+
+export const usersTable = pgTable("users", {
+  telegramId: varchar({ length: 255 }).notNull().primaryKey(),
+  username: varchar({ length: 255 }).notNull(),
+  firstName: varchar({ length: 255 }).notNull(),
+  lastName: varchar({ length: 255 }).notNull(),
+  aiEnabled: boolean().notNull().default(false),
+  showThinking: boolean().notNull().default(false),
+  customAiModel: varchar({ length: 255 }).notNull().default("deepseek-r1:1.5b"),
+  aiTemperature: real().notNull().default(0.9),
+  aiRequests: integer().notNull().default(0),
+  aiCharacters: integer().notNull().default(0),
+  disabledCommands: varchar({ length: 255 }).array().notNull().default([]),
+  languageCode: varchar({ length: 255 }).notNull(),
+  aiTimeoutUntil: timestamp(),
+  aiMaxExecutionTime: integer().default(0),
+  createdAt: timestamp().notNull().defaultNow(),
+  updatedAt: timestamp().notNull().defaultNow(),
+});
+
+export const twoFactorTable = pgTable("two_factor", {
+  userId: varchar({ length: 255 }).notNull().references(() => usersTable.telegramId).primaryKey(),
+  currentCode: varchar({ length: 255 }).notNull(),
+  codeExpiresAt: timestamp().notNull(),
+  codeAttempts: integer().notNull().default(0),
+  createdAt: timestamp().notNull().defaultNow(),
+  updatedAt: timestamp().notNull().defaultNow(),
+}, (table) => [
+  index("idx_two_factor_user_id").on(table.userId),
+  index("idx_two_factor_code_expires_at").on(table.codeExpiresAt),
+]);
+
+export const sessionsTable = pgTable("sessions", {
+  id: varchar({ length: 255 }).notNull().primaryKey(),
+  userId: varchar({ length: 255 }).notNull().references(() => usersTable.telegramId),
+  sessionToken: varchar({ length: 255 }).notNull().unique(),
+  expiresAt: timestamp().notNull(),
+  createdAt: timestamp().notNull().defaultNow(),
+  updatedAt: timestamp().notNull().defaultNow(),
+}, (table) => [
+  index("idx_sessions_user_id").on(table.userId),
+  index("idx_sessions_expires_at").on(table.expiresAt),
+]);
diff --git a/webui/lib/utils.ts b/webui/lib/utils.ts
new file mode 100755
index 0000000..bd0c391
--- /dev/null
+++ b/webui/lib/utils.ts
@@ -0,0 +1,6 @@
+import { clsx, type ClassValue } from "clsx"
+import { twMerge } from "tailwind-merge"
+
+export function cn(...inputs: ClassValue[]) {
+  return twMerge(clsx(inputs))
+}
diff --git a/webui/middleware.ts b/webui/middleware.ts
new file mode 100755
index 0000000..14be9bb
--- /dev/null
+++ b/webui/middleware.ts
@@ -0,0 +1,47 @@
+import { NextRequest, NextResponse } from 'next/server';
+import { SESSION_COOKIE_NAME } from '@/lib/auth-constants';
+
+const protectedApiRoutes = [
+  '/api/user/profile',
+  '/api/user/settings',
+  '/api/user/delete',
+];
+
+export function middleware(request: NextRequest) {
+  const { pathname } = request.nextUrl;
+
+  const cookieToken = request.cookies.get(SESSION_COOKIE_NAME)?.value;
+  const authHeader = request.headers.get('authorization');
+  const bearerToken = authHeader?.startsWith('Bearer ') ? authHeader.slice(7) : null;
+  const sessionToken = bearerToken || cookieToken;
+
+  const isProtectedApiRoute = protectedApiRoutes.some(route =>
+    pathname === route || pathname.startsWith(route + '/')
+  );
+
+  if (isProtectedApiRoute && !sessionToken) {
+    return new NextResponse(
+      JSON.stringify({ error: 'Authentication required' }),
+      { status: 401, headers: { 'Content-Type': 'application/json' } }
+    );
+  }
+
+  if (pathname === '/login' && sessionToken) {
+    return NextResponse.redirect(new URL('/account', request.url));
+  }
+
+  const response = NextResponse.next();
+
+  response.headers.set('X-Content-Type-Options', 'nosniff');
+  response.headers.set('X-Frame-Options', 'DENY');
+  response.headers.set('X-XSS-Protection', '1; mode=block');
+  response.headers.set('Referrer-Policy', 'strict-origin-when-cross-origin');
+
+  return response;
+}
+
+export const config = {
+  matcher: [
+    '/((?!_next/static|_next/image|favicon.ico|.*\\.(?:svg|png|jpg|jpeg|gif|webp)$).*)',
+  ],
+};
\ No newline at end of file
diff --git a/webui/next.config.ts b/webui/next.config.ts
new file mode 100755
index 0000000..e9ffa30
--- /dev/null
+++ b/webui/next.config.ts
@@ -0,0 +1,7 @@
+import type { NextConfig } from "next";
+
+const nextConfig: NextConfig = {
+  /* config options here */
+};
+
+export default nextConfig;
diff --git a/webui/package.json b/webui/package.json
new file mode 100755
index 0000000..fbcd1f4
--- /dev/null
+++ b/webui/package.json
@@ -0,0 +1,44 @@
+{
+  "name": "webui",
+  "version": "0.1.0",
+  "private": true,
+  "scripts": {
+    "dev": "next dev --turbopack",
+    "build": "next build",
+    "start": "next start",
+    "lint": "next lint"
+  },
+  "dependencies": {
+    "@radix-ui/react-dialog": "^1.1.14",
+    "@radix-ui/react-popover": "^1.1.14",
+    "@radix-ui/react-separator": "^1.1.7",
+    "@radix-ui/react-slot": "^1.2.3",
+    "@radix-ui/react-tabs": "^1.1.12",
+    "@radix-ui/react-tooltip": "^1.2.7",
+    "class-variance-authority": "^0.7.1",
+    "clsx": "^2.1.1",
+    "cmdk": "^1.1.1",
+    "drizzle-orm": "^0.44.2",
+    "lucide-react": "^0.525.0",
+    "motion": "^12.23.0",
+    "next": "15.3.4",
+    "next-themes": "^0.4.6",
+    "pg": "^8.16.3",
+    "react": "^19.0.0",
+    "react-dom": "^19.0.0",
+    "react-icons": "^5.5.0",
+    "tailwind-merge": "^3.3.1"
+  },
+  "devDependencies": {
+    "@eslint/eslintrc": "^3",
+    "@tailwindcss/postcss": "^4",
+    "@types/node": "^20",
+    "@types/react": "^19",
+    "@types/react-dom": "^19",
+    "eslint": "^9",
+    "eslint-config-next": "15.3.4",
+    "tailwindcss": "^4",
+    "tw-animate-css": "^1.3.5",
+    "typescript": "^5"
+  }
+}
diff --git a/webui/postcss.config.mjs b/webui/postcss.config.mjs
new file mode 100755
index 0000000..c7bcb4b
--- /dev/null
+++ b/webui/postcss.config.mjs
@@ -0,0 +1,5 @@
+const config = {
+  plugins: ["@tailwindcss/postcss"],
+};
+
+export default config;
diff --git a/webui/public/kowalski.svg b/webui/public/kowalski.svg
new file mode 100755
index 0000000..6b61105
--- /dev/null
+++ b/webui/public/kowalski.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/webui/tsconfig.json b/webui/tsconfig.json
new file mode 100755
index 0000000..d8b9323
--- /dev/null
+++ b/webui/tsconfig.json
@@ -0,0 +1,27 @@
+{
+  "compilerOptions": {
+    "target": "ES2017",
+    "lib": ["dom", "dom.iterable", "esnext"],
+    "allowJs": true,
+    "skipLibCheck": true,
+    "strict": true,
+    "noEmit": true,
+    "esModuleInterop": true,
+    "module": "esnext",
+    "moduleResolution": "bundler",
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "jsx": "preserve",
+    "incremental": true,
+    "plugins": [
+      {
+        "name": "next"
+      }
+    ],
+    "paths": {
"paths": { + "@/*": ["./*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +}