Compare commits

...

25 Commits

Author SHA1 Message Date
h
5b1f50a6f6 feat(frontend): optimize photo requesting for slower internet 2026-01-24 17:42:08 +01:00
h
d9ca679fe3 feat(frontend): add unpair button to phone 2026-01-24 17:15:17 +01:00
h
65d2d49355 feat(frontend): optimizations for slow internet situations 2026-01-24 17:05:44 +01:00
h
474b53b45e feat(infra): add migration to Makefile, better convex key generation 2026-01-24 00:25:31 +01:00
h
1ae04f4458 feat(infra): proper migration script 2026-01-24 00:24:36 +01:00
h
b9c4296ca3 feat(*): send images from website 2026-01-23 02:16:46 +01:00
h
9f4dd8313e feat(frontend): better stealth overlay 2026-01-22 15:41:38 +01:00
h
ff72e66b21 feat(frontend): add stealth overlay 2026-01-22 15:33:24 +01:00
h
8e6662397e feat(bot): better exam followup prompt 2026-01-21 21:58:20 +01:00
h
5e22cf6660 feat(bot): better exam followup prompt 2026-01-21 21:51:59 +01:00
h
cfd753cb3a feat(bot): better exam system prompt 2026-01-21 21:48:46 +01:00
h
856a39cc3f feat(bot): better prompt for multiple images 2026-01-21 21:45:07 +01:00
h
b27d95258f feat(bot): better prompt for multiple images 2026-01-21 21:42:43 +01:00
h
2dadf7b973 fix(frontend): messages not displaying, add schema to validators 2026-01-21 21:39:20 +01:00
h
cb188f7cc0 feat(*): add multiple image support 2026-01-21 21:32:46 +01:00
h
87aa974084 feat(frontend): add scroll to bottom button 2026-01-21 20:41:22 +01:00
h
03d56006dc feat(frontend): POST for images 2026-01-21 20:31:04 +01:00
h
bd361c6e7d fix(bot): better buttons 2026-01-21 20:30:59 +01:00
h
646254cd24 fix(bot): better prompt preset 2026-01-21 20:30:55 +01:00
h
0db9dc1526 fix(frontend): scroll 2026-01-21 20:30:49 +01:00
h
470bfe4375 feat(infra): migrate 2026-01-21 20:30:49 +01:00
h
10dff8f45e fix(frontend): better layout 2026-01-21 20:30:49 +01:00
h
6ccc06f90f fix(*): images do work 2026-01-21 20:30:49 +01:00
h
69ddb3173f fix(frontend): ws replacement 2026-01-21 20:30:49 +01:00
h
11811819f1 fix(frontend): not building 2026-01-21 20:30:42 +01:00
43 changed files with 2796 additions and 245 deletions

View File

@@ -1,4 +1,4 @@
.PHONY: recreate down restart frontend deploy rebuild migrate convex-key script .PHONY: recreate down reset hard-reset restart frontend deploy rebuild migrate convex-key script
recreate: recreate:
docker compose --profile services up -d docker compose --profile services up -d
@@ -6,11 +6,20 @@ recreate:
down: down:
docker compose --profile services down docker compose --profile services down
reset:
$(MAKE) down
$(MAKE) recreate
hard-reset:
docker compose down
docker compose up -d
restart: restart:
docker compose --profile services restart docker compose --profile services restart
frontend: frontend:
docker compose build frontend docker compose build frontend
$(MAKE) migrate
docker compose up -d frontend docker compose up -d frontend
deploy: deploy:
@@ -22,10 +31,16 @@ rebuild:
docker compose --profile services up -d docker compose --profile services up -d
migrate: migrate:
docker compose --profile migrate run --rm migrator $(filter-out $@,$(MAKECMDGOALS)) docker compose run --rm migrate
convex-key: convex-key:
docker compose exec convex ./generate_admin_key.sh @output=$$(docker compose exec convex ./generate_admin_key.sh 2>&1); \
echo "$$output"; \
if echo "$$output" | grep -q "Admin key:"; then \
key=$$(echo "$$output" | tail -1); \
sed -i '' 's#^CONVEX_SELF_HOSTED_ADMIN_KEY=.*#CONVEX_SELF_HOSTED_ADMIN_KEY='"$$key"'#' frontend/.env; \
echo "Updated frontend/.env with new admin key"; \
fi
script: script:
@cd backend && docker compose --profile scripts run --rm script-runner scripts/$(subst .,/,$(word 2,$(MAKECMDGOALS))).py $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) @cd backend && docker compose --profile scripts run --rm script-runner scripts/$(subst .,/,$(word 2,$(MAKECMDGOALS))).py $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))

View File

@@ -1,11 +1,21 @@
import asyncio import asyncio
import base64
import contextlib import contextlib
import io import io
import time import time
from collections.abc import Awaitable, Callable
from typing import Any
from aiogram import Bot, F, Router, html, types from aiogram import BaseMiddleware, Bot, F, Router, html, types
from aiogram.enums import ChatAction from aiogram.enums import ChatAction
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove from aiogram.types import (
BufferedInputFile,
InputMediaPhoto,
KeyboardButton,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
TelegramObject,
)
from convex import ConvexInt64 from convex import ConvexInt64
from bot.modules.ai import ( from bot.modules.ai import (
@@ -22,10 +32,57 @@ from utils.convex import ConvexClient
router = Router() router = Router()
convex = ConvexClient(env.convex_url) convex = ConvexClient(env.convex_url)
ALBUM_COLLECT_DELAY = 0.5
class AlbumMiddleware(BaseMiddleware):
def __init__(self) -> None:
self.albums: dict[str, list[types.Message]] = {}
self.scheduled: set[str] = set()
async def __call__(
self,
handler: Callable[[TelegramObject, dict[str, Any]], Awaitable[Any]],
event: TelegramObject,
data: dict[str, Any],
) -> Any: # noqa: ANN401
if not isinstance(event, types.Message) or not event.media_group_id:
return await handler(event, data)
album_id = event.media_group_id
if album_id not in self.albums:
self.albums[album_id] = []
self.albums[album_id].append(event)
if album_id in self.scheduled:
return None
self.scheduled.add(album_id)
await asyncio.sleep(ALBUM_COLLECT_DELAY)
messages = self.albums.pop(album_id, [])
self.scheduled.discard(album_id)
if messages:
data["album"] = messages
return await handler(messages[0], data)
return None
router.message.middleware(AlbumMiddleware())
EDIT_THROTTLE_SECONDS = 1.0 EDIT_THROTTLE_SECONDS = 1.0
TELEGRAM_MAX_LENGTH = 4096 TELEGRAM_MAX_LENGTH = 4096
async def fetch_chat_images(chat_id: str) -> list[ImageData]:
chat_images = await convex.query("messages:getChatImages", {"chatId": chat_id})
return [
ImageData(data=base64.b64decode(img["base64"]), media_type=img["mediaType"])
for img in (chat_images or [])
]
def make_follow_up_keyboard(options: list[str]) -> ReplyKeyboardMarkup: def make_follow_up_keyboard(options: list[str]) -> ReplyKeyboardMarkup:
buttons = [[KeyboardButton(text=opt)] for opt in options] buttons = [[KeyboardButton(text=opt)] for opt in options]
return ReplyKeyboardMarkup( return ReplyKeyboardMarkup(
@@ -114,8 +171,13 @@ async def send_long_message(
) )
async def process_message_from_web( # noqa: C901, PLR0915 async def process_message_from_web( # noqa: C901, PLR0912, PLR0913, PLR0915
convex_user_id: str, text: str, bot: Bot, convex_chat_id: str convex_user_id: str,
text: str,
bot: Bot,
convex_chat_id: str,
images_base64: list[str] | None = None,
images_media_types: list[str] | None = None,
) -> None: ) -> None:
user = await convex.query("users:getById", {"userId": convex_user_id}) user = await convex.query("users:getById", {"userId": convex_user_id})
@@ -126,9 +188,32 @@ async def process_message_from_web( # noqa: C901, PLR0915
is_summarize = text == "/summarize" is_summarize = text == "/summarize"
if tg_chat_id and not is_summarize: if tg_chat_id and not is_summarize:
await bot.send_message( if images_base64 and images_media_types:
tg_chat_id, f"📱 {html.quote(text)}", reply_markup=ReplyKeyboardRemove() if len(images_base64) == 1:
) photo_bytes = base64.b64decode(images_base64[0])
await bot.send_photo(
tg_chat_id,
BufferedInputFile(photo_bytes, "photo.jpg"),
caption=f"📱 {text}" if text else "📱",
reply_markup=ReplyKeyboardRemove(),
)
else:
media = []
img_pairs = zip(images_base64, images_media_types, strict=True)
for i, (img_b64, _) in enumerate(img_pairs):
photo_bytes = base64.b64decode(img_b64)
caption = f"📱 {text}" if i == 0 and text else None
media.append(
InputMediaPhoto(
media=BufferedInputFile(photo_bytes, f"photo_{i}.jpg"),
caption=caption,
)
)
await bot.send_media_group(tg_chat_id, media)
else:
await bot.send_message(
tg_chat_id, f"📱 {html.quote(text)}", reply_markup=ReplyKeyboardRemove()
)
api_key = user["geminiApiKey"] api_key = user["geminiApiKey"]
model_name = user.get("model", "gemini-3-pro-preview") model_name = user.get("model", "gemini-3-pro-preview")
@@ -178,7 +263,11 @@ async def process_message_from_web( # noqa: C901, PLR0915
prompt_text = text prompt_text = text
hist = history[:-1] hist = history[:-1]
final_answer = await stream_response(text_agent, prompt_text, hist, on_chunk) chat_images = await fetch_chat_images(convex_chat_id)
final_answer = await stream_response(
text_agent, prompt_text, hist, on_chunk, images=chat_images
)
if state: if state:
await state.flush() await state.flush()
@@ -189,7 +278,7 @@ async def process_message_from_web( # noqa: C901, PLR0915
follow_up_agent = create_follow_up_agent( follow_up_agent = create_follow_up_agent(
api_key=api_key, model_name=follow_up_model, system_prompt=follow_up_prompt api_key=api_key, model_name=follow_up_model, system_prompt=follow_up_prompt
) )
follow_ups = await get_follow_ups(follow_up_agent, full_history) follow_ups = await get_follow_ups(follow_up_agent, full_history, chat_images)
if state: if state:
await state.stop_typing() await state.stop_typing()
@@ -204,6 +293,21 @@ async def process_message_from_web( # noqa: C901, PLR0915
}, },
) )
if is_summarize:
await convex.mutation(
"chats:clear", {"chatId": convex_chat_id, "preserveImages": True}
)
await convex.mutation(
"messages:create",
{
"chatId": convex_chat_id,
"role": "assistant",
"content": final_answer,
"source": "web",
"followUpOptions": follow_ups,
},
)
if tg_chat_id and processing_msg: if tg_chat_id and processing_msg:
with contextlib.suppress(Exception): with contextlib.suppress(Exception):
await processing_msg.delete() await processing_msg.delete()
@@ -229,7 +333,7 @@ async def process_message_from_web( # noqa: C901, PLR0915
async def process_message( async def process_message(
user_id: int, text: str, bot: Bot, chat_id: int, image: ImageData | None = None user_id: int, text: str, bot: Bot, chat_id: int, *, skip_user_message: bool = False
) -> None: ) -> None:
user = await convex.query( user = await convex.query(
"users:getByTelegramId", {"telegramId": ConvexInt64(user_id)} "users:getByTelegramId", {"telegramId": ConvexInt64(user_id)}
@@ -251,15 +355,16 @@ async def process_message(
api_key = user["geminiApiKey"] api_key = user["geminiApiKey"]
model_name = user.get("model", "gemini-3-pro-preview") model_name = user.get("model", "gemini-3-pro-preview")
await convex.mutation( if not skip_user_message:
"messages:create", await convex.mutation(
{ "messages:create",
"chatId": active_chat_id, {
"role": "user", "chatId": active_chat_id,
"content": text, "role": "user",
"source": "telegram", "content": text,
}, "source": "telegram",
) },
)
assistant_message_id = await convex.mutation( assistant_message_id = await convex.mutation(
"messages:create", "messages:create",
@@ -293,8 +398,10 @@ async def process_message(
{"messageId": assistant_message_id, "content": content}, {"messageId": assistant_message_id, "content": content},
) )
chat_images = await fetch_chat_images(active_chat_id)
final_answer = await stream_response( final_answer = await stream_response(
text_agent, text, history[:-2], on_chunk, image=image text_agent, text, history[:-2], on_chunk, images=chat_images
) )
await state.flush() await state.flush()
@@ -305,7 +412,7 @@ async def process_message(
follow_up_agent = create_follow_up_agent( follow_up_agent = create_follow_up_agent(
api_key=api_key, model_name=follow_up_model, system_prompt=follow_up_prompt api_key=api_key, model_name=follow_up_model, system_prompt=follow_up_prompt
) )
follow_ups = await get_follow_ups(follow_up_agent, full_history, image=image) follow_ups = await get_follow_ups(follow_up_agent, full_history, chat_images)
await state.stop_typing() await state.stop_typing()
@@ -367,6 +474,74 @@ async def on_text_message(message: types.Message, bot: Bot) -> None:
await process_message(message.from_user.id, message.text, bot, message.chat.id) await process_message(message.from_user.id, message.text, bot, message.chat.id)
@router.message(F.media_group_id, F.photo)
async def on_album_message(
message: types.Message, bot: Bot, album: list[types.Message]
) -> None:
if not message.from_user:
return
await convex.mutation(
"users:getOrCreate",
{
"telegramId": ConvexInt64(message.from_user.id),
"telegramChatId": ConvexInt64(message.chat.id),
},
)
user = await convex.query(
"users:getByTelegramId", {"telegramId": ConvexInt64(message.from_user.id)}
)
if not user or not user.get("activeChatId"):
await message.answer("Use /new first to create a chat.")
return
caption = message.caption or "Process the images according to your task"
images_base64: list[str] = []
images_media_types: list[str] = []
for msg in album:
if not msg.photo:
continue
photo = msg.photo[-1]
file = await bot.get_file(photo.file_id)
if not file.file_path:
continue
buffer = io.BytesIO()
await bot.download_file(file.file_path, buffer)
image_bytes = buffer.getvalue()
images_base64.append(base64.b64encode(image_bytes).decode())
ext = file.file_path.rsplit(".", 1)[-1].lower()
media_type = f"image/{ext}" if ext in ("png", "gif", "webp") else "image/jpeg"
images_media_types.append(media_type)
if not images_base64:
await message.answer("Failed to get photos.")
return
active_chat_id = user["activeChatId"]
await convex.mutation(
"messages:create",
{
"chatId": active_chat_id,
"role": "user",
"content": caption,
"source": "telegram",
"imagesBase64": images_base64,
"imagesMediaTypes": images_media_types,
},
)
await process_message(
message.from_user.id, caption, bot, message.chat.id, skip_user_message=True
)
@router.message(F.photo) @router.message(F.photo)
async def on_photo_message(message: types.Message, bot: Bot) -> None: async def on_photo_message(message: types.Message, bot: Bot) -> None:
if not message.from_user or not message.photo: if not message.from_user or not message.photo:
@@ -380,6 +555,14 @@ async def on_photo_message(message: types.Message, bot: Bot) -> None:
}, },
) )
user = await convex.query(
"users:getByTelegramId", {"telegramId": ConvexInt64(message.from_user.id)}
)
if not user or not user.get("activeChatId"):
await message.answer("Use /new first to create a chat.")
return
caption = message.caption or "Process the image according to your task" caption = message.caption or "Process the image according to your task"
photo = message.photo[-1] photo = message.photo[-1]
@@ -391,11 +574,24 @@ async def on_photo_message(message: types.Message, bot: Bot) -> None:
buffer = io.BytesIO() buffer = io.BytesIO()
await bot.download_file(file.file_path, buffer) await bot.download_file(file.file_path, buffer)
image_bytes = buffer.getvalue() image_bytes = buffer.getvalue()
image_base64 = base64.b64encode(image_bytes).decode()
ext = file.file_path.rsplit(".", 1)[-1].lower() ext = file.file_path.rsplit(".", 1)[-1].lower()
media_type = f"image/{ext}" if ext in ("png", "gif", "webp") else "image/jpeg" media_type = f"image/{ext}" if ext in ("png", "gif", "webp") else "image/jpeg"
image = ImageData(data=image_bytes, media_type=media_type)
active_chat_id = user["activeChatId"]
await convex.mutation(
"messages:create",
{
"chatId": active_chat_id,
"role": "user",
"content": caption,
"source": "telegram",
"imageBase64": image_base64,
"imageMediaType": media_type,
},
)
await process_message( await process_message(
message.from_user.id, caption, bot, message.chat.id, image=image message.from_user.id, caption, bot, message.chat.id, skip_user_message=True
) )

View File

@@ -41,7 +41,7 @@ def create_text_agent(
model = GoogleModel(model_name, provider=provider) model = GoogleModel(model_name, provider=provider)
base_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT base_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT
full_prompt = f"{base_prompt} {LATEX_INSTRUCTION}" full_prompt = f"{base_prompt} {LATEX_INSTRUCTION}"
return Agent(model, system_prompt=full_prompt) return Agent(model, instructions=full_prompt)
def create_follow_up_agent( def create_follow_up_agent(
@@ -52,7 +52,7 @@ def create_follow_up_agent(
provider = GoogleProvider(api_key=api_key) provider = GoogleProvider(api_key=api_key)
model = GoogleModel(model_name, provider=provider) model = GoogleModel(model_name, provider=provider)
prompt = system_prompt or DEFAULT_FOLLOW_UP prompt = system_prompt or DEFAULT_FOLLOW_UP
return Agent(model, output_type=FollowUpOptions, system_prompt=prompt) return Agent(model, output_type=FollowUpOptions, instructions=prompt)
def build_message_history(history: list[dict[str, str]]) -> list[ModelMessage]: def build_message_history(history: list[dict[str, str]]) -> list[ModelMessage]:
@@ -99,17 +99,17 @@ async def stream_response( # noqa: PLR0913
async def get_follow_ups( async def get_follow_ups(
follow_up_agent: Agent[None, FollowUpOptions], follow_up_agent: Agent[None, FollowUpOptions],
history: list[dict[str, str]], history: list[dict[str, str]],
image: ImageData | None = None, images: list[ImageData] | None = None,
) -> list[str]: ) -> list[str]:
message_history = build_message_history(history) if history else None message_history = build_message_history(history) if history else None
if image: if images:
prompt: list[str | BinaryContent] = [ prompt: list[str | BinaryContent] = ["Process this:"]
"Suggest follow-up options based on this conversation and image.", prompt.extend(
BinaryContent(data=image.data, media_type=image.media_type), BinaryContent(data=img.data, media_type=img.media_type) for img in images
] )
else: else:
prompt = "Suggest follow-up questions based on this conversation." # type: ignore[assignment] prompt = "Process this conversation." # type: ignore[assignment]
result = await follow_up_agent.run(prompt, message_history=message_history) result = await follow_up_agent.run(prompt, message_history=message_history)
return result.output["options"] return result.output["options"]

View File

@@ -1,6 +1,6 @@
EXAM_SYSTEM = """You help solve problem sets and exams. EXAM_SYSTEM = """You help solve problem sets and exams.
When you receive an IMAGE with problems: When you receive just an IMAGE to process with problems:
- Give HINTS in Russian for each problem - Give HINTS in Russian for each problem
- Focus on key insights and potential difficulties, - Focus on key insights and potential difficulties,
give all formulas that will be helpful give all formulas that will be helpful
@@ -9,18 +9,22 @@ give all formulas that will be helpful
When asked for DETAILS on a specific problem (or a problem number): When asked for DETAILS on a specific problem (or a problem number):
- Provide full structured solution in English - Provide full structured solution in English
- Academic style, as it would be written in a notebook - Academic style, as it would be written in a notebook on real exam
- Step by step, clean, no fluff""" - Step by step, clean, no fluff, no overcompications, reuse thoughts inside
one task, as you would write it on an exam, be consistent
- This is also true if you get a summary, and then problem number is asked"""
EXAM_FOLLOW_UP = """You see a problem set image. List available problem numbers. EXAM_FOLLOW_UP = """Look at the problem set image and list ALL problem numbers as
Output only the numbers that exist in the image, like: 1, 2, 3, 4, 5 options. Split by subparts ONLY if they are totally different tasks, not the steps of
If problems have letters (a, b, c), list them as: 1a, 1b, 2a, etc. one.
Keep it minimal - just the identifiers. If there are multiple problem sets/sheets, break it down logically and specify set,
Then, if applicable, output some possible followups of conversation""" for example Group A: 1, Group A: 2a, Group B: 2b, etc.
Or, Theory: 1, Theory: 2a, Practice: 1, etc.
Only output identifiers that exist in the image."""
DEFAULT_FOLLOW_UP = ( DEFAULT_FOLLOW_UP = (
"Based on the conversation, suggest 3 short follow-up questions " "Based on the conversation, suggest 3 short follow-up questions "
"the user might want to ask. Be concise, each under 50 chars." "the user might want to ask. Each option should be under 50 characters."
) )
SUMMARIZE_PROMPT = """You are summarize agent. You may receive: SUMMARIZE_PROMPT = """You are summarize agent. You may receive:

View File

@@ -51,6 +51,8 @@ async def handle_pending_generation(bot: Bot, item: dict, item_id: str) -> None:
text=item["userMessage"], text=item["userMessage"],
bot=bot, bot=bot,
convex_chat_id=item["chatId"], convex_chat_id=item["chatId"],
images_base64=item.get("imagesBase64"),
images_media_types=item.get("imagesMediaTypes"),
) )
except Exception as e: # noqa: BLE001 except Exception as e: # noqa: BLE001
logger.error(f"Error processing {item_id}: {e}") logger.error(f"Error processing {item_id}: {e}")

View File

@@ -26,6 +26,9 @@
} }
handle { handle {
request_body {
max_size 50MB
}
reverse_proxy stealth-ai-relay-frontend:3000 reverse_proxy stealth-ai-relay-frontend:3000
} }
} }

View File

@@ -69,6 +69,8 @@ services:
build: build:
context: ./frontend context: ./frontend
dockerfile: Dockerfile dockerfile: Dockerfile
args:
- PUBLIC_CONVEX_URL=${PUBLIC_CONVEX_URL}
image: stealth-ai-relay/frontend image: stealth-ai-relay/frontend
profiles: profiles:
- frontend - frontend
@@ -85,6 +87,8 @@ services:
build: build:
context: ./frontend context: ./frontend
dockerfile: Dockerfile dockerfile: Dockerfile
args:
- PUBLIC_CONVEX_URL=${PUBLIC_CONVEX_URL}
image: stealth-ai-relay/frontend image: stealth-ai-relay/frontend
volumes: volumes:
- ./frontend:/app - ./frontend:/app
@@ -121,6 +125,20 @@ services:
browserless: browserless:
entrypoint: [ "python" ] entrypoint: [ "python" ]
migrate:
image: stealth-ai-relay/frontend
volumes:
- ./frontend:/app
- /app/node_modules
env_file:
- "frontend/.env"
- ".env"
profiles:
- migrate
networks:
database:
command: x convex deploy
convex-dashboard: convex-dashboard:
image: ghcr.io/get-convex/convex-dashboard:latest image: ghcr.io/get-convex/convex-dashboard:latest
stop_grace_period: 10s stop_grace_period: 10s

3
frontend/.gitignore vendored
View File

@@ -21,6 +21,3 @@ Thumbs.db
# Vite # Vite
vite.config.js.timestamp-* vite.config.js.timestamp-*
vite.config.ts.timestamp-* vite.config.ts.timestamp-*
# Convex
src/lib/convex/_generated

View File

@@ -5,5 +5,8 @@ yarn.lock
bun.lock bun.lock
bun.lockb bun.lockb
# Convex generated files
src/lib/convex/_generated/
# Miscellaneous # Miscellaneous
/static/ /static/

View File

@@ -3,6 +3,9 @@ FROM oven/bun:alpine
ENV TERM=xterm-256color ENV TERM=xterm-256color
ENV COLORTERM=truecolor ENV COLORTERM=truecolor
ARG PUBLIC_CONVEX_URL
ENV PUBLIC_CONVEX_URL=$PUBLIC_CONVEX_URL
WORKDIR /app WORKDIR /app
COPY package.json bun.lock* ./ COPY package.json bun.lock* ./

View File

@@ -22,9 +22,11 @@ export default defineConfig(
languageOptions: { globals: { ...globals.browser, ...globals.node } }, languageOptions: { globals: { ...globals.browser, ...globals.node } },
rules: { rules: {
// typescript-eslint strongly recommend that you do not use the no-undef lint rule on TypeScript projects. 'no-undef': 'off',
// see: https://typescript-eslint.io/troubleshooting/faqs/eslint/#i-get-errors-from-the-no-undef-rule-about-global-variables-not-being-defined-even-though-there-are-no-typescript-errors '@typescript-eslint/no-unused-vars': [
'no-undef': 'off' 'error',
{ argsIgnorePattern: '^_', varsIgnorePattern: '^_' }
]
} }
}, },
{ {

12
frontend/src/app.d.ts vendored
View File

@@ -1,12 +1,10 @@
// See https://svelte.dev/docs/kit/types#app.d.ts import type { ConvexHttpClient } from 'convex/browser';
// for information about these interfaces
declare global { declare global {
namespace App { namespace App {
// interface Error {} interface Locals {
// interface Locals {} convex: ConvexHttpClient;
// interface PageData {} }
// interface PageState {}
// interface Platform {}
} }
} }

View File

@@ -1,35 +0,0 @@
/* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import type { ApiFromModules, FilterApi, FunctionReference } from 'convex/server';
declare const fullApi: ApiFromModules<{}>;
/**
* A utility for referencing Convex functions in your app's public API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export declare const api: FilterApi<typeof fullApi, FunctionReference<any, 'public'>>;
/**
* A utility for referencing Convex functions in your app's internal API.
*
* Usage:
* ```js
* const myFunctionReference = internal.myModule.myFunction;
* ```
*/
export declare const internal: FilterApi<typeof fullApi, FunctionReference<any, 'internal'>>;
export declare const components: {};

View File

@@ -0,0 +1,8 @@
import { ConvexHttpClient } from 'convex/browser';
import { PUBLIC_CONVEX_URL } from '$env/static/public';
import type { Handle } from '@sveltejs/kit';
export const handle: Handle = async ({ event, resolve }) => {
event.locals.convex = new ConvexHttpClient(PUBLIC_CONVEX_URL);
return resolve(event);
};

View File

@@ -0,0 +1,184 @@
<script lang="ts">
import { onMount } from 'svelte';
interface Props {
showPreview?: boolean;
oncapture: (base64: string, mediaType: string) => void;
onclose: () => void;
}
let { showPreview = true, oncapture, onclose }: Props = $props();
let videoElement: HTMLVideoElement | null = $state(null);
let stream: MediaStream | null = $state(null);
let capturedImage: { base64: string; mediaType: string } | null = $state(null);
let error: string | null = $state(null);
let closed = false;
async function findUltraWideCamera(): Promise<string | null> {
try {
const devices = await navigator.mediaDevices.enumerateDevices();
const videoDevices = devices.filter((d) => d.kind === 'videoinput');
const ultraWide = videoDevices.find(
(d) => d.label.toLowerCase().includes('ultra') && d.label.toLowerCase().includes('back')
);
return ultraWide?.deviceId ?? null;
} catch {
return null;
}
}
async function startCamera() {
if (closed) return;
if (!navigator.mediaDevices?.getUserMedia) {
error = 'Camera not supported (requires HTTPS)';
return;
}
try {
stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: { ideal: 'environment' } },
audio: false
});
const ultraWideId = await findUltraWideCamera();
if (ultraWideId) {
stream.getTracks().forEach((t) => t.stop());
stream = await navigator.mediaDevices.getUserMedia({
video: {
deviceId: { exact: ultraWideId },
width: { ideal: 4032 },
height: { ideal: 3024 }
},
audio: false
});
} else {
stream.getTracks().forEach((t) => t.stop());
stream = await navigator.mediaDevices.getUserMedia({
video: {
facingMode: { ideal: 'environment' },
width: { ideal: 4032 },
height: { ideal: 3024 }
},
audio: false
});
}
if (videoElement && !closed) {
videoElement.srcObject = stream;
}
} catch (e) {
error = e instanceof Error ? e.message : 'Camera access denied';
}
}
function stopCamera() {
if (stream) {
stream.getTracks().forEach((track) => track.stop());
stream = null;
}
}
function capture() {
if (!videoElement) return;
const canvas = document.createElement('canvas');
canvas.width = videoElement.videoWidth;
canvas.height = videoElement.videoHeight;
const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.drawImage(videoElement, 0, 0);
const maxSize = 1920;
const scale = Math.min(maxSize / canvas.width, maxSize / canvas.height, 1);
const outCanvas = document.createElement('canvas');
outCanvas.width = Math.round(canvas.width * scale);
outCanvas.height = Math.round(canvas.height * scale);
const outCtx = outCanvas.getContext('2d');
if (!outCtx) return;
outCtx.drawImage(canvas, 0, 0, outCanvas.width, outCanvas.height);
const base64 = outCanvas.toDataURL('image/jpeg', 0.65).split(',')[1];
const mediaType = 'image/jpeg';
stopCamera();
if (showPreview) {
capturedImage = { base64, mediaType };
} else {
oncapture(base64, mediaType);
}
}
function acceptCapture() {
if (capturedImage) {
oncapture(capturedImage.base64, capturedImage.mediaType);
}
}
function retake() {
capturedImage = null;
startCamera();
}
function close() {
closed = true;
stopCamera();
onclose();
}
onMount(() => {
startCamera();
return () => {
closed = true;
stopCamera();
};
});
</script>
<div class="fixed inset-0 z-50 flex flex-col bg-black" data-camera-ui>
{#if error}
<div class="flex flex-1 flex-col items-center justify-center p-4">
<p class="mb-4 text-center text-sm text-red-400">{error}</p>
<button onclick={close} class="rounded bg-neutral-700 px-4 py-2 text-sm text-white">
Close
</button>
</div>
{:else if capturedImage}
<div class="relative min-h-0 flex-1">
<button class="absolute inset-0" onclick={acceptCapture}>
<img
src="data:{capturedImage.mediaType};base64,{capturedImage.base64}"
alt="Captured"
class="h-full w-full object-contain"
/>
</button>
</div>
<div class="flex gap-4 p-4">
<button onclick={retake} class="flex-1 rounded bg-neutral-700 py-3 text-sm text-white">
Retake
</button>
<button onclick={acceptCapture} class="flex-1 rounded bg-blue-600 py-3 text-sm text-white">
Use
</button>
</div>
{:else}
<video bind:this={videoElement} autoplay playsinline muted class="h-full w-full object-cover"
></video>
<button
onclick={close}
class="absolute top-4 right-4 flex h-8 w-8 items-center justify-center rounded-full bg-black/50 text-white"
>
×
</button>
<button
onclick={capture}
aria-label="Capture photo"
class="absolute bottom-8 left-1/2 h-16 w-16 -translate-x-1/2 rounded-full border-4 border-white bg-white/20"
></button>
{/if}
</div>

View File

@@ -0,0 +1,144 @@
<script lang="ts">
import { onMount } from 'svelte';
interface Props {
oncomplete: (base64: string, mediaType: string, thumbnailBase64: string) => void;
oncancel: () => void;
}
let { oncomplete, oncancel }: Props = $props();
let count = $state(3);
let videoElement: HTMLVideoElement | null = $state(null);
let stream: MediaStream | null = $state(null);
let error: string | null = $state(null);
let cancelled = false;
let countdownInterval: ReturnType<typeof setInterval> | null = null;
async function startCamera() {
if (cancelled) return;
if (!navigator.mediaDevices?.getUserMedia) {
error = 'Camera not supported (requires HTTPS)';
return;
}
try {
const constraints: MediaStreamConstraints = {
video: {
facingMode: { ideal: 'environment' },
width: { ideal: 1920 },
height: { ideal: 1080 }
},
audio: false
};
stream = await navigator.mediaDevices.getUserMedia(constraints);
if (videoElement && !cancelled) {
videoElement.srcObject = stream;
startCountdown();
}
} catch (e) {
error = e instanceof Error ? e.message : 'Camera access denied';
}
}
function stopCamera() {
if (countdownInterval) {
clearInterval(countdownInterval);
countdownInterval = null;
}
if (stream) {
stream.getTracks().forEach((track) => track.stop());
stream = null;
}
}
function startCountdown() {
countdownInterval = setInterval(() => {
if (cancelled) {
stopCamera();
return;
}
count--;
if (count === 0) {
if (countdownInterval) clearInterval(countdownInterval);
capture();
}
}, 1000);
}
function capture() {
if (cancelled || !videoElement) {
stopCamera();
return;
}
const canvas = document.createElement('canvas');
canvas.width = videoElement.videoWidth;
canvas.height = videoElement.videoHeight;
const ctx = canvas.getContext('2d');
if (!ctx) {
stopCamera();
oncancel();
return;
}
ctx.drawImage(videoElement, 0, 0);
const base64 = canvas.toDataURL('image/jpeg', 0.85).split(',')[1];
const mediaType = 'image/jpeg';
const thumbMaxSize = 800;
const scale = Math.min(thumbMaxSize / canvas.width, thumbMaxSize / canvas.height, 1);
const thumbCanvas = document.createElement('canvas');
thumbCanvas.width = Math.round(canvas.width * scale);
thumbCanvas.height = Math.round(canvas.height * scale);
const thumbCtx = thumbCanvas.getContext('2d');
if (thumbCtx) {
thumbCtx.drawImage(canvas, 0, 0, thumbCanvas.width, thumbCanvas.height);
}
const thumbnailBase64 = thumbCanvas.toDataURL('image/jpeg', 0.7).split(',')[1];
stopCamera();
oncomplete(base64, mediaType, thumbnailBase64);
}
/** User pressed Cancel: abort the session, release the camera, notify the parent. */
function handleCancel() {
	// Order matters: the flag must be set first so any in-flight
	// getUserMedia / countdown tick bails out instead of restarting.
	cancelled = true;
	stopCamera();
	oncancel();
}
// Start the camera as soon as the component mounts; the returned cleanup
// marks the session cancelled and releases the camera on unmount.
onMount(() => {
	startCamera();
	return () => {
		cancelled = true;
		stopCamera();
	};
});
</script>

<!-- Fullscreen capture UI: error message, or live preview with countdown overlay. -->
<div class="fixed inset-0 z-50 flex flex-col bg-black" data-camera-ui>
	{#if error}
		<!-- Camera unavailable / permission denied -->
		<div class="flex flex-1 flex-col items-center justify-center p-4">
			<p class="mb-4 text-center text-sm text-red-400">{error}</p>
			<button onclick={handleCancel} class="rounded bg-neutral-700 px-4 py-2 text-sm text-white">
				Close
			</button>
		</div>
	{:else}
		<div class="relative flex-1">
			<video bind:this={videoElement} autoplay playsinline muted class="h-full w-full object-cover"
			></video>
			<!-- Countdown digits centred over the live preview -->
			<div class="absolute inset-0 flex items-center justify-center">
				<span class="text-8xl font-bold text-white drop-shadow-lg">{count}</span>
			</div>
		</div>
		<div class="p-4 text-center">
			<button onclick={handleCancel} class="text-sm text-neutral-400">Cancel</button>
		</div>
	{/if}
</div>

View File

@@ -2,34 +2,35 @@
interface Props { interface Props {
onsubmit: (message: string) => void; onsubmit: (message: string) => void;
disabled?: boolean; disabled?: boolean;
allowEmpty?: boolean;
} }
let { onsubmit, disabled = false }: Props = $props(); let { onsubmit, disabled = false, allowEmpty = false }: Props = $props();
let value = $state(''); let value = $state('');
function handleSubmit(e: Event) { function handleSubmit(e: Event) {
e.preventDefault(); e.preventDefault();
const trimmed = value.trim(); const trimmed = value.trim();
if (trimmed && !disabled) { if ((trimmed || allowEmpty) && !disabled) {
onsubmit(trimmed); onsubmit(trimmed);
value = ''; value = '';
} }
} }
</script> </script>
<form onsubmit={handleSubmit} class="flex gap-2"> <form onsubmit={handleSubmit} class="flex gap-1">
<input <input
type="text" type="text"
bind:value bind:value
{disabled} {disabled}
placeholder="Message..." placeholder="..."
class="flex-1 rounded-lg bg-neutral-800 px-3 py-2 text-[11px] text-white placeholder-neutral-500 outline-none focus:ring-1 focus:ring-neutral-600" class="min-w-0 flex-1 rounded bg-neutral-800 px-2 py-1 text-[10px] text-white placeholder-neutral-500 outline-none"
/> />
<button <button
type="submit" type="submit"
{disabled} {disabled}
class="rounded-lg bg-blue-600 px-3 py-2 text-[11px] text-white transition-colors hover:bg-blue-500 disabled:opacity-50" class="shrink-0 rounded bg-blue-600 px-2 py-1 text-[10px] text-white disabled:opacity-50"
> >
Send &gt;
</button> </button>
</form> </form>

View File

@@ -0,0 +1,29 @@
<script lang="ts">
	import type { Id } from '$lib/convex/_generated/dataModel';

	// Minimal shape of an uploaded-but-unsent photo draft.
	interface Photo {
		_id: Id<'photoDrafts'>;
		mediaType: string;
	}

	interface Props {
		photos: Photo[];
		// Invoked with the index of the chip the user tapped, to remove that draft.
		onremove: (index: number) => void;
	}

	let { photos, onremove }: Props = $props();
</script>

<!-- Row of removable "photo N" chips shown with the message composer. -->
{#if photos.length > 0}
	<div class="flex flex-wrap gap-1">
		{#each photos as _photo, i (i)}
			<!-- The whole chip is the remove button; the x glyph is decorative. -->
			<button
				onclick={() => onremove(i)}
				class="flex items-center gap-1 rounded bg-blue-600/30 px-1.5 py-0.5 text-[8px] text-blue-300"
			>
				<span>photo {i + 1}</span>
				<span class="text-blue-400">&times;</span>
			</button>
		{/each}
	</div>
{/if}

View File

@@ -0,0 +1,65 @@
<script lang="ts">
	interface Props {
		// Whether this device itself can take a photo.
		hasCamera: boolean;
		// Whether a paired device is online and can be asked for a photo.
		hasOnlineDevices: boolean;
		ontakephoto: () => void;
		onrequestphoto: () => void;
	}

	let { hasCamera, hasOnlineDevices, ontakephoto, onrequestphoto }: Props = $props();

	// Popup menu visibility; only used when both actions are available.
	let menuOpen = $state(false);

	// When both options exist, show a menu; with only one option, act directly.
	function handleClick() {
		if (hasCamera && hasOnlineDevices) {
			menuOpen = !menuOpen;
		} else if (hasOnlineDevices) {
			onrequestphoto();
		} else {
			ontakephoto();
		}
	}

	function handleTakePhoto() {
		menuOpen = false;
		ontakephoto();
	}

	function handleRequestPhoto() {
		menuOpen = false;
		onrequestphoto();
	}

	// Invisible fullscreen backdrop click closes the menu.
	function handleBackdropClick() {
		menuOpen = false;
	}
</script>

<div class="relative">
	<!-- "+" trigger button -->
	<button
		onclick={handleClick}
		class="shrink-0 rounded bg-neutral-800 px-1.5 py-0.5 text-[8px] text-neutral-400"
	>
		+
	</button>
	{#if menuOpen}
		<!-- Transparent backdrop under the menu catches outside clicks. -->
		<button class="fixed inset-0 z-40" onclick={handleBackdropClick} aria-label="Close menu"
		></button>
		<div
			class="absolute bottom-full left-0 z-50 mb-1 overflow-hidden rounded bg-neutral-800 shadow-lg"
		>
			<button
				onclick={handleTakePhoto}
				class="block w-full px-3 py-2 text-left text-[10px] whitespace-nowrap text-white hover:bg-neutral-700"
			>
				Take photo
			</button>
			<button
				onclick={handleRequestPhoto}
				class="block w-full px-3 py-2 text-left text-[10px] whitespace-nowrap text-white hover:bg-neutral-700"
			>
				Request photo
			</button>
		</div>
	{/if}
</div>

View File

@@ -0,0 +1,27 @@
<script lang="ts">
	interface Props {
		// JPEG/PNG payload without the data-URL prefix.
		base64: string;
		// MIME type used to build the data URL (e.g. "image/jpeg").
		mediaType: string;
		onaccept: () => void;
		onreject: () => void;
	}

	let { base64, mediaType, onaccept, onreject }: Props = $props();
</script>

<!-- Fullscreen preview: tapping anywhere on the image accepts the photo. -->
<div class="fixed inset-0 z-50 overflow-auto bg-black" data-camera-ui>
	<button class="block min-h-full min-w-full" onclick={onaccept}>
		<img
			src="data:{mediaType};base64,{base64}"
			alt="Preview"
			class="min-h-dvh min-w-full object-cover"
		/>
	</button>
</div>
<!-- Floating reject button, layered above the preview (and any overlays). -->
<button
	onclick={onreject}
	class="fixed top-4 right-4 z-[9999] flex h-10 w-10 items-center justify-center rounded-full bg-red-600 text-xl text-white shadow-lg"
	data-camera-ui
>
	×
</button>

View File

@@ -0,0 +1,22 @@
<script lang="ts">
	interface Props {
		// Accept the incoming photo request and start capturing.
		onaccept: () => void;
		// Decline the request.
		ondecline: () => void;
	}

	let { onaccept, ondecline }: Props = $props();
</script>

<!-- Modal shown when a paired device requests a photo from this device. -->
<div class="fixed inset-0 z-50 flex items-center justify-center bg-black/80 p-4" data-camera-ui>
	<div class="w-full max-w-xs rounded-lg bg-neutral-900 p-4">
		<p class="mb-4 text-center text-sm text-white">Photo requested</p>
		<div class="flex gap-3">
			<button onclick={ondecline} class="flex-1 rounded bg-neutral-700 py-2 text-sm text-white">
				Decline
			</button>
			<button onclick={onaccept} class="flex-1 rounded bg-blue-600 py-2 text-sm text-white">
				Capture
			</button>
		</div>
	</div>
</div>

View File

@@ -0,0 +1,135 @@
<script lang="ts">
	import { onMount } from 'svelte';

	interface Props {
		// Receives the full-size image, its MIME type, and a thumbnail (all base64).
		oncapture: (base64: string, mediaType: string, thumbnailBase64: string) => void;
		// Optional: when provided, an "unpair" button is rendered over the preview.
		onunpair?: () => void;
	}

	let { oncapture, onunpair }: Props = $props();

	let videoElement: HTMLVideoElement | null = $state(null);
	// Live camera stream; null until startCamera() succeeds.
	let stream: MediaStream | null = $state(null);
	// True once video metadata is loaded and capture() may be called.
	let ready = $state(false);
/**
 * Best-effort lookup of a rear ultra-wide camera by device label.
 * Returns its deviceId, or null when none matches or enumeration fails
 * (labels are typically empty before a permission grant).
 */
async function findUltraWideCamera(): Promise<string | null> {
	try {
		const all = await navigator.mediaDevices.enumerateDevices();
		for (const device of all) {
			if (device.kind !== 'videoinput') continue;
			const label = device.label.toLowerCase();
			if (label.includes('ultra') && label.includes('back')) {
				return device.deviceId ?? null;
			}
		}
		return null;
	} catch {
		return null;
	}
}
/**
 * Opens the rear camera at high resolution. Strategy: open any
 * environment-facing stream first (this also triggers the permission prompt
 * so device labels become readable), then reopen either the ultra-wide back
 * camera (if detected) or the environment camera at ~4032x3024.
 * Sets `ready` once video metadata is available.
 */
async function startCamera() {
	if (!navigator.mediaDevices?.getUserMedia) return;
	try {
		stream = await navigator.mediaDevices.getUserMedia({
			video: { facingMode: { ideal: 'environment' } },
			audio: false
		});
		const ultraWideId = await findUltraWideCamera();
		// Release the probe stream before reopening at full resolution
		// (both branches of the original did this identically — deduplicated).
		stream.getTracks().forEach((t) => t.stop());
		stream = await navigator.mediaDevices.getUserMedia({
			video: ultraWideId
				? {
						deviceId: { exact: ultraWideId },
						width: { ideal: 4032 },
						height: { ideal: 3024 }
					}
				: {
						facingMode: { ideal: 'environment' },
						width: { ideal: 4032 },
						height: { ideal: 3024 }
					},
			audio: false
		});
		if (!videoElement) {
			// Fix: the component unmounted while we were awaiting — release the
			// tracks instead of leaking a live camera stream.
			stream.getTracks().forEach((t) => t.stop());
			stream = null;
			return;
		}
		videoElement.srcObject = stream;
		await new Promise<void>((resolve) => {
			// Fix: if metadata is already loaded the loadedmetadata event never
			// fires again, which left this promise (and `ready`) hanging forever.
			if (!videoElement || videoElement.readyState >= HTMLMediaElement.HAVE_METADATA) {
				resolve();
			} else {
				videoElement.onloadedmetadata = () => resolve();
			}
		});
		ready = true;
	} catch {
		ready = false;
	}
}
/**
 * Captures the current frame, downscales it to <= 1920 px (JPEG q=0.65) plus
 * a <= 800 px thumbnail (q=0.6), and delivers both base64 payloads via
 * `oncapture`.
 * @returns true when a frame was captured, false when the camera is not ready.
 */
export function capture() {
	if (!ready || !videoElement) return false;
	// Fix: a 0x0 frame (metadata not yet available despite `ready`) would
	// encode to a broken image — report failure instead.
	if (videoElement.videoWidth === 0 || videoElement.videoHeight === 0) return false;
	const canvas = document.createElement('canvas');
	canvas.width = videoElement.videoWidth;
	canvas.height = videoElement.videoHeight;
	const ctx = canvas.getContext('2d');
	if (!ctx) return false;
	ctx.drawImage(videoElement, 0, 0);
	// Downscale the main image; scale is capped at 1 so we never upscale.
	const maxSize = 1920;
	const scale = Math.min(maxSize / canvas.width, maxSize / canvas.height, 1);
	const outCanvas = document.createElement('canvas');
	outCanvas.width = Math.round(canvas.width * scale);
	outCanvas.height = Math.round(canvas.height * scale);
	const outCtx = outCanvas.getContext('2d');
	if (!outCtx) return false;
	outCtx.drawImage(canvas, 0, 0, outCanvas.width, outCanvas.height);
	// Strip the "data:image/jpeg;base64," prefix from both payloads.
	const base64 = outCanvas.toDataURL('image/jpeg', 0.65).split(',')[1];
	const mediaType = 'image/jpeg';
	const thumbMaxSize = 800;
	const thumbScale = Math.min(thumbMaxSize / outCanvas.width, thumbMaxSize / outCanvas.height, 1);
	const thumbCanvas = document.createElement('canvas');
	thumbCanvas.width = Math.round(outCanvas.width * thumbScale);
	thumbCanvas.height = Math.round(outCanvas.height * thumbScale);
	const thumbCtx = thumbCanvas.getContext('2d');
	if (thumbCtx) {
		thumbCtx.drawImage(outCanvas, 0, 0, thumbCanvas.width, thumbCanvas.height);
	}
	const thumbnailBase64 = thumbCanvas.toDataURL('image/jpeg', 0.6).split(',')[1];
	oncapture(base64, mediaType, thumbnailBase64);
	return true;
}
// Open the camera on mount; release all tracks on unmount.
onMount(() => {
	startCamera();
	return () => {
		if (stream) {
			stream.getTracks().forEach((track) => track.stop());
		}
	};
});
</script>
<!-- Fullscreen live preview; optional unpair button overlaid top-left. -->
<div class="fixed inset-0 z-40 bg-black">
	<video bind:this={videoElement} autoplay playsinline muted class="h-full w-full object-cover"
	></video>
	{#if onunpair}
		<button
			onclick={onunpair}
			class="absolute top-4 left-4 z-10 rounded-full bg-red-600/80 px-3 py-1.5 text-xs text-white"
		>
			unpair
		</button>
	{/if}
</div>

View File

@@ -0,0 +1,64 @@
<script lang="ts">
	import { onMount } from 'svelte';

	// Whether the black full-screen cover is currently shown.
	let stealthMode = $state(false);
	// Last qualifying tap, used to detect a double tap in the centre zone.
	let lastTap = $state({ time: 0, x: 0, y: 0 });

	onMount(() => {
		// Suppress double-tap-to-zoom so quick taps reach our handler.
		document.body.style.touchAction = 'manipulation';
		return () => {
			document.body.style.touchAction = '';
		};
	});

	// True when (x, y) falls within the central 40% x 40% of the viewport.
	function isInCenterZone(x: number, y: number): boolean {
		const w = window.innerWidth;
		const h = window.innerHeight;
		return x > w * 0.3 && x < w * 0.7 && y > h * 0.3 && y < h * 0.7;
	}

	/**
	 * Toggles stealth mode on a double tap (two taps < 500 ms and < 50 px
	 * apart) inside the centre zone. Taps on camera UI are ignored.
	 */
	function handleTouchEnd(e: TouchEvent) {
		if (e.touches.length > 0) return; // wait until the last finger lifts
		const target = e.target as HTMLElement;
		if (target?.closest('[data-camera-ui]')) return;
		const touch = e.changedTouches[0];
		const now = Date.now();
		const x = touch.clientX;
		const y = touch.clientY;
		if (!isInCenterZone(x, y)) {
			lastTap = { time: 0, x: 0, y: 0 };
			return;
		}
		const timeDiff = now - lastTap.time;
		const distX = Math.abs(x - lastTap.x);
		const distY = Math.abs(y - lastTap.y);
		if (timeDiff < 500 && distX < 50 && distY < 50) {
			stealthMode = !stealthMode;
			lastTap = { time: 0, x: 0, y: 0 };
			e.preventDefault();
		} else {
			lastTap = { time: now, x, y };
		}
	}
</script>

<svelte:document ontouchend={handleTouchEnd} />

{#if stealthMode}
	<!--
		Fix: no ontouchend on the overlay itself. Its touch events bubble to the
		document listener above; attaching the same handler here made every tap
		run the handler twice, so a single centre tap toggled stealth mode off
		immediately instead of requiring a double tap.
	-->
	<div class="stealth-overlay"></div>
{/if}

<style>
	.stealth-overlay {
		position: fixed;
		inset: 0;
		z-index: 9999;
		background: #000;
		touch-action: manipulation;
	}
</style>

View File

@@ -0,0 +1,29 @@
<script lang="ts">
	import { onMount } from 'svelte';

	interface Props {
		oncomplete: () => void;
		oncancel: () => void;
	}

	let { oncomplete, oncancel }: Props = $props();

	// Seconds remaining, rendered full-screen.
	let count = $state(3);

	// 3-2-1 countdown: decrement once per second, fire oncomplete at zero.
	onMount(() => {
		function tick() {
			count -= 1;
			if (count > 0) return;
			clearInterval(timer);
			oncomplete();
		}
		const timer = setInterval(tick, 1000);
		return () => clearInterval(timer);
	});
</script>

<div class="fixed inset-0 z-50 flex flex-col items-center justify-center bg-black" data-camera-ui>
	<span class="text-8xl font-bold text-white">{count}</span>
	<button onclick={oncancel} class="mt-8 text-sm text-neutral-400">Cancel</button>
</div>

View File

@@ -0,0 +1,113 @@
import { ConvexHttpClient } from 'convex/browser';
import { getContext, setContext } from 'svelte';
import type { FunctionReference, FunctionArgs, FunctionReturnType } from 'convex/server';
// Svelte context key under which the shared HTTP client is stored.
const POLLING_CONTEXT_KEY = 'convex-polling';
// How often usePollingQuery re-fetches, in milliseconds.
const POLL_INTERVAL = 1000;

// Context value: a single ConvexHttpClient shared by all polling hooks.
type PollingContext = {
	client: ConvexHttpClient;
};
/**
 * Reports whether this environment exposes a usable WebSocket constructor.
 * Returns true during SSR (no `window`) so the server does not prematurely
 * opt into the HTTP-polling fallback.
 */
export function hasWebSocketSupport(): boolean {
	if (typeof window === 'undefined') return true;
	try {
		const present = 'WebSocket' in window;
		const constructible = typeof WebSocket !== 'undefined';
		return present && constructible;
	} catch {
		return false;
	}
}
/**
 * Creates an HTTP (non-WebSocket) Convex client for the given deployment URL
 * and stores it in Svelte context for the usePolling* hooks below.
 * Must be called during component initialisation (setContext constraint).
 */
export function setupPollingConvex(url: string): void {
	const client = new ConvexHttpClient(url);
	setContext<PollingContext>(POLLING_CONTEXT_KEY, { client });
}
/**
 * Returns the ConvexHttpClient stored by setupPollingConvex.
 * @throws Error when no client has been placed in context.
 */
export function usePollingClient(): ConvexHttpClient {
	const ctx = getContext<PollingContext>(POLLING_CONTEXT_KEY);
	if (ctx) {
		return ctx.client;
	}
	throw new Error('Convex polling client not set up. Call setupPollingConvex first.');
}
// Reactive snapshot of a polled query: latest data, last error, and whether
// the first fetch for the current args is still in flight.
type QueryState<T> = {
	data: T | undefined;
	error: Error | null;
	isLoading: boolean;
};
/**
 * Returns a function that runs the given Convex mutation over HTTP using the
 * client from context. Must be called during component initialisation.
 */
export function usePollingMutation<Mutation extends FunctionReference<'mutation'>>(
	mutation: Mutation
): (args: FunctionArgs<Mutation>) => Promise<FunctionReturnType<Mutation>> {
	const client = usePollingClient();
	return function run(args: FunctionArgs<Mutation>) {
		return client.mutation(mutation, args);
	};
}
/**
 * Polls a Convex query over HTTP every POLL_INTERVAL ms and exposes the
 * result as reactive state. `argsGetter` may return 'skip' to pause fetching;
 * because it is read inside the $effect, arg changes restart the poll loop.
 */
export function usePollingQuery<Query extends FunctionReference<'query'>>(
	query: Query,
	argsGetter: () => FunctionArgs<Query> | 'skip'
): {
	data: FunctionReturnType<Query> | undefined;
	error: Error | null;
	isLoading: boolean;
} {
	const client = usePollingClient();
	// eslint-disable-next-line prefer-const
	let state = $state<QueryState<FunctionReturnType<Query>>>({
		data: undefined,
		error: null,
		isLoading: true
	});
	let intervalId: ReturnType<typeof setInterval> | null = null;
	// JSON of the args used by the most recent request; doubles as the
	// "generation" marker for discarding stale responses.
	let lastArgsJson = '';
	async function poll() {
		const args = argsGetter();
		if (args === 'skip') {
			state.isLoading = false;
			return;
		}
		const argsJson = JSON.stringify(args);
		if (argsJson !== lastArgsJson) {
			// Args changed: show a loading state for the new query.
			state.isLoading = true;
			lastArgsJson = argsJson;
		}
		try {
			const result = await client.query(query, args);
			// Fix: a slow response for old args can land after a newer request
			// began; discard it instead of clobbering fresher data.
			if (argsJson !== lastArgsJson) return;
			state.data = result;
			state.error = null;
			state.isLoading = false;
		} catch (err) {
			if (argsJson !== lastArgsJson) return; // stale failure — ignore
			state.error = err instanceof Error ? err : new Error(String(err));
			state.isLoading = false;
		}
	}
	$effect(() => {
		// Fetch immediately, then on a fixed interval; cleanup stops the timer.
		poll();
		intervalId = setInterval(poll, POLL_INTERVAL);
		return () => {
			if (intervalId) {
				clearInterval(intervalId);
			}
		};
	});
	return {
		get data() {
			return state.data;
		},
		get error() {
			return state.error;
		},
		get isLoading() {
			return state.isLoading;
		}
	};
}

View File

@@ -0,0 +1,63 @@
/* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import type * as chats from "../chats.js";
import type * as devicePairings from "../devicePairings.js";
import type * as messages from "../messages.js";
import type * as pairingRequests from "../pairingRequests.js";
import type * as pendingGenerations from "../pendingGenerations.js";
import type * as photoDrafts from "../photoDrafts.js";
import type * as photoRequests from "../photoRequests.js";
import type * as users from "../users.js";
import type {
ApiFromModules,
FilterApi,
FunctionReference,
} from "convex/server";
declare const fullApi: ApiFromModules<{
chats: typeof chats;
devicePairings: typeof devicePairings;
messages: typeof messages;
pairingRequests: typeof pairingRequests;
pendingGenerations: typeof pendingGenerations;
photoDrafts: typeof photoDrafts;
photoRequests: typeof photoRequests;
users: typeof users;
}>;
/**
* A utility for referencing Convex functions in your app's public API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export declare const api: FilterApi<
typeof fullApi,
FunctionReference<any, "public">
>;
/**
* A utility for referencing Convex functions in your app's internal API.
*
* Usage:
* ```js
* const myFunctionReference = internal.myModule.myFunction;
* ```
*/
export declare const internal: FilterApi<
typeof fullApi,
FunctionReference<any, "internal">
>;
export declare const components: {};

View File

@@ -8,7 +8,7 @@
* @module * @module
*/ */
import { anyApi, componentsGeneric } from 'convex/server'; import { anyApi, componentsGeneric } from "convex/server";
/** /**
* A utility for referencing Convex functions in your app's API. * A utility for referencing Convex functions in your app's API.

View File

@@ -8,29 +8,29 @@
* @module * @module
*/ */
import { AnyDataModel } from 'convex/server'; import type {
import type { GenericId } from 'convex/values'; DataModelFromSchemaDefinition,
DocumentByName,
/** TableNamesInDataModel,
* No `schema.ts` file found! SystemTableNames,
* } from "convex/server";
* This generated code has permissive types like `Doc = any` because import type { GenericId } from "convex/values";
* Convex doesn't know your schema. If you'd like more type safety, see import schema from "../schema.js";
* https://docs.convex.dev/using/schemas for instructions on how to add a
* schema file.
*
* After you change a schema, rerun codegen with `npx convex dev`.
*/
/** /**
* The names of all of your Convex tables. * The names of all of your Convex tables.
*/ */
export type TableNames = string; export type TableNames = TableNamesInDataModel<DataModel>;
/** /**
* The type of a document stored in Convex. * The type of a document stored in Convex.
*
* @typeParam TableName - A string literal type of the table name (like "users").
*/ */
export type Doc = any; export type Doc<TableName extends TableNames> = DocumentByName<
DataModel,
TableName
>;
/** /**
* An identifier for a document in Convex. * An identifier for a document in Convex.
@@ -42,8 +42,11 @@ export type Doc = any;
* *
* IDs are just strings at runtime, but this type can be used to distinguish them from other * IDs are just strings at runtime, but this type can be used to distinguish them from other
* strings when type checking. * strings when type checking.
*
* @typeParam TableName - A string literal type of the table name (like "users").
*/ */
export type Id<TableName extends TableNames = TableNames> = GenericId<TableName>; export type Id<TableName extends TableNames | SystemTableNames> =
GenericId<TableName>;
/** /**
* A type describing your Convex data model. * A type describing your Convex data model.
@@ -54,4 +57,4 @@ export type Id<TableName extends TableNames = TableNames> = GenericId<TableName>
* This type is used to parameterize methods like `queryGeneric` and * This type is used to parameterize methods like `queryGeneric` and
* `mutationGeneric` to make them type-safe. * `mutationGeneric` to make them type-safe.
*/ */
export type DataModel = AnyDataModel; export type DataModel = DataModelFromSchemaDefinition<typeof schema>;

View File

@@ -9,17 +9,17 @@
*/ */
import { import {
ActionBuilder, ActionBuilder,
HttpActionBuilder, HttpActionBuilder,
MutationBuilder, MutationBuilder,
QueryBuilder, QueryBuilder,
GenericActionCtx, GenericActionCtx,
GenericMutationCtx, GenericMutationCtx,
GenericQueryCtx, GenericQueryCtx,
GenericDatabaseReader, GenericDatabaseReader,
GenericDatabaseWriter GenericDatabaseWriter,
} from 'convex/server'; } from "convex/server";
import type { DataModel } from './dataModel.js'; import type { DataModel } from "./dataModel.js";
/** /**
* Define a query in this Convex app's public API. * Define a query in this Convex app's public API.
@@ -29,7 +29,7 @@ import type { DataModel } from './dataModel.js';
* @param func - The query function. It receives a {@link QueryCtx} as its first argument. * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible. * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/ */
export declare const query: QueryBuilder<DataModel, 'public'>; export declare const query: QueryBuilder<DataModel, "public">;
/** /**
* Define a query that is only accessible from other Convex functions (but not from the client). * Define a query that is only accessible from other Convex functions (but not from the client).
@@ -39,7 +39,7 @@ export declare const query: QueryBuilder<DataModel, 'public'>;
* @param func - The query function. It receives a {@link QueryCtx} as its first argument. * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible. * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/ */
export declare const internalQuery: QueryBuilder<DataModel, 'internal'>; export declare const internalQuery: QueryBuilder<DataModel, "internal">;
/** /**
* Define a mutation in this Convex app's public API. * Define a mutation in this Convex app's public API.
@@ -49,7 +49,7 @@ export declare const internalQuery: QueryBuilder<DataModel, 'internal'>;
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument. * @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible. * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/ */
export declare const mutation: MutationBuilder<DataModel, 'public'>; export declare const mutation: MutationBuilder<DataModel, "public">;
/** /**
* Define a mutation that is only accessible from other Convex functions (but not from the client). * Define a mutation that is only accessible from other Convex functions (but not from the client).
@@ -59,7 +59,7 @@ export declare const mutation: MutationBuilder<DataModel, 'public'>;
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument. * @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible. * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/ */
export declare const internalMutation: MutationBuilder<DataModel, 'internal'>; export declare const internalMutation: MutationBuilder<DataModel, "internal">;
/** /**
* Define an action in this Convex app's public API. * Define an action in this Convex app's public API.
@@ -72,7 +72,7 @@ export declare const internalMutation: MutationBuilder<DataModel, 'internal'>;
* @param func - The action. It receives an {@link ActionCtx} as its first argument. * @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible. * @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/ */
export declare const action: ActionBuilder<DataModel, 'public'>; export declare const action: ActionBuilder<DataModel, "public">;
/** /**
* Define an action that is only accessible from other Convex functions (but not from the client). * Define an action that is only accessible from other Convex functions (but not from the client).
@@ -80,7 +80,7 @@ export declare const action: ActionBuilder<DataModel, 'public'>;
* @param func - The function. It receives an {@link ActionCtx} as its first argument. * @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible. * @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/ */
export declare const internalAction: ActionBuilder<DataModel, 'internal'>; export declare const internalAction: ActionBuilder<DataModel, "internal">;
/** /**
* Define an HTTP action. * Define an HTTP action.

View File

@@ -9,14 +9,14 @@
*/ */
import { import {
actionGeneric, actionGeneric,
httpActionGeneric, httpActionGeneric,
queryGeneric, queryGeneric,
mutationGeneric, mutationGeneric,
internalActionGeneric, internalActionGeneric,
internalMutationGeneric, internalMutationGeneric,
internalQueryGeneric internalQueryGeneric,
} from 'convex/server'; } from "convex/server";
/** /**
* Define a query in this Convex app's public API. * Define a query in this Convex app's public API.

View File

@@ -43,8 +43,15 @@ export const clear = mutation({
.collect(); .collect();
for (const message of messages) { for (const message of messages) {
if (args.preserveImages && message.imageStorageId) { if (args.preserveImages) {
continue; const hasLegacyImage = message.imageBase64 || message.imagesBase64?.length;
const messageImages = await ctx.db
.query('messageImages')
.withIndex('by_message_id', (q) => q.eq('messageId', message._id))
.first();
if (hasLegacyImage || messageImages) {
continue;
}
} }
await ctx.db.delete(message._id); await ctx.db.delete(message._id);
} }

View File

@@ -0,0 +1,109 @@
import { v } from 'convex/values';
import { mutation, query } from './_generated/server';
/**
 * Registers (or refreshes) a device for a chat. Keeps one row per
 * (chatId, deviceId): re-registering patches hasCamera and lastSeen
 * instead of inserting a duplicate. Returns the pairing row id.
 */
export const register = mutation({
	args: {
		chatId: v.id('chats'),
		deviceId: v.string(),
		hasCamera: v.boolean()
	},
	returns: v.id('devicePairings'),
	handler: async (ctx, args) => {
		// The index only covers chatId, so load the chat's devices and match
		// deviceId in memory.
		const existing = await ctx.db
			.query('devicePairings')
			.withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
			.collect();
		const device = existing.find((d) => d.deviceId === args.deviceId);
		if (device) {
			await ctx.db.patch(device._id, {
				hasCamera: args.hasCamera,
				lastSeen: Date.now()
			});
			return device._id;
		}
		return await ctx.db.insert('devicePairings', {
			chatId: args.chatId,
			deviceId: args.deviceId,
			hasCamera: args.hasCamera,
			lastSeen: Date.now()
		});
	}
});
/**
 * Liveness ping: bumps the device's lastSeen timestamp. A no-op when the
 * device has not registered for this chat.
 */
export const heartbeat = mutation({
	args: {
		chatId: v.id('chats'),
		deviceId: v.string()
	},
	returns: v.null(),
	handler: async (ctx, args) => {
		// Index covers chatId only; match deviceId in memory.
		const devices = await ctx.db
			.query('devicePairings')
			.withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
			.collect();
		const device = devices.find((d) => d.deviceId === args.deviceId);
		if (device) {
			await ctx.db.patch(device._id, { lastSeen: Date.now() });
		}
		return null;
	}
});
/**
 * Returns this device's pairing row for the chat, or null if it has not
 * registered yet.
 */
export const getMyDevice = query({
	args: { chatId: v.id('chats'), deviceId: v.string() },
	returns: v.union(
		v.object({
			_id: v.id('devicePairings'),
			_creationTime: v.number(),
			chatId: v.id('chats'),
			deviceId: v.string(),
			hasCamera: v.boolean(),
			pairedWithDeviceId: v.optional(v.string()),
			lastSeen: v.number()
		}),
		v.null()
	),
	handler: async (ctx, args) => {
		// Index covers chatId only; match deviceId in memory.
		const devices = await ctx.db
			.query('devicePairings')
			.withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
			.collect();
		return devices.find((d) => d.deviceId === args.deviceId) ?? null;
	}
});
/**
 * Returns the device this one is paired with, but only if that device has
 * sent a heartbeat within the last 30 seconds; otherwise (not paired, or the
 * partner looks offline) returns null.
 */
export const getPairedDevice = query({
	args: { chatId: v.id('chats'), deviceId: v.string() },
	returns: v.union(
		v.object({
			_id: v.id('devicePairings'),
			_creationTime: v.number(),
			chatId: v.id('chats'),
			deviceId: v.string(),
			hasCamera: v.boolean(),
			pairedWithDeviceId: v.optional(v.string()),
			lastSeen: v.number()
		}),
		v.null()
	),
	handler: async (ctx, args) => {
		const devices = await ctx.db
			.query('devicePairings')
			.withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
			.collect();
		const myDevice = devices.find((d) => d.deviceId === args.deviceId);
		if (!myDevice?.pairedWithDeviceId) return null;
		// Liveness window: the partner must have pinged within 30 s.
		const thirtySecondsAgo = Date.now() - 30000;
		const paired = devices.find(
			(d) => d.deviceId === myDevice.pairedWithDeviceId && d.lastSeen > thirtySecondsAgo
		);
		return paired ?? null;
	}
});

View File

@@ -1,5 +1,6 @@
import { v } from 'convex/values'; import { v } from 'convex/values';
import { internalMutation, mutation, query } from './_generated/server'; import { mutation, query } from './_generated/server';
import type { Id } from './_generated/dataModel';
export const listByChat = query({ export const listByChat = query({
args: { chatId: v.id('chats') }, args: { chatId: v.id('chats') },
@@ -10,8 +11,6 @@ export const listByChat = query({
chatId: v.id('chats'), chatId: v.id('chats'),
role: v.union(v.literal('user'), v.literal('assistant')), role: v.union(v.literal('user'), v.literal('assistant')),
content: v.string(), content: v.string(),
imageStorageId: v.optional(v.id('_storage')),
imageMediaType: v.optional(v.string()),
followUpOptions: v.optional(v.array(v.string())), followUpOptions: v.optional(v.array(v.string())),
source: v.union(v.literal('telegram'), v.literal('web')), source: v.union(v.literal('telegram'), v.literal('web')),
createdAt: v.number(), createdAt: v.number(),
@@ -19,11 +18,23 @@ export const listByChat = query({
}) })
), ),
handler: async (ctx, args) => { handler: async (ctx, args) => {
return await ctx.db const messages = await ctx.db
.query('messages') .query('messages')
.withIndex('by_chat_id_and_created_at', (q) => q.eq('chatId', args.chatId)) .withIndex('by_chat_id_and_created_at', (q) => q.eq('chatId', args.chatId))
.order('asc') .order('asc')
.collect(); .collect();
return messages.map((m) => ({
_id: m._id,
_creationTime: m._creationTime,
chatId: m.chatId,
role: m.role,
content: m.content,
followUpOptions: m.followUpOptions,
source: m.source,
createdAt: m.createdAt,
isStreaming: m.isStreaming
}));
} }
}); });
@@ -33,8 +44,11 @@ export const create = mutation({
role: v.union(v.literal('user'), v.literal('assistant')), role: v.union(v.literal('user'), v.literal('assistant')),
content: v.string(), content: v.string(),
source: v.union(v.literal('telegram'), v.literal('web')), source: v.union(v.literal('telegram'), v.literal('web')),
imageStorageId: v.optional(v.id('_storage')), imageBase64: v.optional(v.string()),
imageMediaType: v.optional(v.string()), imageMediaType: v.optional(v.string()),
imagesBase64: v.optional(v.array(v.string())),
imagesMediaTypes: v.optional(v.array(v.string())),
photoDraftIds: v.optional(v.array(v.id('photoDrafts'))),
followUpOptions: v.optional(v.array(v.string())), followUpOptions: v.optional(v.array(v.string())),
isStreaming: v.optional(v.boolean()) isStreaming: v.optional(v.boolean())
}, },
@@ -45,25 +59,59 @@ export const create = mutation({
role: args.role, role: args.role,
content: args.content, content: args.content,
source: args.source, source: args.source,
imageStorageId: args.imageStorageId, imageBase64: args.imageBase64,
imageMediaType: args.imageMediaType, imageMediaType: args.imageMediaType,
imagesBase64: args.imagesBase64,
imagesMediaTypes: args.imagesMediaTypes,
followUpOptions: args.followUpOptions, followUpOptions: args.followUpOptions,
createdAt: Date.now(), createdAt: Date.now(),
isStreaming: args.isStreaming isStreaming: args.isStreaming
}); });
const drafts: Array<{ base64: string; mediaType: string; id: Id<'photoDrafts'> }> = [];
if (args.photoDraftIds && args.photoDraftIds.length > 0) {
for (const draftId of args.photoDraftIds) {
const draft = await ctx.db.get(draftId);
if (draft) {
drafts.push({ base64: draft.base64, mediaType: draft.mediaType, id: draft._id });
}
}
}
for (let i = 0; i < drafts.length; i++) {
await ctx.db.insert('messageImages', {
messageId,
base64: drafts[i].base64,
mediaType: drafts[i].mediaType,
order: i
});
}
if (args.source === 'web' && args.role === 'user') { if (args.source === 'web' && args.role === 'user') {
const chat = await ctx.db.get(args.chatId); const chat = await ctx.db.get(args.chatId);
if (chat) { if (chat) {
await ctx.db.insert('pendingGenerations', { const pendingGenId = await ctx.db.insert('pendingGenerations', {
userId: chat.userId, userId: chat.userId,
chatId: args.chatId, chatId: args.chatId,
userMessage: args.content, userMessage: args.content,
createdAt: Date.now() createdAt: Date.now()
}); });
for (let i = 0; i < drafts.length; i++) {
await ctx.db.insert('pendingGenerationImages', {
pendingGenerationId: pendingGenId,
base64: drafts[i].base64,
mediaType: drafts[i].mediaType,
order: i
});
}
} }
} }
for (const draft of drafts) {
await ctx.db.delete(draft.id);
}
return messageId; return messageId;
} }
}); });
@@ -132,8 +180,10 @@ export const getLastAssistantMessage = query({
chatId: v.id('chats'), chatId: v.id('chats'),
role: v.union(v.literal('user'), v.literal('assistant')), role: v.union(v.literal('user'), v.literal('assistant')),
content: v.string(), content: v.string(),
imageStorageId: v.optional(v.id('_storage')), imageBase64: v.optional(v.string()),
imageMediaType: v.optional(v.string()), imageMediaType: v.optional(v.string()),
imagesBase64: v.optional(v.array(v.string())),
imagesMediaTypes: v.optional(v.array(v.string())),
followUpOptions: v.optional(v.array(v.string())), followUpOptions: v.optional(v.array(v.string())),
source: v.union(v.literal('telegram'), v.literal('web')), source: v.union(v.literal('telegram'), v.literal('web')),
createdAt: v.number(), createdAt: v.number(),
@@ -152,21 +202,12 @@ export const getLastAssistantMessage = query({
} }
}); });
export const generateUploadUrl = mutation({ export const getChatImages = query({
args: {},
returns: v.string(),
handler: async (ctx) => {
return await ctx.storage.generateUploadUrl();
}
});
export const getImageUrls = query({
args: { chatId: v.id('chats') }, args: { chatId: v.id('chats') },
returns: v.array( returns: v.array(
v.object({ v.object({
storageId: v.id('_storage'), base64: v.string(),
mediaType: v.string(), mediaType: v.string()
url: v.union(v.string(), v.null())
}) })
), ),
handler: async (ctx, args) => { handler: async (ctx, args) => {
@@ -175,41 +216,33 @@ export const getImageUrls = query({
.withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId)) .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
.collect(); .collect();
const imageMessages = messages.filter((m) => m.imageStorageId && m.imageMediaType); const images: Array<{ base64: string; mediaType: string }> = [];
const results = [];
for (const msg of imageMessages) { for (const m of messages) {
if (msg.imageStorageId && msg.imageMediaType) { const msgImages = await ctx.db
const url = await ctx.storage.getUrl(msg.imageStorageId); .query('messageImages')
results.push({ .withIndex('by_message_id', (q) => q.eq('messageId', m._id))
storageId: msg.imageStorageId, .collect();
mediaType: msg.imageMediaType,
url for (const img of msgImages.sort((a, b) => a.order - b.order)) {
images.push({ base64: img.base64, mediaType: img.mediaType });
}
if (m.imagesBase64 && m.imagesMediaTypes) {
for (let i = 0; i < m.imagesBase64.length; i++) {
images.push({
base64: m.imagesBase64[i],
mediaType: m.imagesMediaTypes[i]
});
}
} else if (m.imageBase64 && m.imageMediaType) {
images.push({
base64: m.imageBase64,
mediaType: m.imageMediaType
}); });
} }
} }
return results; return images;
}
});
export const createWithImage = internalMutation({
args: {
chatId: v.id('chats'),
content: v.string(),
imageStorageId: v.id('_storage'),
imageMediaType: v.string()
},
returns: v.id('messages'),
handler: async (ctx, args) => {
return await ctx.db.insert('messages', {
chatId: args.chatId,
role: 'user' as const,
content: args.content,
source: 'telegram' as const,
imageStorageId: args.imageStorageId,
imageMediaType: args.imageMediaType,
createdAt: Date.now()
});
} }
}); });

View File

@@ -0,0 +1,122 @@
import { v } from 'convex/values';
import { mutation, query } from './_generated/server';
/**
 * Create a pairing request for a chat, deduplicating per device:
 * if this device already has a pending request, reuse it.
 */
export const create = mutation({
  args: {
    chatId: v.id('chats'),
    fromDeviceId: v.string()
  },
  returns: v.id('pairingRequests'),
  handler: async (ctx, args) => {
    const requestsForChat = await ctx.db
      .query('pairingRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .collect();
    // Reuse an in-flight request from the same device instead of duplicating it.
    for (const request of requestsForChat) {
      if (request.status === 'pending' && request.fromDeviceId === args.fromDeviceId) {
        return request._id;
      }
    }
    return await ctx.db.insert('pairingRequests', {
      chatId: args.chatId,
      fromDeviceId: args.fromDeviceId,
      status: 'pending',
      createdAt: Date.now()
    });
  }
});
/**
 * Accept a pending pairing request and link the two devices.
 *
 * Marks the request `accepted`, then cross-writes `pairedWithDeviceId`
 * on both device records so each points at the other. Device records
 * that are missing from `devicePairings` are silently skipped.
 */
export const accept = mutation({
  args: {
    requestId: v.id('pairingRequests'),
    acceptingDeviceId: v.string()
  },
  returns: v.null(),
  handler: async (ctx, args) => {
    const request = await ctx.db.get(args.requestId);
    // No-op unless the request exists and is still pending.
    if (!request || request.status !== 'pending') return null;
    await ctx.db.patch(args.requestId, { status: 'accepted' });
    const devices = await ctx.db
      .query('devicePairings')
      .withIndex('by_chat_id', (q) => q.eq('chatId', request.chatId))
      .collect();
    const fromDevice = devices.find((d) => d.deviceId === request.fromDeviceId);
    const acceptingDevice = devices.find((d) => d.deviceId === args.acceptingDeviceId);
    // Link both ends of the pair (requester <-> accepter).
    if (fromDevice) {
      await ctx.db.patch(fromDevice._id, { pairedWithDeviceId: args.acceptingDeviceId });
    }
    if (acceptingDevice) {
      await ctx.db.patch(acceptingDevice._id, { pairedWithDeviceId: request.fromDeviceId });
    }
    return null;
  }
});
/** Reject a pairing request; a no-op unless it is still pending. */
export const reject = mutation({
  args: { requestId: v.id('pairingRequests') },
  returns: v.null(),
  handler: async (ctx, args) => {
    const pending = await ctx.db.get(args.requestId);
    if (pending?.status === 'pending') {
      await ctx.db.patch(args.requestId, { status: 'rejected' });
    }
    return null;
  }
});
/**
 * Return the first pending pairing request in this chat that was NOT
 * created by `excludeDeviceId` (i.e. a request this device may answer),
 * or null when there is none.
 */
export const getPending = query({
  args: { chatId: v.id('chats'), excludeDeviceId: v.string() },
  returns: v.union(
    v.object({
      _id: v.id('pairingRequests'),
      _creationTime: v.number(),
      chatId: v.id('chats'),
      fromDeviceId: v.string(),
      status: v.union(v.literal('pending'), v.literal('accepted'), v.literal('rejected')),
      createdAt: v.number()
    }),
    v.null()
  ),
  handler: async (ctx, args) => {
    const requestsForChat = await ctx.db
      .query('pairingRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .collect();
    for (const request of requestsForChat) {
      if (request.status !== 'pending') continue;
      if (request.fromDeviceId === args.excludeDeviceId) continue;
      return request;
    }
    return null;
  }
});
/**
 * Break the pairing for `deviceId` in this chat, clearing the link on
 * both this device and its partner (when the partner record exists).
 */
export const unpair = mutation({
  args: {
    chatId: v.id('chats'),
    deviceId: v.string()
  },
  returns: v.null(),
  handler: async (ctx, args) => {
    const devices = await ctx.db
      .query('devicePairings')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .collect();
    const mine = devices.find((d) => d.deviceId === args.deviceId);
    const partnerId = mine?.pairedWithDeviceId;
    if (!mine || !partnerId) return null;
    // Clear both ends of the link.
    await ctx.db.patch(mine._id, { pairedWithDeviceId: undefined });
    const partner = devices.find((d) => d.deviceId === partnerId);
    if (partner) {
      await ctx.db.patch(partner._id, { pairedWithDeviceId: undefined });
    }
    return null;
  }
});

View File

@@ -10,11 +10,33 @@ export const list = query({
userId: v.id('users'), userId: v.id('users'),
chatId: v.id('chats'), chatId: v.id('chats'),
userMessage: v.string(), userMessage: v.string(),
imagesBase64: v.optional(v.array(v.string())),
imagesMediaTypes: v.optional(v.array(v.string())),
createdAt: v.number() createdAt: v.number()
}) })
), ),
handler: async (ctx) => { handler: async (ctx) => {
return await ctx.db.query('pendingGenerations').collect(); const pending = await ctx.db.query('pendingGenerations').collect();
const result = [];
for (const p of pending) {
const images = await ctx.db
.query('pendingGenerationImages')
.withIndex('by_pending_generation_id', (q) => q.eq('pendingGenerationId', p._id))
.collect();
const sortedImages = images.sort((a, b) => a.order - b.order);
result.push({
...p,
imagesBase64:
sortedImages.length > 0 ? sortedImages.map((img) => img.base64) : p.imagesBase64,
imagesMediaTypes:
sortedImages.length > 0 ? sortedImages.map((img) => img.mediaType) : p.imagesMediaTypes
});
}
return result;
} }
}); });
@@ -39,7 +61,35 @@ export const remove = mutation({
args: { id: v.id('pendingGenerations') }, args: { id: v.id('pendingGenerations') },
returns: v.null(), returns: v.null(),
handler: async (ctx, args) => { handler: async (ctx, args) => {
const images = await ctx.db
.query('pendingGenerationImages')
.withIndex('by_pending_generation_id', (q) => q.eq('pendingGenerationId', args.id))
.collect();
for (const img of images) {
await ctx.db.delete(img._id);
}
await ctx.db.delete(args.id); await ctx.db.delete(args.id);
return null; return null;
} }
}); });
/**
 * Return the images attached to a pending generation, ordered by their
 * stored `order` field, stripped down to base64 payload + media type.
 */
export const getImages = query({
  args: { pendingGenerationId: v.id('pendingGenerations') },
  returns: v.array(
    v.object({
      base64: v.string(),
      mediaType: v.string()
    })
  ),
  handler: async (ctx, args) => {
    const rows = await ctx.db
      .query('pendingGenerationImages')
      .withIndex('by_pending_generation_id', (q) =>
        q.eq('pendingGenerationId', args.pendingGenerationId)
      )
      .collect();
    rows.sort((a, b) => a.order - b.order);
    const result = [];
    for (const row of rows) {
      result.push({ base64: row.base64, mediaType: row.mediaType });
    }
    return result;
  }
});

View File

@@ -0,0 +1,93 @@
import { v } from 'convex/values';
import { mutation, query } from './_generated/server';
// Shape of a photo payload sent from the client: raw base64 data plus its MIME type.
const photoValidator = v.object({
  base64: v.string(),
  mediaType: v.string()
});
/**
 * List this device's photo drafts for a chat.
 * Only ids and media types are returned; the base64 payloads stay
 * server-side to keep the response small.
 */
export const get = query({
  args: { chatId: v.id('chats'), deviceId: v.string() },
  returns: v.object({
    photos: v.array(
      v.object({
        _id: v.id('photoDrafts'),
        mediaType: v.string()
      })
    )
  }),
  handler: async (ctx, args) => {
    const rows = await ctx.db
      .query('photoDrafts')
      .withIndex('by_chat_id_and_device_id', (q) =>
        q.eq('chatId', args.chatId).eq('deviceId', args.deviceId)
      )
      .collect();
    const photos = [];
    for (const row of rows) {
      photos.push({ _id: row._id, mediaType: row.mediaType });
    }
    return { photos };
  }
});
/** Append a new photo draft for this chat + device. */
export const addPhoto = mutation({
  args: {
    chatId: v.id('chats'),
    deviceId: v.string(),
    photo: photoValidator
  },
  returns: v.null(),
  handler: async (ctx, args) => {
    const { base64, mediaType } = args.photo;
    await ctx.db.insert('photoDrafts', {
      chatId: args.chatId,
      deviceId: args.deviceId,
      base64,
      mediaType,
      createdAt: Date.now()
    });
    return null;
  }
});
/**
 * Delete the draft photo at position `index` for this chat/device.
 *
 * NOTE(review): `index` addresses the drafts in the order `.collect()`
 * returns them for this index — the client's list must come from the
 * same query (`photoDrafts.get`) for positions to line up; confirm.
 */
export const removePhoto = mutation({
  args: {
    chatId: v.id('chats'),
    deviceId: v.string(),
    index: v.number()
  },
  returns: v.null(),
  handler: async (ctx, args) => {
    const drafts = await ctx.db
      .query('photoDrafts')
      .withIndex('by_chat_id_and_device_id', (q) =>
        q.eq('chatId', args.chatId).eq('deviceId', args.deviceId)
      )
      .collect();
    // Out-of-range indices are silently ignored.
    if (drafts[args.index]) {
      await ctx.db.delete(drafts[args.index]._id);
    }
    return null;
  }
});
/** Remove every draft this device has queued in this chat. */
export const clear = mutation({
  args: { chatId: v.id('chats'), deviceId: v.string() },
  returns: v.null(),
  handler: async (ctx, args) => {
    const rows = await ctx.db
      .query('photoDrafts')
      .withIndex('by_chat_id_and_device_id', (q) =>
        q.eq('chatId', args.chatId).eq('deviceId', args.deviceId)
      )
      .collect();
    for (const row of rows) {
      await ctx.db.delete(row._id);
    }
    return null;
  }
});

View File

@@ -0,0 +1,293 @@
import { v } from 'convex/values';
import { mutation, query } from './_generated/server';
// Full shape of a photoRequests row as returned to clients, including the
// (potentially large) base64 photo payload. Status lifecycle as used below:
// pending/countdown -> capture_now -> captured -> accepted | rejected.
const photoRequestValidator = v.object({
  _id: v.id('photoRequests'),
  _creationTime: v.number(),
  chatId: v.id('chats'),
  requesterId: v.string(),
  captureDeviceId: v.optional(v.string()),
  status: v.union(
    v.literal('pending'),
    v.literal('countdown'),
    v.literal('capture_now'),
    v.literal('captured'),
    v.literal('accepted'),
    v.literal('rejected')
  ),
  photoBase64: v.optional(v.string()),
  photoMediaType: v.optional(v.string()),
  thumbnailBase64: v.optional(v.string()),
  createdAt: v.number()
});
// Bandwidth-light projection of a photo request: status plus thumbnail only.
// The full-size photoBase64 is deliberately excluded; clients fetch it
// explicitly via getPhotoData when needed.
const photoRequestLightValidator = v.object({
  _id: v.id('photoRequests'),
  status: v.union(
    v.literal('pending'),
    v.literal('countdown'),
    v.literal('capture_now'),
    v.literal('captured'),
    v.literal('accepted'),
    v.literal('rejected')
  ),
  photoMediaType: v.optional(v.string()),
  thumbnailBase64: v.optional(v.string())
});
/**
 * Start a new photo request in the `countdown` state, after rejecting
 * any still-active requests in this chat so only one is live at a time.
 */
export const create = mutation({
  args: {
    chatId: v.id('chats'),
    requesterId: v.string(),
    captureDeviceId: v.string()
  },
  returns: v.id('photoRequests'),
  handler: async (ctx, args) => {
    // Only the first 20 indexed requests are inspected for supersession.
    const recent = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .take(20);
    const stillActive = ['pending', 'countdown', 'capture_now'];
    for (const old of recent) {
      if (stillActive.includes(old.status)) {
        await ctx.db.patch(old._id, { status: 'rejected' });
      }
    }
    return await ctx.db.insert('photoRequests', {
      chatId: args.chatId,
      requesterId: args.requesterId,
      captureDeviceId: args.captureDeviceId,
      status: 'countdown',
      createdAt: Date.now()
    });
  }
});
/**
 * Signal the capture device that the countdown is over and the photo
 * should be taken now.
 *
 * Fix: the original patched the status unconditionally, so a stale
 * client call could resurrect an already captured/accepted/rejected
 * request back into `capture_now`. Guard on the live pre-capture
 * states, consistent with submitPhoto/markRejected.
 */
export const markCaptureNow = mutation({
  args: { requestId: v.id('photoRequests') },
  returns: v.null(),
  handler: async (ctx, args) => {
    const req = await ctx.db.get(args.requestId);
    // Only advance a request that is still waiting to capture.
    if (!req || (req.status !== 'countdown' && req.status !== 'pending')) {
      return null;
    }
    await ctx.db.patch(args.requestId, { status: 'capture_now' });
    return null;
  }
});
/**
 * Attach a captured photo to a request and move it to `captured`.
 * Returns false (and writes nothing) unless the request is currently
 * in its `capture_now` window.
 */
export const submitPhoto = mutation({
  args: {
    requestId: v.id('photoRequests'),
    photoBase64: v.string(),
    photoMediaType: v.string(),
    thumbnailBase64: v.optional(v.string())
  },
  returns: v.boolean(),
  handler: async (ctx, args) => {
    const request = await ctx.db.get(args.requestId);
    if (request?.status !== 'capture_now') {
      return false;
    }
    await ctx.db.patch(args.requestId, {
      status: 'captured',
      photoBase64: args.photoBase64,
      photoMediaType: args.photoMediaType,
      thumbnailBase64: args.thumbnailBase64
    });
    return true;
  }
});
/**
 * Force a request into the `accepted` state.
 *
 * NOTE(review): unlike markRejected, this does not check the current
 * status before patching — confirm stale calls cannot overwrite an
 * already-rejected request.
 */
export const markAccepted = mutation({
  args: { requestId: v.id('photoRequests') },
  returns: v.null(),
  handler: async (ctx, args) => {
    await ctx.db.patch(args.requestId, { status: 'accepted' });
    return null;
  }
});
/**
 * Reject a photo request. Returns true when the rejection was applied,
 * false when the request is missing or already terminal.
 */
export const markRejected = mutation({
  args: { requestId: v.id('photoRequests') },
  returns: v.boolean(),
  handler: async (ctx, args) => {
    const req = await ctx.db.get(args.requestId);
    if (!req) return false;
    // A request that already reached a terminal state stays as-is.
    const terminal = req.status === 'accepted' || req.status === 'rejected';
    if (terminal) return false;
    await ctx.db.patch(req._id, { status: 'rejected' });
    return true;
  }
});
// Minimal payload for clients polling for the "capture now" signal:
// just the request id and the fixed status literal.
const captureNowLightValidator = v.object({
  _id: v.id('photoRequests'),
  status: v.literal('capture_now')
});
/**
 * Return the newest fresh `capture_now` request for this chat (created
 * within the last minute), as a light payload, or null.
 * `deviceId` is accepted for signature compatibility but not used here.
 */
export const getCaptureNowRequest = query({
  args: { chatId: v.id('chats'), deviceId: v.optional(v.string()) },
  returns: v.union(captureNowLightValidator, v.null()),
  handler: async (ctx, args) => {
    // Requests older than a minute are considered stale.
    const cutoff = Date.now() - 60 * 1000;
    const recent = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .order('desc')
      .take(50);
    for (const req of recent) {
      if (req.status === 'capture_now' && req.createdAt > cutoff) {
        return { _id: req._id, status: 'capture_now' as const };
      }
    }
    return null;
  }
});
/**
 * Return the most recent request in this chat that is waiting to be
 * captured (`countdown` or `capture_now`), or null.
 *
 * NOTE(review): `deviceId` is accepted but unused — the query does not
 * filter on `captureDeviceId`; confirm any capture device is meant to
 * serve any request in the chat.
 */
export const getActiveForCapture = query({
  args: { chatId: v.id('chats'), deviceId: v.optional(v.string()) },
  returns: v.union(photoRequestValidator, v.null()),
  handler: async (ctx, args) => {
    // Only the 50 newest requests for the chat are scanned.
    const requests = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .order('desc')
      .take(50);
    return requests.find((r) => r.status === 'countdown' || r.status === 'capture_now') ?? null;
  }
});
/**
 * Return this requester's newest in-flight photo request (countdown,
 * capture_now, or captured) as a light payload, or null.
 *
 * Fix: the original ran the (up to 100-row) index scan BEFORE checking
 * `args.deviceId` and then returned null anyway — the guard is hoisted
 * above the query so the no-device case costs nothing.
 */
export const getMyActiveRequest = query({
  args: { chatId: v.id('chats'), deviceId: v.optional(v.string()) },
  returns: v.union(photoRequestLightValidator, v.null()),
  handler: async (ctx, args) => {
    // Without a device id there is nothing to match.
    if (!args.deviceId) return null;
    const requests = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .order('desc')
      .take(100);
    const found = requests.find(
      (r) =>
        r.requesterId === args.deviceId &&
        (r.status === 'countdown' || r.status === 'capture_now' || r.status === 'captured')
    );
    if (!found) return null;
    // Project to the light shape: no full-size photoBase64.
    return {
      _id: found._id,
      status: found.status,
      photoMediaType: found.photoMediaType,
      thumbnailBase64: found.thumbnailBase64
    };
  }
});
/**
 * Return the full photo payload for a request, or null when the request
 * is missing or carries no photo yet.
 */
export const getPhotoData = query({
  args: { requestId: v.id('photoRequests') },
  returns: v.union(
    v.object({
      photoBase64: v.string(),
      photoMediaType: v.string(),
      thumbnailBase64: v.optional(v.string())
    }),
    v.null()
  ),
  handler: async (ctx, args) => {
    const request = await ctx.db.get(args.requestId);
    if (!request) return null;
    const { photoBase64, photoMediaType, thumbnailBase64 } = request;
    if (!photoBase64 || !photoMediaType) return null;
    return { photoBase64, photoMediaType, thumbnailBase64 };
  }
});
/**
 * Return a small preview (thumbnail, falling back to the full photo)
 * for a request, or null when no preview is available.
 *
 * Fix: the original returned `thumbnailBase64: ''` when the request had
 * a media type but no image data, producing a truthy-but-blank preview
 * object that clients would render as an empty image. Report null
 * instead so `if (data)` checks behave correctly.
 */
export const getPhotoPreview = query({
  args: { requestId: v.id('photoRequests') },
  returns: v.union(
    v.object({
      thumbnailBase64: v.string(),
      photoMediaType: v.string()
    }),
    v.null()
  ),
  handler: async (ctx, args) => {
    const req = await ctx.db.get(args.requestId);
    if (!req || !req.photoMediaType) return null;
    // Prefer the small thumbnail; fall back to the full photo.
    const preview = req.thumbnailBase64 || req.photoBase64;
    if (!preview) return null;
    return {
      thumbnailBase64: preview,
      photoMediaType: req.photoMediaType
    };
  }
});
/**
 * Copy a captured photo into the accepting device's draft pile and mark
 * the request `accepted`. Returns the new draft id.
 *
 * @throws Error when the request is missing or carries no photo data.
 */
export const acceptPhotoToDraft = mutation({
  args: {
    requestId: v.id('photoRequests'),
    chatId: v.id('chats'),
    deviceId: v.string()
  },
  returns: v.id('photoDrafts'),
  handler: async (ctx, args) => {
    const req = await ctx.db.get(args.requestId);
    if (!req || !req.photoBase64 || !req.photoMediaType) {
      throw new Error('Photo request not found or has no photo');
    }
    // Materialize the image as a draft owned by the accepting device.
    const draftId = await ctx.db.insert('photoDrafts', {
      chatId: args.chatId,
      deviceId: args.deviceId,
      base64: req.photoBase64,
      mediaType: req.photoMediaType,
      createdAt: Date.now()
    });
    await ctx.db.patch(args.requestId, { status: 'accepted' });
    return draftId;
  }
});
/**
 * Delete up to 20 photo requests for a chat (one batch per call) and
 * return how many rows were removed.
 */
export const cleanup = mutation({
  args: { chatId: v.id('chats') },
  returns: v.number(),
  handler: async (ctx, args) => {
    const batch = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .take(20);
    for (const req of batch) {
      await ctx.db.delete(req._id);
    }
    return batch.length;
  }
});
/**
 * Return the newest `captured` request assigned to this capture device,
 * or null.
 *
 * Fix: the original checked `args.deviceId` AFTER fetching up to 50
 * rows (each potentially carrying a base64 photo payload) and then
 * returned null anyway; the guard is hoisted above the query.
 */
export const getCapturedForPhone = query({
  args: { chatId: v.id('chats'), deviceId: v.optional(v.string()) },
  returns: v.union(photoRequestValidator, v.null()),
  handler: async (ctx, args) => {
    // Without a device id there is nothing to match.
    if (!args.deviceId) return null;
    const requests = await ctx.db
      .query('photoRequests')
      .withIndex('by_chat_id', (q) => q.eq('chatId', args.chatId))
      .order('desc')
      .take(50);
    return (
      requests.find((r) => r.captureDeviceId === args.deviceId && r.status === 'captured') ?? null
    );
  }
});

View File

@@ -23,8 +23,11 @@ export default defineSchema({
chatId: v.id('chats'), chatId: v.id('chats'),
role: v.union(v.literal('user'), v.literal('assistant')), role: v.union(v.literal('user'), v.literal('assistant')),
content: v.string(), content: v.string(),
imageStorageId: v.optional(v.id('_storage')), imageBase64: v.optional(v.string()),
imageMediaType: v.optional(v.string()), imageMediaType: v.optional(v.string()),
imageStorageId: v.optional(v.id('_storage')),
imagesBase64: v.optional(v.array(v.string())),
imagesMediaTypes: v.optional(v.array(v.string())),
followUpOptions: v.optional(v.array(v.string())), followUpOptions: v.optional(v.array(v.string())),
source: v.union(v.literal('telegram'), v.literal('web')), source: v.union(v.literal('telegram'), v.literal('web')),
createdAt: v.number(), createdAt: v.number(),
@@ -37,6 +40,63 @@ export default defineSchema({
userId: v.id('users'), userId: v.id('users'),
chatId: v.id('chats'), chatId: v.id('chats'),
userMessage: v.string(), userMessage: v.string(),
imagesBase64: v.optional(v.array(v.string())),
imagesMediaTypes: v.optional(v.array(v.string())),
createdAt: v.number() createdAt: v.number()
}) }),
pendingGenerationImages: defineTable({
pendingGenerationId: v.id('pendingGenerations'),
base64: v.string(),
mediaType: v.string(),
order: v.number()
}).index('by_pending_generation_id', ['pendingGenerationId']),
messageImages: defineTable({
messageId: v.id('messages'),
base64: v.string(),
mediaType: v.string(),
order: v.number()
}).index('by_message_id', ['messageId']),
devicePairings: defineTable({
chatId: v.id('chats'),
deviceId: v.string(),
hasCamera: v.boolean(),
pairedWithDeviceId: v.optional(v.string()),
lastSeen: v.number()
}).index('by_chat_id', ['chatId']),
pairingRequests: defineTable({
chatId: v.id('chats'),
fromDeviceId: v.string(),
status: v.union(v.literal('pending'), v.literal('accepted'), v.literal('rejected')),
createdAt: v.number()
}).index('by_chat_id', ['chatId']),
photoRequests: defineTable({
chatId: v.id('chats'),
requesterId: v.string(),
captureDeviceId: v.optional(v.string()),
status: v.union(
v.literal('pending'),
v.literal('countdown'),
v.literal('capture_now'),
v.literal('captured'),
v.literal('accepted'),
v.literal('rejected')
),
photoBase64: v.optional(v.string()),
photoMediaType: v.optional(v.string()),
thumbnailBase64: v.optional(v.string()),
createdAt: v.number()
}).index('by_chat_id', ['chatId']),
photoDrafts: defineTable({
chatId: v.id('chats'),
deviceId: v.string(),
base64: v.string(),
mediaType: v.string(),
createdAt: v.number()
}).index('by_chat_id_and_device_id', ['chatId', 'deviceId'])
}); });

View File

@@ -3,10 +3,19 @@
import favicon from '$lib/assets/favicon.svg'; import favicon from '$lib/assets/favicon.svg';
import { PUBLIC_CONVEX_URL } from '$env/static/public'; import { PUBLIC_CONVEX_URL } from '$env/static/public';
import { setupConvex } from 'convex-svelte'; import { setupConvex } from 'convex-svelte';
import { hasWebSocketSupport, setupPollingConvex } from '$lib/convex-polling.svelte';
import { setContext } from 'svelte';
let { children } = $props(); let { children } = $props();
setupConvex(PUBLIC_CONVEX_URL); const usePolling = !hasWebSocketSupport();
setContext('convex-use-polling', usePolling);
if (usePolling) {
setupPollingConvex(PUBLIC_CONVEX_URL);
} else {
setupConvex(PUBLIC_CONVEX_URL);
}
</script> </script>
<svelte:head><link rel="icon" href={favicon} /></svelte:head> <svelte:head><link rel="icon" href={favicon} /></svelte:head>

View File

@@ -1,18 +1,120 @@
<script lang="ts"> <script lang="ts">
import { page } from '$app/state'; import { page } from '$app/state';
import { browser } from '$app/environment';
import { getContext, onMount } from 'svelte';
import { SvelteSet } from 'svelte/reactivity';
import { useQuery, useConvexClient } from 'convex-svelte'; import { useQuery, useConvexClient } from 'convex-svelte';
import {
usePollingQuery,
usePollingMutation,
usePollingClient
} from '$lib/convex-polling.svelte';
import { api } from '$lib/convex/_generated/api'; import { api } from '$lib/convex/_generated/api';
import type { Id } from '$lib/convex/_generated/dataModel';
import ChatMessage from '$lib/components/ChatMessage.svelte'; import ChatMessage from '$lib/components/ChatMessage.svelte';
import ChatInput from '$lib/components/ChatInput.svelte'; import ChatInput from '$lib/components/ChatInput.svelte';
import FollowUpButtons from '$lib/components/FollowUpButtons.svelte'; import FollowUpButtons from '$lib/components/FollowUpButtons.svelte';
import StealthOverlay from '$lib/components/StealthOverlay.svelte';
import CameraCapture from '$lib/components/CameraCapture.svelte';
import WatchCountdown from '$lib/components/WatchCountdown.svelte';
import PhotoPreview from '$lib/components/PhotoPreview.svelte';
import DraftBadge from '$lib/components/DraftBadge.svelte';
import SilentCapture from '$lib/components/SilentCapture.svelte';
const usePolling = getContext<boolean>('convex-use-polling') ?? false;
let mnemonic = $derived(page.params.mnemonic); let mnemonic = $derived(page.params.mnemonic);
const client = useConvexClient();
const chatData = useQuery(api.chats.getWithUser, () => (mnemonic ? { mnemonic } : 'skip')); let lastMessageElement: HTMLDivElement | null = $state(null);
const messagesQuery = useQuery(api.messages.listByChat, () => let showScrollButton = $state(false);
chatData.data?.chat?._id ? { chatId: chatData.data.chat._id } : 'skip'
); let deviceId = $state('');
let hasCamera = $state(false);
let showCamera = $state(false);
let showWatchCountdown = $state(false);
let activeRequestId: Id<'photoRequests'> | null = $state(null);
let previewPhoto: {
thumbnail: string;
mediaType: string;
requestId: Id<'photoRequests'>;
} | null = $state(null);
let shownPreviewIds = new SvelteSet<string>();
let silentCaptureRef: SilentCapture | null = $state(null);
let processedCaptureNowIds = new SvelteSet<string>();
/** Generate a UUID, using the native generator when available. */
function generateId(): string {
  // Prefer the platform's crypto-backed UUID.
  if (typeof crypto !== 'undefined' && crypto.randomUUID) {
    return crypto.randomUUID();
  }
  // Manual v4 fallback for older browsers/webviews.
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (ch) => {
    const rand = (Math.random() * 16) | 0;
    const digit = ch === 'x' ? rand : (rand & 0x3) | 0x8;
    return digit.toString(16);
  });
}
/**
 * Return this browser's stable device id, minting and persisting one in
 * localStorage on first use. Returns '' during SSR (no localStorage).
 */
function getOrCreateDeviceId(): string {
  if (!browser) return '';
  let id = localStorage.getItem('stealth-device-id');
  if (!id) {
    id = generateId();
    localStorage.setItem('stealth-device-id', id);
  }
  return id;
}
/** Detect whether this browser exposes at least one video input device. */
async function checkCamera(): Promise<boolean> {
  // Only meaningful in the browser with the MediaDevices API available.
  if (!browser || !navigator.mediaDevices?.enumerateDevices) return false;
  try {
    const deviceList = await navigator.mediaDevices.enumerateDevices();
    return deviceList.some((d) => d.kind === 'videoinput');
  } catch {
    // Enumeration can throw (e.g. permissions policy) — treat as no camera.
    return false;
  }
}
onMount(() => {
deviceId = getOrCreateDeviceId();
checkCamera().then((has) => {
hasCamera = has;
});
});
$effect(() => {
if (!lastMessageElement) return;
const observer = new IntersectionObserver(
([entry]) => {
showScrollButton = !entry.isIntersecting;
},
{ threshold: 0, rootMargin: '0px 0px -90% 0px' }
);
observer.observe(lastMessageElement);
return () => observer.disconnect();
});
function scrollToLastMessage() {
lastMessageElement?.scrollIntoView({ behavior: 'smooth', block: 'start' });
}
const chatDataWs = usePolling
? null
: useQuery(api.chats.getWithUser, () => (mnemonic ? { mnemonic } : 'skip'));
const chatDataPoll = usePolling
? usePollingQuery(api.chats.getWithUser, () => (mnemonic ? { mnemonic } : 'skip'))
: null;
const chatData = $derived(usePolling ? chatDataPoll! : chatDataWs!);
const chatId = $derived(chatData.data?.chat?._id);
const messagesQueryWs = usePolling
? null
: useQuery(api.messages.listByChat, () => (chatId ? { chatId } : 'skip'));
const messagesQueryPoll = usePolling
? usePollingQuery(api.messages.listByChat, () => (chatId ? { chatId } : 'skip'))
: null;
const messagesQuery = $derived(usePolling ? messagesQueryPoll! : messagesQueryWs!);
let messages = $derived(messagesQuery.data ?? []); let messages = $derived(messagesQuery.data ?? []);
let lastMessage = $derived(messages[messages.length - 1]); let lastMessage = $derived(messages[messages.length - 1]);
@@ -22,79 +124,555 @@
: [] : []
); );
const myDeviceWs = usePolling
? null
: useQuery(api.devicePairings.getMyDevice, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
);
const myDevicePoll = usePolling
? usePollingQuery(api.devicePairings.getMyDevice, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
)
: null;
const myDevice = $derived(usePolling ? myDevicePoll! : myDeviceWs!);
const pairedDeviceWs = usePolling
? null
: useQuery(api.devicePairings.getPairedDevice, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
);
const pairedDevicePoll = usePolling
? usePollingQuery(api.devicePairings.getPairedDevice, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
)
: null;
const pairedDevice = $derived(usePolling ? pairedDevicePoll! : pairedDeviceWs!);
const isPaired = $derived(!!myDevice.data?.pairedWithDeviceId && !!pairedDevice.data);
const pendingPairingWs = usePolling
? null
: useQuery(api.pairingRequests.getPending, () =>
chatId && deviceId ? { chatId, excludeDeviceId: deviceId } : 'skip'
);
const pendingPairingPoll = usePolling
? usePollingQuery(api.pairingRequests.getPending, () =>
chatId && deviceId ? { chatId, excludeDeviceId: deviceId } : 'skip'
)
: null;
const pendingPairing = $derived(usePolling ? pendingPairingPoll! : pendingPairingWs!);
const captureNowRequestWs = usePolling
? null
: useQuery(api.photoRequests.getCaptureNowRequest, () => (chatId ? { chatId } : 'skip'));
const captureNowRequestPoll = usePolling
? usePollingQuery(api.photoRequests.getCaptureNowRequest, () => (chatId ? { chatId } : 'skip'))
: null;
const captureNowRequest = $derived(usePolling ? captureNowRequestPoll! : captureNowRequestWs!);
const myActiveRequestWs = usePolling
? null
: useQuery(api.photoRequests.getMyActiveRequest, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
);
const myActiveRequestPoll = usePolling
? usePollingQuery(api.photoRequests.getMyActiveRequest, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
)
: null;
const myActiveRequest = $derived(usePolling ? myActiveRequestPoll! : myActiveRequestWs!);
const photoDraftWs = usePolling
? null
: useQuery(api.photoDrafts.get, () => (chatId && deviceId ? { chatId, deviceId } : 'skip'));
const photoDraftPoll = usePolling
? usePollingQuery(api.photoDrafts.get, () =>
chatId && deviceId ? { chatId, deviceId } : 'skip'
)
: null;
const photoDraft = $derived(usePolling ? photoDraftPoll! : photoDraftWs!);
const draftPhotos = $derived(photoDraft.data?.photos ?? []);
$effect(() => { $effect(() => {
if (messages.length) { const req = captureNowRequest.data;
if (req && hasCamera && !processedCaptureNowIds.has(req._id)) {
processedCaptureNowIds.add(req._id);
const tryCapture = () => {
const success = silentCaptureRef?.capture();
if (!success) {
setTimeout(tryCapture, 100);
}
};
tryCapture();
}
});
$effect(() => {
const req = myActiveRequest.data;
if (req?.status === 'captured' && req.photoMediaType) {
if (shownPreviewIds.has(req._id)) return;
shownPreviewIds.add(req._id);
const client = pollingClient ?? clientWs;
if (client) {
client.query(api.photoRequests.getPhotoPreview, { requestId: req._id }).then((data) => {
if (data) {
previewPhoto = {
thumbnail: data.thumbnailBase64,
mediaType: data.photoMediaType,
requestId: req._id
};
}
});
}
}
});
let prevMessageCount = 0;
let prevLastMessageId: string | undefined;
$effect(() => {
const count = messages.length;
const lastId = lastMessage?._id;
if (count > prevMessageCount || (lastId && lastId !== prevLastMessageId)) {
prevMessageCount = count;
prevLastMessageId = lastId;
window.scrollTo(0, document.body.scrollHeight); window.scrollTo(0, document.body.scrollHeight);
} }
}); });
async function sendMessage(content: string) { const clientWs = usePolling ? null : useConvexClient();
const pollingClient = usePolling ? usePollingClient() : null;
const createMessagePoll = usePolling ? usePollingMutation(api.messages.create) : null;
const registerDevicePoll = usePolling ? usePollingMutation(api.devicePairings.register) : null;
const heartbeatPoll = usePolling ? usePollingMutation(api.devicePairings.heartbeat) : null;
const addPhotoPoll = usePolling ? usePollingMutation(api.photoDrafts.addPhoto) : null;
const removePhotoPoll = usePolling ? usePollingMutation(api.photoDrafts.removePhoto) : null;
const createPairingPoll = usePolling ? usePollingMutation(api.pairingRequests.create) : null;
const acceptPairingPoll = usePolling ? usePollingMutation(api.pairingRequests.accept) : null;
const rejectPairingPoll = usePolling ? usePollingMutation(api.pairingRequests.reject) : null;
const unpairPoll = usePolling ? usePollingMutation(api.pairingRequests.unpair) : null;
const createRequestPoll = usePolling ? usePollingMutation(api.photoRequests.create) : null;
const markCaptureNowPoll = usePolling
? usePollingMutation(api.photoRequests.markCaptureNow)
: null;
const submitPhotoPoll = usePolling ? usePollingMutation(api.photoRequests.submitPhoto) : null;
const markRejectedPoll = usePolling ? usePollingMutation(api.photoRequests.markRejected) : null;
const acceptPhotoToDraftPoll = usePolling
? usePollingMutation(api.photoRequests.acceptPhotoToDraft)
: null;
$effect(() => {
if (!chatId || !deviceId) return;
if (usePolling && registerDevicePoll) {
registerDevicePoll({ chatId, deviceId, hasCamera });
} else if (clientWs) {
clientWs.mutation(api.devicePairings.register, {
chatId,
deviceId,
hasCamera
});
}
const interval = setInterval(() => {
if (usePolling && heartbeatPoll) {
heartbeatPoll({ chatId, deviceId });
} else if (clientWs) {
clientWs.mutation(api.devicePairings.heartbeat, { chatId, deviceId });
}
}, 10000);
return () => clearInterval(interval);
});
function sendMessage(content: string) {
const chat = chatData.data?.chat; const chat = chatData.data?.chat;
if (!chat) return; if (!chat) return;
await client.mutation(api.messages.create, { const photos = draftPhotos;
chatId: chat._id, const photoDraftIds = photos.length > 0 ? photos.map((p) => p._id) : undefined;
role: 'user',
content, const messageContent =
source: 'web' content || (photos.length > 0 ? 'Process images according to your task' : '');
}); if (!messageContent) return;
if (usePolling && createMessagePoll) {
createMessagePoll({
chatId: chat._id,
role: 'user',
content: messageContent,
source: 'web',
photoDraftIds
});
} else if (clientWs) {
clientWs.mutation(api.messages.create, {
chatId: chat._id,
role: 'user',
content: messageContent,
source: 'web',
photoDraftIds
});
}
} }
async function summarize() { function summarize() {
const chat = chatData.data?.chat; const chat = chatData.data?.chat;
if (!chat) return; if (!chat) return;
await client.mutation(api.messages.create, { if (usePolling && createMessagePoll) {
chatId: chat._id, createMessagePoll({
role: 'user', chatId: chat._id,
content: '/summarize', role: 'user',
source: 'web' content: '/summarize',
}); source: 'web'
});
} else if (clientWs) {
clientWs.mutation(api.messages.create, {
chatId: chat._id,
role: 'user',
content: '/summarize',
source: 'web'
});
}
}
// Open the camera capture UI; the result arrives via handleCameraCapture.
function handleTakePhoto() {
  showCamera = true;
}
/** Store a freshly captured photo as a draft for this chat/device. */
function handleCameraCapture(base64: string, mediaType: string) {
  showCamera = false;
  if (!chatId) return;
  const photo = { base64, mediaType };
  if (usePolling && addPhotoPoll) {
    addPhotoPoll({ chatId, deviceId, photo });
  } else if (clientWs) {
    clientWs.mutation(api.photoDrafts.addPhoto, { chatId, deviceId, photo });
  }
}
// Dismisses the camera overlay without capturing.
function handleCameraClose() {
	showCamera = false;
}
// Sends a pairing request from this device to whichever device answers on the chat.
function handlePair() {
	if (!chatId) return;
	const args = { chatId, fromDeviceId: deviceId };
	if (usePolling && createPairingPoll) {
		createPairingPoll(args);
	} else if (clientWs) {
		clientWs.mutation(api.pairingRequests.create, args);
	}
}
// Accepts the currently pending pairing request on behalf of this device.
function handleAcceptPairing() {
	const request = pendingPairing.data;
	if (!request) return;
	const args = { requestId: request._id, acceptingDeviceId: deviceId };
	if (usePolling && acceptPairingPoll) {
		acceptPairingPoll(args);
	} else if (clientWs) {
		clientWs.mutation(api.pairingRequests.accept, args);
	}
}
// Rejects the currently pending pairing request, if any.
function handleRejectPairing() {
	const request = pendingPairing.data;
	if (!request) return;
	const args = { requestId: request._id };
	if (usePolling && rejectPairingPoll) {
		rejectPairingPoll(args);
	} else if (clientWs) {
		clientWs.mutation(api.pairingRequests.reject, args);
	}
}
// Unpairs this device from the chat's paired device.
function handleUnpair() {
	if (!chatId) return;
	const args = { chatId, deviceId };
	if (usePolling && unpairPoll) {
		unpairPoll(args);
	} else if (clientWs) {
		clientWs.mutation(api.pairingRequests.unpair, args);
	}
}
// Asks the paired device to take a photo. On success, stores the new request id
// and shows the countdown overlay that precedes the capture-now signal.
function handleRequestPhoto() {
	if (!chatId || !pairedDevice.data) return;
	const args = {
		chatId,
		requesterId: deviceId,
		captureDeviceId: pairedDevice.data.deviceId
	};
	if (usePolling && createRequestPoll) {
		createRequestPoll(args).then((id) => {
			if (!id) return;
			activeRequestId = id as Id<'photoRequests'>;
			showWatchCountdown = true;
		});
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.create, args).then((id) => {
			activeRequestId = id;
			showWatchCountdown = true;
		});
	}
}
// Countdown finished: signal the capture device to take the photo now, and
// clear the active request id so the countdown cannot fire twice.
function handleWatchCountdownComplete() {
	showWatchCountdown = false;
	const requestId = activeRequestId;
	if (!requestId) return;
	activeRequestId = null;
	if (usePolling && markCaptureNowPoll) {
		markCaptureNowPoll({ requestId });
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.markCaptureNow, { requestId });
	}
}
// Countdown cancelled by the user: mark the outstanding photo request rejected
// so the capture device stops waiting, then clear local state.
//
// Fix: the original guarded the whole body with
// `if (activeRequestId && markRejectedPoll)`, which required the polling
// mutation to exist even on the websocket path — when `markRejectedPoll` was
// unavailable the WS `markRejected` mutation never fired. This now matches the
// transport-selection pattern used by every sibling handler
// (e.g. handlePreviewReject): `markRejectedPoll` is only required for polling.
function handleWatchCountdownCancel() {
	showWatchCountdown = false;
	const requestId = activeRequestId;
	activeRequestId = null;
	if (!requestId) return;
	if (usePolling && markRejectedPoll) {
		markRejectedPoll({ requestId });
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.markRejected, { requestId });
	}
}
// Uploads a silently captured photo (plus its thumbnail) in answer to the
// currently active capture-now request.
function handleSilentCapture(base64: string, mediaType: string, thumbnailBase64: string) {
	const request = captureNowRequest.data;
	if (!request) return;
	const payload = {
		requestId: request._id,
		photoBase64: base64,
		photoMediaType: mediaType,
		thumbnailBase64
	};
	if (usePolling && submitPhotoPoll) {
		submitPhotoPoll(payload);
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.submitPhoto, payload);
	}
}
// Accepts the previewed remote photo into this chat's draft, then closes the preview.
function handlePreviewAccept() {
	if (!previewPhoto || !chatId) return;
	const requestId = previewPhoto.requestId;
	previewPhoto = null;
	const args = { requestId, chatId, deviceId };
	if (usePolling && acceptPhotoToDraftPoll) {
		acceptPhotoToDraftPoll(args);
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.acceptPhotoToDraft, args);
	}
}
// Rejects the previewed remote photo and closes the preview.
function handlePreviewReject() {
	if (!previewPhoto) return;
	const requestId = previewPhoto.requestId;
	previewPhoto = null;
	if (usePolling && markRejectedPoll) {
		markRejectedPoll({ requestId });
	} else if (clientWs) {
		clientWs.mutation(api.photoRequests.markRejected, { requestId });
	}
}
function handleRemoveDraftPhoto(index: number) {
if (!chatId) return;
if (usePolling && removePhotoPoll) {
removePhotoPoll({ chatId, deviceId, index });
} else if (clientWs) {
clientWs.mutation(api.photoDrafts.removePhoto, {
chatId,
deviceId,
index
});
}
} }
</script> </script>
<svelte:head> <svelte:head>
<title>Chat</title> <title>Chat</title>
<meta name="viewport" content="width=device-width, initial-scale=1" /> <meta
name="viewport"
content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no, viewport-fit=cover"
/>
</svelte:head> </svelte:head>
<div class="min-h-dvh bg-black text-white"> <div class="min-h-dvh bg-black p-1.5 text-white">
{#if chatData.isLoading} {#if chatData.isLoading}
<div class="flex min-h-dvh items-center justify-center text-neutral-500">Loading...</div> <div class="py-4 text-center text-xs text-neutral-500">Loading...</div>
{:else if chatData.error} {:else if chatData.error}
<div class="flex min-h-dvh items-center justify-center text-red-500"> <div class="py-4 text-center text-red-500">
Error: {chatData.error.toString()} <div class="text-xs">Error</div>
<div class="text-[8px] break-all">{chatData.error.message}</div>
</div> </div>
{:else if !chatData.data} {:else if !chatData.data}
<div class="flex min-h-dvh items-center justify-center text-neutral-500">Chat not found</div> <div class="py-4 text-center text-xs text-neutral-500">Not found</div>
{:else} {:else}
<div class="space-y-1.5 p-2"> <div class="space-y-1">
{#each messages as message (message._id)} {#each messages as message, i (message._id)}
<ChatMessage {#if i === messages.length - 1}
role={message.role} <div bind:this={lastMessageElement}>
content={message.content} <ChatMessage
isStreaming={message.isStreaming} role={message.role}
/> content={message.content}
isStreaming={message.isStreaming}
/>
</div>
{:else}
<ChatMessage
role={message.role}
content={message.content}
isStreaming={message.isStreaming}
/>
{/if}
{/each} {/each}
</div> </div>
{#if followUpOptions.length > 0} {#if followUpOptions.length > 0}
<div class="border-t border-neutral-800 px-2 py-1.5"> <div class="mt-2">
<FollowUpButtons options={followUpOptions} onselect={sendMessage} /> <FollowUpButtons options={followUpOptions} onselect={sendMessage} />
</div> </div>
{/if} {/if}
<div class="border-t border-neutral-800 px-2 pt-1.5"> <div class="mt-3 space-y-2">
<button <div class="flex gap-2">
onclick={summarize} {#if hasCamera}
class="rounded bg-neutral-800 px-2 py-1 text-[10px] text-neutral-400" <button
> onclick={handleTakePhoto}
/summarize class="flex-1 rounded bg-neutral-800 py-2 text-xs text-neutral-300"
</button> >
</div> + photo
</button>
<div class="p-2 pt-1"> {/if}
<ChatInput onsubmit={sendMessage} /> {#if isPaired && pairedDevice.data?.hasCamera}
<button
onclick={handleRequestPhoto}
class="flex-1 rounded bg-neutral-800 py-2 text-xs text-neutral-300"
>
request
</button>
{/if}
{#if isPaired}
<button
onclick={handleUnpair}
class="flex-1 rounded bg-red-900/50 py-2 text-xs text-red-300"
>
unpair
</button>
{:else}
<button
onclick={handlePair}
class="flex-1 rounded bg-neutral-800 py-2 text-xs text-neutral-300"
>
pair
</button>
{/if}
<button
onclick={summarize}
class="flex-1 rounded bg-neutral-800 py-2 text-xs text-neutral-300"
>
/sum
</button>
</div>
{#if draftPhotos.length > 0}
<DraftBadge photos={draftPhotos} onremove={handleRemoveDraftPhoto} />
{/if}
<ChatInput onsubmit={sendMessage} allowEmpty={draftPhotos.length > 0} />
</div> </div>
{/if} {/if}
{#if showScrollButton}
<button
onclick={scrollToLastMessage}
class="fixed right-3 bottom-12 z-50 flex h-8 w-8 animate-pulse items-center justify-center rounded-full bg-blue-600 text-white shadow-lg"
>
</button>
{/if}
<StealthOverlay />
{#if showCamera}
<CameraCapture oncapture={handleCameraCapture} onclose={handleCameraClose} />
{/if}
{#if pendingPairing.data && !isPaired}
<div class="fixed inset-0 z-50 flex items-center justify-center bg-black/90" data-camera-ui>
<div class="rounded-lg bg-neutral-900 p-6 text-center">
<p class="mb-4 text-sm text-white">Accept pairing request?</p>
<div class="flex gap-3">
<button
onclick={handleAcceptPairing}
class="flex-1 rounded bg-blue-600 py-2 text-sm text-white"
>
Accept
</button>
<button
onclick={handleRejectPairing}
class="flex-1 rounded bg-neutral-700 py-2 text-sm text-white"
>
Reject
</button>
</div>
</div>
</div>
{/if}
{#if showWatchCountdown}
<WatchCountdown
oncomplete={handleWatchCountdownComplete}
oncancel={handleWatchCountdownCancel}
/>
{/if}
{#if previewPhoto}
<PhotoPreview
base64={previewPhoto.thumbnail}
mediaType={previewPhoto.mediaType}
onaccept={handlePreviewAccept}
onreject={handlePreviewReject}
/>
{/if}
{#if hasCamera && isPaired}
<SilentCapture
bind:this={silentCaptureRef}
oncapture={handleSilentCapture}
onunpair={handleUnpair}
/>
{/if}
</div> </div>

View File

@@ -0,0 +1,91 @@
import { api } from '$lib/convex/_generated/api';
import { error } from '@sveltejs/kit';
import type { RequestHandler } from './$types';
/**
 * Sniff an image MIME type from the file's magic bytes.
 *
 * Recognizes JPEG, PNG, GIF and WebP; returns the corresponding MIME type, or
 * `null` when the signature is unknown (including inputs shorter than the
 * signature being tested — out-of-range reads yield `undefined`, which never
 * equals a byte value).
 *
 * Fix: the original WebP check matched only the 4-byte `RIFF` tag, which is a
 * generic container header shared by WAV, AVI and others — any RIFF file was
 * reported as `image/webp`. A real WebP additionally carries the FourCC
 * `WEBP` at byte offset 8, which is now verified.
 */
function detectImageType(bytes: Uint8Array): string | null {
	// JPEG: FF D8 FF
	if (bytes[0] === 0xff && bytes[1] === 0xd8 && bytes[2] === 0xff) {
		return 'image/jpeg';
	}
	// PNG: 89 'P' 'N' 'G'
	if (bytes[0] === 0x89 && bytes[1] === 0x50 && bytes[2] === 0x4e && bytes[3] === 0x47) {
		return 'image/png';
	}
	// GIF: 'G' 'I' 'F'
	if (bytes[0] === 0x47 && bytes[1] === 0x49 && bytes[2] === 0x46) {
		return 'image/gif';
	}
	// WebP: 'R' 'I' 'F' 'F' <4-byte size> 'W' 'E' 'B' 'P'
	if (
		bytes[0] === 0x52 &&
		bytes[1] === 0x49 &&
		bytes[2] === 0x46 &&
		bytes[3] === 0x46 &&
		bytes[8] === 0x57 &&
		bytes[9] === 0x45 &&
		bytes[10] === 0x42 &&
		bytes[11] === 0x50
	) {
		return 'image/webp';
	}
	return null;
}
/**
 * POST /{mnemonic} — ingest an image into the chat identified by `mnemonic`.
 *
 * Accepts either a multipart form (first File-typed field wins, with common
 * field names tried first) or a raw image body. The image is base64-encoded
 * and posted into the chat as a user message via Convex; the optional
 * `x-caption` header becomes the message text.
 *
 * Responds 404 if no chat matches the mnemonic, 400 on URL-encoded forms,
 * missing file fields, or empty bodies; otherwise 200 with
 * `{ ok, mediaType, size }` (size is the base64 length, not the byte count).
 *
 * NOTE(review): the header dump below logs every request header, which can
 * include cookies/authorization values — consider redacting before shipping.
 */
export const POST: RequestHandler = async ({ params, request, locals }) => {
	const mnemonic = params.mnemonic;
	// Resolve the target chat; unknown mnemonics 404 before any body is read.
	const chatData = await locals.convex.query(api.chats.getByMnemonic, { mnemonic });
	if (!chatData) {
		throw error(404, 'Chat not found');
	}
	const rawContentType = request.headers.get('content-type') || '';
	const caption = request.headers.get('x-caption') || '';
	console.log('[POST /{mnemonic}] headers:', Object.fromEntries(request.headers.entries()));
	console.log('[POST /{mnemonic}] content-type:', rawContentType);
	let base64: string;
	let mediaType: string;
	if (rawContentType.includes('multipart/form-data')) {
		const formData = await request.formData();
		const keys = [...formData.keys()];
		console.log('[POST /{mnemonic}] formData keys:', keys);
		// Prefer well-known field names, then fall back to any field that holds a File.
		// (Duplicate key names in the probe list are harmless: the first File found wins.)
		let file: File | null = null;
		for (const key of ['file', 'image', 'photo', 'upload', 'attachment', ...keys]) {
			const value = formData.get(key);
			if (value instanceof File) {
				file = value;
				console.log('[POST /{mnemonic}] found file in field:', key);
				break;
			}
		}
		if (!file) {
			throw error(400, `No file found in form data. Keys: ${keys.join(', ')}`);
		}
		const buffer = await file.arrayBuffer();
		const bytes = new Uint8Array(buffer);
		base64 = Buffer.from(buffer).toString('base64');
		// Magic-byte sniffing beats the client-declared type; fall back to it, then JPEG.
		mediaType = detectImageType(bytes) || file.type || 'image/jpeg';
		console.log('[POST /{mnemonic}] file:', file.name, file.type, 'size:', buffer.byteLength);
	} else if (rawContentType.includes('application/x-www-form-urlencoded')) {
		// URL-encoded bodies mangle binary data; tell the client to use a file field.
		throw error(400, 'Use Form with File field, not URL-encoded form');
	} else {
		// Anything else is treated as a raw image body.
		const buffer = await request.arrayBuffer();
		const bytes = new Uint8Array(buffer);
		base64 = Buffer.from(buffer).toString('base64');
		mediaType = detectImageType(bytes) || rawContentType || 'image/jpeg';
		console.log('[POST /{mnemonic}] raw bytes size:', buffer.byteLength);
		console.log('[POST /{mnemonic}] detected type:', mediaType);
	}
	if (!base64 || base64.length === 0) {
		throw error(400, 'Empty image data');
	}
	// Post the image into the chat as a user message; the bot pipeline picks it up.
	await locals.convex.mutation(api.messages.create, {
		chatId: chatData._id,
		role: 'user',
		content: caption,
		source: 'web',
		imageBase64: base64,
		imageMediaType: mediaType
	});
	return new Response(JSON.stringify({ ok: true, mediaType, size: base64.length }), {
		headers: { 'Content-Type': 'application/json' }
	});
};

View File

@@ -1,5 +1,17 @@
@import 'tailwindcss'; @import 'tailwindcss';
html,
body {
background: black;
min-height: 100%;
overscroll-behavior: none;
-webkit-overflow-scrolling: touch;
}
* {
-webkit-tap-highlight-color: transparent;
}
.prose-mini h1, .prose-mini h1,
.prose-mini h2, .prose-mini h2,
.prose-mini h3, .prose-mini h3,

View File

@@ -3,5 +3,6 @@ import { sveltekit } from '@sveltejs/kit/vite';
import { defineConfig } from 'vite'; import { defineConfig } from 'vite';
export default defineConfig({ export default defineConfig({
plugins: [tailwindcss(), sveltekit()] plugins: [tailwindcss(), sveltekit()],
server: { allowedHosts: ['reasonable-duncan-stations-parking.trycloudflare.com'] }
}); });