feat(*): first mvp

This commit is contained in: h
2026-01-20 21:54:48 +01:00
parent b9703da2fc
commit ec17f5e0fd
52 changed files with 2599 additions and 576 deletions


@@ -0,0 +1,115 @@
from collections.abc import Awaitable, Callable
from dataclasses import dataclass

from pydantic_ai import Agent, BinaryContent
from pydantic_ai.messages import (
    ModelMessage,
    ModelRequest,
    ModelResponse,
    TextPart,
    UserPromptPart,
)
from pydantic_ai.models.google import GoogleModel
from pydantic_ai.providers.google import GoogleProvider

from .models import FollowUpOptions
from .prompts import DEFAULT_FOLLOW_UP

# Async callback invoked with the accumulated response text after each chunk.
StreamCallback = Callable[[str], Awaitable[None]]


@dataclass
class ImageData:
    """Raw image bytes plus their MIME type (e.g. "image/png")."""

    data: bytes
    media_type: str


LATEX_INSTRUCTION = "For math, use LaTeX: $...$ inline, $$...$$ display."
DEFAULT_SYSTEM_PROMPT = (
    "You are a helpful AI assistant. Provide clear, concise answers."
)

def create_text_agent(
    api_key: str,
    model_name: str = "gemini-3-pro-preview",
    system_prompt: str | None = None,
) -> Agent[None, str]:
    """Create the main chat agent, with the LaTeX instruction appended."""
    provider = GoogleProvider(api_key=api_key)
    model = GoogleModel(model_name, provider=provider)
    base_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT
    full_prompt = f"{base_prompt} {LATEX_INSTRUCTION}"
    return Agent(model, system_prompt=full_prompt)

def create_follow_up_agent(
    api_key: str,
    model_name: str = "gemini-2.5-flash-lite",
    system_prompt: str | None = None,
) -> Agent[None, FollowUpOptions]:
    """Create a cheaper agent that returns structured follow-up suggestions."""
    provider = GoogleProvider(api_key=api_key)
    model = GoogleModel(model_name, provider=provider)
    prompt = system_prompt or DEFAULT_FOLLOW_UP
    return Agent(model, output_type=FollowUpOptions, system_prompt=prompt)
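
# FollowUpOptions is imported from .models, which is not part of this file.
# Because get_follow_ups() below indexes result.output["options"], it is
# presumably a TypedDict along these lines -- a hedged sketch, not the actual
# definition from this commit:
#
#     class FollowUpOptions(TypedDict):
#         options: list[str]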

def build_message_history(history: list[dict[str, str]]) -> list[ModelMessage]:
    """Convert simple {"role", "content"} dicts into pydantic_ai messages."""
    messages: list[ModelMessage] = []
    for msg in history:
        if msg["role"] == "user":
            messages.append(
                ModelRequest(parts=[UserPromptPart(content=msg["content"])])
            )
        else:
            messages.append(ModelResponse(parts=[TextPart(content=msg["content"])]))
    return messages
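
# For example (hypothetical input, illustrating the expected shape):
#
#     build_message_history([
#         {"role": "user", "content": "Hi"},
#         {"role": "assistant", "content": "Hello!"},
#     ])
#     # -> [ModelRequest(parts=[UserPromptPart(content="Hi")]),
#     #     ModelResponse(parts=[TextPart(content="Hello!")])]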

async def stream_response(  # noqa: PLR0913
    text_agent: Agent[None, str],
    message: str,
    history: list[dict[str, str]] | None = None,
    on_chunk: StreamCallback | None = None,
    image: ImageData | None = None,
    images: list[ImageData] | None = None,
) -> str:
    """Stream a reply, invoking on_chunk with the text accumulated so far."""
    message_history = build_message_history(history) if history else None
    # `images` takes precedence over the single-`image` convenience parameter.
    all_images = images or ([image] if image else [])
    if all_images:
        # Multimodal prompt: the text followed by each image as BinaryContent.
        prompt: list[str | BinaryContent] = [message]
        prompt.extend(
            BinaryContent(data=img.data, media_type=img.media_type)
            for img in all_images
        )
    else:
        prompt = message  # type: ignore[assignment]
    stream = text_agent.run_stream(prompt, message_history=message_history)
    async with stream as result:
        # stream_text() yields the full text so far (not deltas) by default.
        async for text in result.stream_text():
            if on_chunk:
                await on_chunk(text)
        return await result.get_output()
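
# A hypothetical call, showing the intended shape (png_bytes and
# send_partial_to_client are assumed names, not part of this commit):
#
#     reply = await stream_response(
#         agent,
#         "What is in this picture?",
#         image=ImageData(data=png_bytes, media_type="image/png"),
#         on_chunk=send_partial_to_client,
#     )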

async def get_follow_ups(
    follow_up_agent: Agent[None, FollowUpOptions],
    history: list[dict[str, str]],
    image: ImageData | None = None,
) -> list[str]:
    """Ask the follow-up agent for suggested next questions."""
    message_history = build_message_history(history) if history else None
    if image:
        prompt: list[str | BinaryContent] = [
            "Suggest follow-up options based on this conversation and image.",
            BinaryContent(data=image.data, media_type=image.media_type),
        ]
    else:
        prompt = "Suggest follow-up questions based on this conversation."  # type: ignore[assignment]
    result = await follow_up_agent.run(prompt, message_history=message_history)
    return result.output["options"]
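
Taken together, a caller might wire these functions up roughly like this. This is a minimal sketch assuming the module's functions are in scope; the placeholder API key and conversation content are illustrative, not part of this commit:

import asyncio

async def main() -> None:
    agent = create_text_agent(api_key="...")  # key sourcing is up to the caller
    reply = await stream_response(agent, "Explain Euler's identity.")
    follow_up_agent = create_follow_up_agent(api_key="...")
    suggestions = await get_follow_ups(
        follow_up_agent,
        history=[
            {"role": "user", "content": "Explain Euler's identity."},
            {"role": "assistant", "content": reply},
        ],
    )
    print(reply, suggestions)

asyncio.run(main())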