"""FluentGerman.ai — Chat router with SSE streaming."""
import json
import logging

from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession

from app.auth import get_current_user
from app.database import get_db
from app.models import Instruction, User
from app.schemas import ChatRequest, DashboardOut
from app.services.instruction_service import get_system_prompt
from app.services.llm_service import chat_stream
# Module-level logger; named explicitly (not __name__) to match the app's
# "fluentgerman.<module>" logging hierarchy.
logger = logging.getLogger("fluentgerman.chat")
# All endpoints in this module are mounted under /api/chat.
router = APIRouter(prefix="/api/chat", tags=["chat"])
@router.get("/dashboard", response_model=DashboardOut)
async def dashboard(
    user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """Return personalised dashboard data for the authenticated user.

    The payload carries the username and the creation timestamp of the most
    recent instruction visible to this user (their own or a global one with
    NULL user_id); the timestamp is None when no instruction exists.
    """
    # MAX(created_at) over the user's own instructions plus the shared
    # (user_id IS NULL) ones — a single scalar aggregate query.
    stmt = select(func.max(Instruction.created_at)).where(
        (Instruction.user_id == user.id) | Instruction.user_id.is_(None)
    )
    newest = (await db.execute(stmt)).scalar_one_or_none()
    return DashboardOut(username=user.username, latest_instruction_at=newest)
@router.post("/")
async def chat(
    body: ChatRequest,
    user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """Send a message and receive a streamed SSE response.

    Builds the OpenAI-style message list (system prompt, prior history, new
    user message) and streams LLM tokens back as ``data: {"token": ...}``
    events, terminated by ``data: [DONE]``. On an LLM failure the client
    receives a ``data: {"error": ...}`` event followed by the terminator.
    """
    logger.info(
        "Chat request from user=%s message_len=%d history=%d",
        user.username, len(body.message), len(body.history),
    )

    system_prompt = await get_system_prompt(db, user.id)

    # System prompt first, then prior turns in order, then the new message.
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend({"role": m.role, "content": m.content} for m in body.history)
    messages.append({"role": "user", "content": body.message})

    async def event_generator():
        try:
            async for token in chat_stream(messages):
                yield f"data: {json.dumps({'token': token})}\n\n"
        except Exception as e:  # top-level boundary: report, don't crash stream
            logger.exception("LLM streaming error: %s", e)
            yield f"data: {json.dumps({'error': str(e)})}\n\n"
        # BUGFIX: emit the terminator on the error path too — previously it was
        # only sent on success, leaving EventSource clients waiting forever
        # after a failure. Placed after the try (not in ``finally``) so a client
        # disconnect (GeneratorExit) does not trigger a yield-in-finally error.
        yield "data: [DONE]\n\n"

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        # Prevent intermediaries from buffering the stream: Cache-Control for
        # proxies/browsers, X-Accel-Buffering for nginx reverse proxies.
        headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"},
    )