initial commit
This commit is contained in:
0
backend/app/services/__init__.py
Normal file
0
backend/app/services/__init__.py
Normal file
52
backend/app/services/instruction_service.py
Normal file
52
backend/app/services/instruction_service.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""FluentGerman.ai — Instruction assembly service."""
|
||||
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.models import Instruction, InstructionType
|
||||
|
||||
|
||||
async def get_system_prompt(db: AsyncSession, user_id: int) -> str:
    """Assemble the full system prompt from global + personal + homework instructions.

    Args:
        db: Async SQLAlchemy session used to load ``Instruction`` rows.
        user_id: Owner of the personal/homework instructions to include.

    Returns:
        The sections joined with blank lines, or a generic German-tutor
        prompt when no instructions exist at all.
    """

    def _section(header: str, instructions) -> list[str]:
        # One titled section; empty instruction lists produce no output,
        # which keeps headers out of the prompt when there is nothing to say.
        if not instructions:
            return []
        return [header] + [f"[{inst.title}]\n{inst.content}" for inst in instructions]

    # Global instructions are shared by everyone (user_id IS NULL).
    result = await db.execute(
        select(Instruction).where(
            Instruction.user_id.is_(None),
            Instruction.type == InstructionType.GLOBAL,
        )
    )
    global_instructions = result.scalars().all()

    # Personal + homework instructions belong to this user (any type).
    result = await db.execute(
        select(Instruction).where(Instruction.user_id == user_id)
    )
    user_instructions = result.scalars().all()

    parts: list[str] = []
    parts += _section("=== TEACHING METHOD ===", global_instructions)
    parts += _section(
        "\n=== PERSONAL INSTRUCTIONS ===",
        [i for i in user_instructions if i.type == InstructionType.PERSONAL],
    )
    parts += _section(
        "\n=== CURRENT HOMEWORK ===",
        [i for i in user_instructions if i.type == InstructionType.HOMEWORK],
    )

    if not parts:
        # Fallback so brand-new users still get a sensible tutor persona.
        return (
            "You are a helpful German language tutor. Help the student learn German "
            "through conversation, corrections, and explanations."
        )

    return "\n\n".join(parts)
|
||||
65
backend/app/services/llm_service.py
Normal file
65
backend/app/services/llm_service.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""FluentGerman.ai — LLM service (provider-agnostic via LiteLLM)."""
|
||||
|
||||
import logging
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
import litellm
|
||||
|
||||
from app.config import get_settings
|
||||
|
||||
logger = logging.getLogger("fluentgerman.llm")
|
||||
|
||||
|
||||
def _resolve_model(model: str, provider: str) -> str:
|
||||
"""Ensure Gemini models have the 'gemini/' prefix for Google AI Studio."""
|
||||
if provider == "gemini" and not model.startswith("gemini/"):
|
||||
resolved = f"gemini/{model}"
|
||||
logger.info("Auto-prefixed model: %s → %s (Google AI Studio)", model, resolved)
|
||||
return resolved
|
||||
return model
|
||||
|
||||
|
||||
async def chat_stream(
    messages: list[dict[str, str]],
    model: str | None = None,
) -> AsyncGenerator[str, None]:
    """Stream chat completion tokens from the configured LLM provider.

    Yields each non-empty content delta as soon as the provider emits it.
    When *model* is omitted, the configured default model is used.
    """
    cfg = get_settings()
    resolved = _resolve_model(model or cfg.llm_model, cfg.llm_provider)

    logger.info("LLM request: model=%s messages=%d", resolved, len(messages))

    stream = await litellm.acompletion(
        model=resolved,
        messages=messages,
        api_key=cfg.llm_api_key,
        stream=True,
    )

    async for event in stream:
        piece = event.choices[0].delta.content
        if piece:
            yield piece
|
||||
|
||||
|
||||
async def summarize_instruction(raw_text: str) -> str:
    """Ask the LLM to distill raw voice transcript into a structured instruction.

    Args:
        raw_text: Unedited transcript of the teacher describing an
            instruction, homework, or teaching method.

    Returns:
        The distilled instruction text with surrounding whitespace removed;
        empty string if the provider returns no content.
    """
    settings = get_settings()
    model = _resolve_model(settings.llm_model, settings.llm_provider)

    meta_prompt = (
        "You are an expert assistant for a language teacher. "
        "The following text is a raw transcript of the teacher describing a learning instruction, "
        "homework, or teaching method. Distill it into a clear, concise, structured instruction "
        "that can be used as a system prompt for a language-learning LLM assistant. "
        "Output ONLY the instruction text, no preamble.\n\n"
        f"Transcript:\n{raw_text}"
    )

    response = await litellm.acompletion(
        model=model,
        messages=[{"role": "user", "content": meta_prompt}],
        api_key=settings.llm_api_key,
    )

    # Some providers return None content (e.g. refusals / tool-only replies);
    # guard so .strip() cannot raise AttributeError.
    return (response.choices[0].message.content or "").strip()
|
||||
37
backend/app/services/voice_service.py
Normal file
37
backend/app/services/voice_service.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""FluentGerman.ai — Voice service (API provider + browser fallback)."""
|
||||
|
||||
import io
|
||||
|
||||
import openai
|
||||
|
||||
from app.config import get_settings
|
||||
|
||||
|
||||
async def transcribe(audio_bytes: bytes, filename: str = "audio.webm") -> str:
    """Transcribe audio to text using OpenAI Whisper API.

    The raw bytes are wrapped in a named in-memory buffer; the OpenAI
    client reads the upload format from the buffer's file name.
    """
    cfg = get_settings()
    # NOTE(review): reuses llm_api_key for the STT call — assumes the
    # configured key is an OpenAI key; confirm when llm_provider != openai.
    client = openai.AsyncOpenAI(api_key=cfg.llm_api_key)

    buffer = io.BytesIO(audio_bytes)
    buffer.name = filename  # extension drives format detection client-side

    result = await client.audio.transcriptions.create(
        model=cfg.stt_model,
        file=buffer,
    )
    return result.text
|
||||
|
||||
|
||||
async def synthesize(text: str) -> bytes:
    """Synthesize text to speech using OpenAI TTS API.

    Returns the rendered audio as raw MP3 bytes.
    """
    cfg = get_settings()
    # NOTE(review): reuses llm_api_key for the TTS call — assumes the
    # configured key is an OpenAI key; confirm when llm_provider != openai.
    client = openai.AsyncOpenAI(api_key=cfg.llm_api_key)

    speech = await client.audio.speech.create(
        model=cfg.tts_model,
        voice=cfg.tts_voice,
        input=text,
        response_format="mp3",
    )
    return speech.content
|
||||
Reference in New Issue
Block a user