Added more logging for debugging voice issues
Some checks failed
Deploy FluentGerman.ai / deploy (push) Failing after 27s

This commit is contained in:
2026-02-18 11:52:41 +01:00
parent be366777d4
commit 8631e286bd
3 changed files with 32 additions and 33 deletions

View File

@@ -1,34 +1,13 @@
"""FluentGerman.ai — Admin voice-to-instruction & voice API router."""
from fastapi import APIRouter, Depends, UploadFile, File
from sqlalchemy.ext.asyncio import AsyncSession
from fastapi import APIRouter, Depends, UploadFile, File, HTTPException
import logging
from app.auth import require_admin, get_current_user
from app.config import get_settings
from app.database import get_db
from app.models import User
from app.schemas import VoiceConfigOut, VoiceInstructionRequest
from app.services.llm_service import summarize_instruction
from app.services.voice_service import synthesize, transcribe
from fastapi.responses import Response
# ... imports ...
router = APIRouter(prefix="/api/voice", tags=["voice"])
@router.get("/config", response_model=VoiceConfigOut)
async def voice_config(user: User = Depends(get_current_user)):
    """Return current voice mode so frontend knows whether to use browser or API."""
    cfg = get_settings()
    # Whisper-based API STT requires an OpenAI-compatible credential: either a
    # dedicated voice key, or the generic LLM key when the provider is "openai".
    key_present = bool(
        cfg.openai_api_key
        or (cfg.llm_api_key and cfg.llm_provider == "openai")
    )
    return VoiceConfigOut(
        voice_mode=cfg.voice_mode,
        voice_api_available=cfg.voice_mode == "api" and key_present,
    )
logger = logging.getLogger("fluentgerman.voice")
# ...
@router.post("/transcribe")
async def transcribe_audio(
@@ -36,9 +15,13 @@ async def transcribe_audio(
user: User = Depends(get_current_user),
):
"""Transcribe uploaded audio to text (API mode only)."""
audio_bytes = await audio.read()
text = await transcribe(audio_bytes, filename=audio.filename or "audio.webm")
return {"text": text}
try:
audio_bytes = await audio.read()
text = await transcribe(audio_bytes, filename=audio.filename or "audio.webm")
return {"text": text}
except Exception as e:
logger.error(f"Transcription failed: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=f"Transcription failed: {str(e)}")
@router.post("/synthesize")
@@ -47,8 +30,12 @@ async def synthesize_text(
user: User = Depends(get_current_user),
):
"""Convert text to speech audio (API mode only)."""
audio_bytes = await synthesize(text)
return Response(content=audio_bytes, media_type="audio/mpeg")
try:
audio_bytes = await synthesize(text)
return Response(content=audio_bytes, media_type="audio/mpeg")
except Exception as e:
logger.error(f"Synthesis failed: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=f"Synthesis failed: {str(e)}")
@router.post("/generate-instruction", dependencies=[Depends(require_admin)])

View File

@@ -1,6 +1,7 @@
"""FluentGerman.ai — Voice service (API provider + browser fallback)."""
import io
import logging
import openai
@@ -12,6 +13,11 @@ async def transcribe(audio_bytes: bytes, filename: str = "audio.webm") -> str:
settings = get_settings()
# Use dedicated OpenAI key if available, otherwise fallback to LLM key
api_key = settings.openai_api_key or settings.llm_api_key
key_type = "OPENAI_API_KEY" if settings.openai_api_key else "LLM_API_KEY"
masked = f"{api_key[:4]}...{api_key[-4:]}" if api_key and len(api_key) > 8 else "EMPTY"
logging.getLogger("fluentgerman.voice").info(f"Transcribing with {key_type}: {masked}")
client = openai.AsyncOpenAI(api_key=api_key)
audio_file = io.BytesIO(audio_bytes)
@@ -29,6 +35,11 @@ async def synthesize(text: str) -> bytes:
settings = get_settings()
# Use dedicated OpenAI key if available, otherwise fallback to LLM key
api_key = settings.openai_api_key or settings.llm_api_key
key_type = "OPENAI_API_KEY" if settings.openai_api_key else "LLM_API_KEY"
masked = f"{api_key[:4]}...{api_key[-4:]}" if api_key and len(api_key) > 8 else "EMPTY"
logging.getLogger("fluentgerman.voice").info(f"Synthesizing with {key_type}: {masked}")
client = openai.AsyncOpenAI(api_key=api_key)
response = await client.audio.speech.create(