fix speech-to-text bugs and implement avatar and personal greeting
All checks were successful
Deploy FluentGerman.ai / deploy (push) Successful in 53s

2026-02-16 20:11:09 +01:00
parent 8f5bfa3cbc
commit 84cd052ded
8 changed files with 284 additions and 19 deletions

View File

@@ -14,11 +14,32 @@ function renderMarkdown(text) {
   return DOMPurify.sanitize(raw);
 }
 
+/**
+ * Format a date as a friendly relative time string.
+ */
+function relativeTime(dateStr) {
+  if (!dateStr) return null;
+  const date = new Date(dateStr);
+  const now = new Date();
+  const diffMs = now - date;
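+  // Millisecond divisors: 60000 ms = 1 minute, 3600000 = 1 hour, 86400000 = 1 day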
+  const diffMins = Math.floor(diffMs / 60000);
+  const diffHours = Math.floor(diffMs / 3600000);
+  const diffDays = Math.floor(diffMs / 86400000);
+  if (diffMins < 1) return 'just now';
+  if (diffMins < 60) return `${diffMins} minute${diffMins !== 1 ? 's' : ''} ago`;
+  if (diffHours < 24) return `${diffHours} hour${diffHours !== 1 ? 's' : ''} ago`;
+  if (diffDays === 1) return 'yesterday';
+  if (diffDays < 30) return `${diffDays} days ago`;
+  return date.toLocaleDateString('en-GB', { day: 'numeric', month: 'short', year: 'numeric' });
+}
 document.addEventListener('DOMContentLoaded', async () => {
   if (!requireAuth()) return;
   const user = getUser();
-  document.getElementById('user-name').textContent = user?.username || 'User';
+  const displayName = user?.username || 'User';
+  document.getElementById('user-name').textContent = displayName;
   const messagesEl = document.getElementById('chat-messages');
   const inputEl = document.getElementById('chat-input');
@@ -27,7 +48,36 @@ document.addEventListener('DOMContentLoaded', async () => {
   let history = [];
 
-  // Init voice
+  // ── Personalised welcome ──────────────────────────────────────────
+  const greetingEl = document.getElementById('welcome-greeting');
+  const subtitleEl = document.getElementById('welcome-subtitle');
+  const metaEl = document.getElementById('welcome-meta');
+  // Immediately show the username we already have from localStorage
+  greetingEl.textContent = `Hallo, ${displayName}! 👋`;
+  // Fetch dashboard info for latest instruction data
+  try {
+    const resp = await api('/chat/dashboard');
+    if (resp?.ok) {
+      const data = await resp.json();
+      // Use server username (authoritative)
+      greetingEl.textContent = `Hallo, ${data.username}! 👋`;
+      if (data.latest_instruction_at) {
+        const ago = relativeTime(data.latest_instruction_at);
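+        // Safe to use innerHTML below: relativeTime() returns only fixed phrases
+        // or a locale-formatted date, never raw user input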
+        metaEl.innerHTML = `<span class="meta-dot"></span> Lessons last updated <strong>${ago}</strong>`;
+        metaEl.classList.add('visible');
+      } else {
+        metaEl.textContent = 'No custom lessons configured yet';
+        metaEl.classList.add('visible');
+      }
+    }
+  } catch (e) {
+    console.warn('[Chat] Could not fetch dashboard info:', e);
+  }
+
+  // ── Voice ─────────────────────────────────────────────────────────
   const voice = new VoiceManager();
   await voice.init();
@@ -44,7 +94,7 @@ document.addEventListener('DOMContentLoaded', async () => {
   voiceBtn.addEventListener('click', () => voice.toggleRecording());
 
-  // Chat
+  // ── Chat ──────────────────────────────────────────────────────────
   function appendMessage(role, content) {
     const div = document.createElement('div');
     div.className = `message message-${role}`;
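
For reference, the thresholds the new relativeTime() helper walks through, with expected outputs as comments (a sketch; the exact text of the final branch depends on the runtime's en-GB locale data):

const now = Date.now();
relativeTime(new Date(now - 30 * 1000).toISOString());      // 'just now'
relativeTime(new Date(now - 5 * 60000).toISOString());      // '5 minutes ago'
relativeTime(new Date(now - 3 * 3600000).toISOString());    // '3 hours ago'
relativeTime(new Date(now - 86400000).toISOString());       // 'yesterday'
relativeTime(new Date(now - 12 * 86400000).toISOString());  // '12 days ago'
relativeTime(new Date(now - 45 * 86400000).toISOString());  // e.g. '2 Jan 2026'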

View File

@@ -11,10 +11,11 @@ class VoiceManager {
     this.audioChunks = [];
     this.onResult = null;
     this.onStateChange = null;
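+    // Whether the browser's SpeechRecognition API is available; set in _initBrowserSTT()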
+    this.browserSTTSupported = false;
   }
 
   async init() {
-    // Always init browser STT as fallback
+    // Check browser STT support
     this._initBrowserSTT();
 
     // Fetch voice mode from server
@@ -23,21 +24,32 @@ class VoiceManager {
       if (response?.ok) {
         const config = await response.json();
         this.mode = config.voice_mode;
-        console.log('[Voice] Mode:', this.mode);
+        console.log('[Voice] Server mode:', this.mode);
       }
     } catch (e) {
       console.warn('[Voice] Could not fetch config, using browser mode');
       this.mode = 'browser';
     }
+
+    // Auto-fallback: if server says "browser" but browser doesn't support STT, use API
+    if (this.mode === 'browser' && !this.browserSTTSupported) {
+      console.warn('[Voice] Browser STT not supported, falling back to API mode');
+      this.mode = 'api';
+      showToast('Using cloud voice recognition — your browser doesn\'t support built-in speech recognition.', 'info');
+    }
+    console.log('[Voice] Active mode:', this.mode);
   }
 
   _initBrowserSTT() {
     const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
     if (!SpeechRecognition) {
-      console.warn('[Voice] Speech recognition not supported in this browser');
+      console.warn('[Voice] SpeechRecognition API not available in this browser');
+      this.browserSTTSupported = false;
       return;
     }
+    this.browserSTTSupported = true;
     this.recognition = new SpeechRecognition();
     this.recognition.continuous = false;
     this.recognition.interimResults = false;
@@ -114,10 +126,12 @@ class VoiceManager {
           showToast('Voice recognition failed to start. Try again.', 'error');
         }
       } else {
-        console.warn('[Voice] No speech recognition available');
-        showToast('Speech recognition not supported in this browser', 'error');
+        // Shouldn't happen after init() fallback, but safety net
+        console.warn('[Voice] No speech recognition available, switching to API');
+        this.mode = 'api';
+        this.isRecording = false;
+        if (this.onStateChange) this.onStateChange(false);
+        showToast('Switched to cloud voice recognition. Please try again.', 'info');
       }
     }
   }
@@ -161,9 +175,7 @@ class VoiceManager {
       this.lastInputWasVoice = true;
       if (this.onResult) this.onResult(data.text);
     } else {
-      showToast('Transcription failed. Falling back to browser voice.', 'error');
-      // Fallback: switch to browser mode for this session
-      this.mode = 'browser';
+      showToast('Transcription failed. Please try again.', 'error');
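+      // Deliberately no switch back to browser STT here: API mode may itself be
+      // the fallback chosen because SpeechRecognition is unsupported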
     }
   } catch (e) {
     console.error('[Voice] API transcription error:', e);
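
Putting the two files together, the mode resolution after init() can be summarised as below, followed by a minimal wiring sketch. This only restates the logic visible in this diff; inputEl and voiceBtn are the chat page's input and mic button from the first file, and the callback bodies are illustrative:

// init() resolution:
//   server says 'api'                      → 'api'
//   server says 'browser', STT available   → 'browser'
//   server says 'browser', STT unavailable → 'api' (info toast shown)
//   config fetch fails                     → 'browser', then the same capability check
const voice = new VoiceManager();
await voice.init();                                    // resolves the active mode
voice.onResult = (text) => { inputEl.value = text; };  // transcript lands in the chat input
voice.onStateChange = (rec) => voiceBtn.classList.toggle('recording', rec);
voiceBtn.addEventListener('click', () => voice.toggleRecording());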