updated avatars and voice mode
All checks were successful
Deploy FluentGerman.ai / deploy (push) Successful in 51s

This commit is contained in:
2026-02-16 20:59:39 +01:00
parent 3890f0479f
commit bce4124974
5 changed files with 227 additions and 137 deletions

View File

@@ -24,12 +24,12 @@
<!-- Chat -->
<div class="chat-container">
<div class="chat-messages" id="chat-messages">
<!-- Welcome section injected by JS -->
<!-- Welcome section -->
<div class="welcome-section" id="welcome-section">
<div class="welcome-row">
<div class="avatar-container" id="avatar-container">
<div class="avatar-ring"></div>
<div class="avatar-placeholder">🎓</div>
<img class="avatar-img" id="avatar-img" src="" alt="Tutor avatar">
<div class="avatar-pulse"></div>
</div>
<div class="welcome-text">
@@ -42,10 +42,31 @@
</div>
<div class="chat-input-bar">
<button class="voice-btn" id="voice-btn" title="Voice input">🎤</button>
<input type="text" id="chat-input" placeholder="Type your message or click 🎤 to speak..."
autocomplete="off">
<!-- Voice toggle -->
<label class="voice-toggle" title="Toggle voice mode">
<input type="checkbox" id="voice-toggle-input">
<span class="voice-toggle-slider"></span>
<svg class="voice-toggle-icon" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z" />
<path d="M19 10v2a7 7 0 0 1-14 0v-2" />
<line x1="12" y1="19" x2="12" y2="23" />
<line x1="8" y1="23" x2="16" y2="23" />
</svg>
</label>
<input type="text" id="chat-input" placeholder="Type your message..." autocomplete="off">
<button class="btn btn-primary" id="send-btn">Send</button>
<!-- Mic button (visible only when voice mode ON) -->
<button class="mic-btn hidden" id="mic-btn" title="Hold to speak">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round"
stroke-linejoin="round">
<path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z" />
<path d="M19 10v2a7 7 0 0 1-14 0v-2" />
<line x1="12" y1="19" x2="12" y2="23" />
<line x1="8" y1="23" x2="16" y2="23" />
</svg>
</button>
</div>
</div>

View File

@@ -519,6 +519,7 @@ textarea {
.chat-input-bar {
display: flex;
align-items: center;
gap: 10px;
padding: 16px 24px;
background: var(--glass-bg);
@@ -530,29 +531,99 @@ textarea {
flex: 1;
}
.voice-btn {
width: 46px;
height: 46px;
border-radius: 50%;
/* ── Voice Toggle Switch ──────────────────────────────────────────── */
.voice-toggle {
position: relative;
display: flex;
align-items: center;
cursor: pointer;
flex-shrink: 0;
}
.voice-toggle input {
position: absolute;
opacity: 0;
width: 0;
height: 0;
}
.voice-toggle-slider {
width: 40px;
height: 22px;
background: var(--bg-input);
border: 1px solid var(--glass-border);
border-radius: var(--radius-full);
transition: all var(--transition);
position: relative;
flex-shrink: 0;
}
.voice-toggle-slider::before {
content: '';
position: absolute;
top: 2px;
left: 2px;
width: 16px;
height: 16px;
border-radius: 50%;
background: var(--text-muted);
transition: all var(--transition);
}
.voice-toggle input:checked+.voice-toggle-slider {
background: var(--gradient-btn);
border-color: transparent;
box-shadow: 0 0 12px var(--accent-glow);
}
.voice-toggle input:checked+.voice-toggle-slider::before {
transform: translateX(18px);
background: #fff;
}
.voice-toggle-icon {
width: 16px;
height: 16px;
margin-left: 4px;
color: var(--text-muted);
transition: color var(--transition);
flex-shrink: 0;
}
.voice-toggle input:checked~.voice-toggle-icon {
color: var(--accent);
}
/* ── Mic Button (modern, subtle) ─────────────────────────────────── */
.mic-btn {
width: 42px;
height: 42px;
border-radius: 50%;
background: transparent;
border: 1px solid var(--glass-border);
color: var(--text-secondary);
font-size: 1.2rem;
cursor: pointer;
transition: all var(--transition);
display: flex;
align-items: center;
justify-content: center;
position: relative;
flex-shrink: 0;
padding: 0;
}
.voice-btn:hover {
.mic-btn svg {
width: 18px;
height: 18px;
}
.mic-btn:hover {
background: var(--bg-hover);
color: var(--text-primary);
color: var(--accent);
border-color: var(--border-focus);
transform: scale(1.05);
}
.voice-btn.recording {
.mic-btn.recording {
background: linear-gradient(135deg, #fb7185, #e11d48);
color: #fff;
border-color: transparent;
@@ -560,6 +631,15 @@ textarea {
animation: recordPulse 1.8s infinite;
}
.mic-btn.hidden {
display: none;
}
/* ── Utility ──────────────────────────────────────────────────────── */
.hidden {
display: none !important;
}
/* ── Tables ───────────────────────────────────────────────────────── */
.table-wrapper {
overflow-x: auto;
@@ -783,15 +863,14 @@ tr:hover td {
animation: avatarSpin 6s linear infinite;
}
.avatar-placeholder {
.avatar-img {
position: absolute;
inset: 4px;
width: calc(100% - 8px);
height: calc(100% - 8px);
border-radius: 50%;
object-fit: cover;
background: var(--glass-bg);
display: flex;
align-items: center;
justify-content: center;
font-size: 1.8rem;
animation: avatarBreathe 3s ease-in-out infinite;
box-shadow: inset 0 0 20px rgba(124, 108, 240, 0.08);
}
@@ -1110,3 +1189,14 @@ tr:hover td {
background: rgba(124, 108, 240, 0.35);
color: #fff;
}
/* ── Version label ───────────────────────────────────────────────── */
.version-label {
display: block;
text-align: center;
margin-top: 16px;
font-size: 0.7rem;
color: var(--text-muted);
letter-spacing: 0.06em;
opacity: 0.5;
}

View File

@@ -1,5 +1,6 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -7,6 +8,7 @@
<title>FluentGerman.ai — Login</title>
<link rel="stylesheet" href="/css/style.css">
</head>
<body>
<div class="page-center">
<div class="card card-sm">
@@ -28,9 +30,11 @@
<button type="submit" class="btn btn-primary btn-block">Sign In</button>
</form>
</div>
<span class="version-label">v0.1</span>
</div>
<script src="/js/api.js"></script>
<script src="/js/auth.js"></script>
</body>
</html>

View File

@@ -2,21 +2,15 @@
// Configure marked for safe rendering
marked.setOptions({
breaks: true, // Convert \n to <br>
gfm: true, // GitHub-flavored markdown
breaks: true,
gfm: true,
});
/**
 * Convert markdown to HTML and sanitize it before it is inserted into the DOM.
 * @param {string} text - Raw markdown source (e.g. a streamed model response).
 * @returns {string} Sanitized HTML safe for innerHTML assignment.
 */
function renderMarkdown(text) {
  return DOMPurify.sanitize(marked.parse(text));
}
/**
* Format a date as a friendly relative time string.
*/
function relativeTime(dateStr) {
if (!dateStr) return null;
const date = new Date(dateStr);
@@ -34,6 +28,9 @@ function relativeTime(dateStr) {
return date.toLocaleDateString('en-GB', { day: 'numeric', month: 'short', year: 'numeric' });
}
// Avatar — DiceBear "avataaars" style for friendly illustrated headshots
const AVATAR_SEEDS = ['Felix', 'Lena', 'Hans', 'Sophie', 'Klaus', 'Marta', 'Otto', 'Emma'];
document.addEventListener('DOMContentLoaded', async () => {
if (!requireAuth()) return;
@@ -41,27 +38,31 @@ document.addEventListener('DOMContentLoaded', async () => {
const displayName = user?.username || 'User';
document.getElementById('user-name').textContent = displayName;
// Random avatar from DiceBear
const avatarImg = document.getElementById('avatar-img');
const seed = AVATAR_SEEDS[Math.floor(Math.random() * AVATAR_SEEDS.length)];
avatarImg.src = `https://api.dicebear.com/9.x/avataaars/svg?seed=${seed}&backgroundColor=b6e3f4,c0aede,d1d4f9`;
const messagesEl = document.getElementById('chat-messages');
const inputEl = document.getElementById('chat-input');
const sendBtn = document.getElementById('send-btn');
const voiceBtn = document.getElementById('voice-btn');
const micBtn = document.getElementById('mic-btn');
const voiceToggle = document.getElementById('voice-toggle-input');
let history = [];
let voiceModeOn = false;
// ── Personalised welcome ──────────────────────────────────────────
const greetingEl = document.getElementById('welcome-greeting');
const subtitleEl = document.getElementById('welcome-subtitle');
const metaEl = document.getElementById('welcome-meta');
// Immediately show the username we already have from localStorage
greetingEl.textContent = `Hallo, ${displayName}! 👋`;
// Fetch dashboard info for latest instruction data
try {
const resp = await api('/chat/dashboard');
if (resp?.ok) {
const data = await resp.json();
// Use server username (authoritative)
greetingEl.textContent = `Hallo, ${data.username}! 👋`;
if (data.latest_instruction_at) {
@@ -74,20 +75,33 @@ document.addEventListener('DOMContentLoaded', async () => {
}
}
} catch (e) {
console.warn('[Chat] Could not fetch dashboard info:', e);
console.warn('[Chat] Could not fetch dashboard:', e);
}
// ── Voice ─────────────────────────────────────────────────────────
const voice = new VoiceManager();
await voice.init();
// Disable mic button if no STT method is available
if (voice.isDisabled) {
voiceBtn.disabled = true;
voiceBtn.title = 'Voice input requires Chrome or Edge (with HTTPS)';
voiceBtn.style.opacity = '0.35';
voiceBtn.style.cursor = 'not-allowed';
}
// Voice toggle handler — flips voice mode on/off, shows/hides the mic
// button and adjusts the input placeholder accordingly.
voiceToggle.addEventListener('change', () => {
voiceModeOn = voiceToggle.checked;
if (voiceModeOn) {
// No STT backend available (set during VoiceManager.init) — refuse to
// enable voice mode and revert the checkbox so UI state stays truthful.
if (voice.isDisabled) {
showToast('Voice requires Chrome or Edge (HTTPS).', 'error');
voiceToggle.checked = false;
voiceModeOn = false;
return;
}
micBtn.classList.remove('hidden');
inputEl.placeholder = 'Voice mode ON — click the mic to speak...';
} else {
micBtn.classList.add('hidden');
inputEl.placeholder = 'Type your message...';
// Stop any active recording
if (voice.isRecording) voice.stopRecording();
}
});
voice.onResult = (text) => {
inputEl.value = text;
@@ -96,11 +110,10 @@ document.addEventListener('DOMContentLoaded', async () => {
};
voice.onStateChange = (recording) => {
voiceBtn.classList.toggle('recording', recording);
voiceBtn.textContent = recording ? '⏹' : '🎤';
micBtn.classList.toggle('recording', recording);
};
voiceBtn.addEventListener('click', () => voice.toggleRecording());
micBtn.addEventListener('click', () => voice.toggleRecording());
// ── Chat ──────────────────────────────────────────────────────────
function appendMessage(role, content) {
@@ -122,7 +135,6 @@ document.addEventListener('DOMContentLoaded', async () => {
const text = inputEl.value.trim();
if (!text) return;
// Capture whether this was a voice input BEFORE clearing
const wasVoice = voice.lastInputWasVoice;
voice.lastInputWasVoice = false;
@@ -132,7 +144,6 @@ document.addEventListener('DOMContentLoaded', async () => {
appendMessage('user', text);
history.push({ role: 'user', content: text });
// Create assistant message placeholder
const assistantEl = appendMessage('assistant', '');
let fullResponse = '';
@@ -166,7 +177,6 @@ document.addEventListener('DOMContentLoaded', async () => {
const parsed = JSON.parse(data);
if (parsed.token) {
fullResponse += parsed.token;
// Live-render markdown as tokens stream in
assistantEl.innerHTML = renderMarkdown(fullResponse);
messagesEl.scrollTop = messagesEl.scrollHeight;
}
@@ -183,8 +193,8 @@ document.addEventListener('DOMContentLoaded', async () => {
if (fullResponse) {
history.push({ role: 'assistant', content: fullResponse });
// Auto-speak response if the user used voice input
if (wasVoice) {
// Auto-speak if voice mode is ON (regardless of input method)
if (voiceModeOn) {
await voice.speak(fullResponse);
}
}

View File

@@ -1,12 +1,11 @@
/* FluentGerman.ai — Voice module (Web Speech API + API mode) */
/* FluentGerman.ai — Voice module (API-only TTS, browser + API STT) */
class VoiceManager {
constructor() {
this.mode = 'browser'; // will be set from server config
this.mode = 'browser';
this.recognition = null;
this.synthesis = window.speechSynthesis;
this.isRecording = false;
this.isDisabled = false; // true when no STT method is available
this.isDisabled = false;
this.lastInputWasVoice = false;
this.mediaRecorder = null;
this.audioChunks = [];
@@ -17,10 +16,8 @@ class VoiceManager {
}
async init() {
// Check browser STT support
this._initBrowserSTT();
// Fetch voice mode from server
try {
const response = await api('/voice/config');
if (response?.ok) {
@@ -34,24 +31,17 @@ class VoiceManager {
this.mode = 'browser';
}
// Determine the best available mode
// Determine best STT method
if (this.mode === 'browser' && !this.browserSTTSupported) {
if (this.apiAvailable) {
console.log('[Voice] Browser STT not supported, falling back to API mode');
this.mode = 'api';
showToast('Using cloud voice recognition — your browser doesn\'t support built-in speech recognition.', 'info');
} else {
// Neither method works
console.warn('[Voice] No STT method available — disabling voice input');
this.isDisabled = true;
}
} else if (this.mode === 'api' && !this.apiAvailable) {
// Server says API but API isn't actually configured
if (this.browserSTTSupported) {
console.log('[Voice] API STT not configured, using browser STT');
this.mode = 'browser';
} else {
console.warn('[Voice] No STT method available — disabling voice input');
this.isDisabled = true;
}
}
@@ -62,7 +52,6 @@ class VoiceManager {
_initBrowserSTT() {
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
if (!SpeechRecognition) {
console.warn('[Voice] SpeechRecognition API not available in this browser');
this.browserSTTSupported = false;
return;
}
@@ -75,35 +64,32 @@ class VoiceManager {
this.recognition.onresult = (event) => {
const text = event.results[0][0].transcript;
console.log('[Voice] Browser STT result:', text);
console.log('[Voice] STT result:', text);
this.lastInputWasVoice = true;
if (this.onResult) this.onResult(text);
};
this.recognition.onend = () => {
console.log('[Voice] Browser STT ended');
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
};
this.recognition.onerror = (event) => {
console.error('[Voice] Browser STT error:', event.error);
console.error('[Voice] STT error:', event.error);
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
if (event.error === 'not-allowed') {
showToast('Microphone access denied. Please allow microphone in browser settings.', 'error');
showToast('Microphone access denied. Allow it in browser settings.', 'error');
} else if (event.error === 'no-speech') {
showToast('No speech detected. Try again.', 'error');
}
};
console.log('[Voice] Browser STT initialized');
}
async startRecording() {
if (this.isDisabled) {
showToast('Voice input requires Chrome or Edge (with HTTPS). Firefox is not supported.', 'error');
showToast('Voice requires Chrome or Edge (HTTPS).', 'error');
return;
}
@@ -112,79 +98,66 @@ class VoiceManager {
if (this.onStateChange) this.onStateChange(true);
if (this.mode === 'api') {
// API mode — record audio via MediaRecorder, send to Whisper
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
this.audioChunks = [];
this.mediaRecorder = new MediaRecorder(stream);
this.mediaRecorder.ondataavailable = (event) => {
this.audioChunks.push(event.data);
};
this.mediaRecorder.ondataavailable = (e) => this.audioChunks.push(e.data);
this.mediaRecorder.onstop = async () => {
stream.getTracks().forEach(track => track.stop());
stream.getTracks().forEach(t => t.stop());
const blob = new Blob(this.audioChunks, { type: 'audio/webm' });
await this._transcribeAPI(blob);
};
this.mediaRecorder.start();
console.log('[Voice] API recording started');
} catch (e) {
console.error('[Voice] Microphone access error:', e);
showToast('Microphone access denied', 'error');
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
}
} else {
// Browser mode — use Web Speech API
if (this.recognition) {
try {
this.recognition.start();
console.log('[Voice] Browser STT started');
} catch (e) {
console.error('[Voice] Failed to start recognition:', e);
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
showToast('Voice recognition failed to start. Try again.', 'error');
showToast('Voice recognition failed. Try again.', 'error');
}
} else {
console.warn('[Voice] No speech recognition available');
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
showToast('Voice input requires Chrome or Edge (with HTTPS).', 'error');
}
}
}
stopRecording() {
console.log('[Voice] Stopping recording...');
if (this.mode === 'api') {
if (this.mediaRecorder && this.mediaRecorder.state === 'recording') {
if (this.mediaRecorder?.state === 'recording') {
this.mediaRecorder.stop();
} else {
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
}
} else {
if (this.recognition) {
try {
this.recognition.stop();
} catch (e) {
// Already stopped
}
}
try { this.recognition?.stop(); } catch (e) { /* already stopped */ }
this.isRecording = false;
if (this.onStateChange) this.onStateChange(false);
}
}
toggleRecording() {
if (this.isDisabled) {
showToast('Voice requires Chrome or Edge (HTTPS).', 'error');
return;
}
this.isRecording ? this.stopRecording() : this.startRecording();
}
async _transcribeAPI(blob) {
try {
const formData = new FormData();
formData.append('audio', blob, 'recording.webm');
console.log('[Voice] Sending audio to API for transcription...');
const response = await api('/voice/transcribe', {
method: 'POST',
body: formData,
@@ -192,14 +165,12 @@ class VoiceManager {
if (response?.ok) {
const data = await response.json();
console.log('[Voice] API transcription result:', data.text);
this.lastInputWasVoice = true;
if (this.onResult) this.onResult(data.text);
} else {
showToast('Transcription failed. Please try again.', 'error');
showToast('Transcription failed.', 'error');
}
} catch (e) {
console.error('[Voice] API transcription error:', e);
showToast('Transcription error', 'error');
} finally {
this.isRecording = false;
@@ -207,33 +178,20 @@ class VoiceManager {
}
}
/**
* Speak text via API TTS only. No browser fallback.
* Strips markdown formatting before sending.
*/
async speak(text) {
if (this.mode === 'api' && this.apiAvailable) {
return this._speakAPI(text);
} else {
return this._speakBrowser(text);
if (!this.apiAvailable) {
console.log('[Voice] API TTS not available, skipping speech');
return;
}
}
_speakBrowser(text) {
return new Promise((resolve) => {
// Cancel any ongoing speech
this.synthesis.cancel();
const utterance = new SpeechSynthesisUtterance(text);
utterance.lang = 'de-DE';
utterance.rate = 0.95;
utterance.onend = resolve;
utterance.onerror = () => {
console.warn('[Voice] Browser TTS error');
resolve();
};
this.synthesis.speak(utterance);
});
}
const clean = VoiceManager.stripMarkdown(text);
async _speakAPI(text) {
try {
const response = await api(`/voice/synthesize?text=${encodeURIComponent(text)}`, {
const response = await api(`/voice/synthesize?text=${encodeURIComponent(clean)}`, {
method: 'POST',
});
@@ -242,26 +200,33 @@ class VoiceManager {
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
await audio.play();
return new Promise(resolve => {
audio.onended = resolve;
});
return new Promise(resolve => { audio.onended = resolve; });
}
} catch (e) {
console.warn('[Voice] API TTS failed, falling back to browser');
console.warn('[Voice] API TTS failed:', e);
}
// Fallback to browser TTS
return this._speakBrowser(text);
}
toggleRecording() {
if (this.isDisabled) {
showToast('Voice input requires Chrome or Edge (with HTTPS). Firefox is not supported.', 'error');
return;
}
if (this.isRecording) {
this.stopRecording();
} else {
this.startRecording();
}
/**
* Strip markdown formatting from text so TTS reads naturally.
*/
static stripMarkdown(text) {
return text
.replace(/```[\s\S]*?```/g, '') // code blocks
.replace(/`([^`]+)`/g, '$1') // inline code
.replace(/#{1,6}\s+/g, '') // headings
.replace(/\*\*([^*]+)\*\*/g, '$1') // bold
.replace(/\*([^*]+)\*/g, '$1') // italic
.replace(/__([^_]+)__/g, '$1') // bold alt
.replace(/_([^_]+)_/g, '$1') // italic alt
.replace(/~~([^~]+)~~/g, '$1') // strikethrough
.replace(/^\s*[-*+]\s+/gm, '') // unordered lists
.replace(/^\s*\d+\.\s+/gm, '') // ordered lists
.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1')// links
.replace(/!\[([^\]]*)\]\([^)]+\)/g, '') // images
.replace(/>\s+/g, '') // blockquotes
.replace(/\n{2,}/g, '. ') // paragraph breaks → pause
.replace(/\n/g, ' ') // newlines → space
.trim();
}
}