fix: SpeechRecognition import

Author: Lucas Santana
Date:   2025-01-26 11:11:04 -03:00
Parent: ccacf76d9a
Commit: 59a7adfeee
5 changed files with 82 additions and 10 deletions

View File

@@ -1,9 +1,9 @@
-import React, { useState, useEffect } from 'react';
+import React from 'react';
 import { useNavigate } from 'react-router-dom';
 import { supabase } from '../../lib/supabase';
 import { useSession } from '../../hooks/useSession';
 import { useStoryCategories } from '../../hooks/useStoryCategories';
-import { Wand2, ArrowLeft, ArrowRight } from 'lucide-react';
+import { Wand2, ArrowLeft } from 'lucide-react';
 import { useStudentTracking } from '../../hooks/useStudentTracking';
 
 interface Category {
@@ -21,7 +21,7 @@ interface StoryStep {
   isContextStep?: boolean;
 }
 
-interface StoryChoices {
+export interface StoryChoices {
   theme_id: string | null;
   subject_id: string | null;
   character_id: string | null;
@@ -32,12 +32,12 @@ interface StoryChoices {
 interface StoryGeneratorProps {
   initialContext?: string;
   onContextChange: (context: string) => void;
-  inputMode: 'voice' | 'text' | 'form';
+  inputMode: 'voice' | 'form';
   voiceTranscript: string;
   isGenerating: boolean;
   setIsGenerating: (value: boolean) => void;
   step: number;
-  setStep: (step: number) => void;
+  setStep: (value: number | ((prev: number) => number)) => void;
   choices: StoryChoices;
   setChoices: React.Dispatch<React.SetStateAction<StoryChoices>>;
 }
@@ -86,7 +86,7 @@ export function StoryGenerator({
   ];
 
   // 3. useEffect that depends on the data
-  useEffect(() => {
+  React.useEffect(() => {
     if (inputMode === 'voice' && voiceTranscript && themes) {
       setStep(steps.length);
       setChoices(prev => ({
@@ -99,7 +99,7 @@ export function StoryGenerator({
     }
   }, [inputMode, voiceTranscript, steps.length, themes, setStep, setChoices]);
 
-  useEffect(() => {
+  React.useEffect(() => {
     setChoices(prev => ({
       ...prev,
       context: inputMode === 'voice' ? voiceTranscript : initialContext
@@ -125,7 +125,7 @@ export function StoryGenerator({
   const handleSelect = (key: keyof StoryChoices, value: string) => {
     setChoices(prev => ({ ...prev, [key]: value }));
     if (step < steps.length) {
-      setStep(prev => prev + 1);
+      setStep((prev: number) => prev + 1);
     }
   };
@@ -327,7 +327,7 @@ export function StoryGenerator({
           {/* Navigation Buttons */}
           <div className="flex justify-between pt-6">
             <button
-              onClick={() => setStep(prev => prev - 1)}
+              onClick={() => setStep((prev: number) => prev - 1)}
               disabled={step === 1}
               className="flex items-center gap-2 px-4 py-2 text-gray-600 disabled:opacity-50"
             >
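Note: the widened setStep prop mirrors React's state-setter overloads, so callers can pass either a plain number or a functional updater. A minimal standalone sketch of the two call styles the new signature accepts (the demo function is hypothetical, not part of the component):

    type StepSetter = (value: number | ((prev: number) => number)) => void;

    function demo(setStep: StepSetter) {
      setStep(1);                           // direct value, e.g. jump back to the first step
      setStep((prev: number) => prev + 1);  // functional update, as used by handleSelect above
    }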

View File

@@ -6,12 +6,16 @@ import { useEffect } from 'react';
 interface VoiceCommandButtonProps {
   className?: string;
   onTranscriptUpdate: (transcript: string) => void;
+  onStart?: () => void;
+  onStop?: () => void;
   disabled?: boolean;
 }
 
 export function VoiceCommandButton({
   className,
   onTranscriptUpdate,
+  onStart,
+  onStop,
   disabled = false
 }: VoiceCommandButtonProps) {
   const {
@@ -29,6 +33,16 @@ export function VoiceCommandButton({
     }
   }, [transcript, onTranscriptUpdate]);
 
+  const handleStart = () => {
+    onStart?.();
+    start();
+  };
+
+  const handleStop = () => {
+    onStop?.();
+    stop();
+  };
+
   useEffect(() => {
     if (status === 'recording') {
       onTranscriptUpdate(''); // Clear the context when starting a new recording
@@ -46,7 +60,7 @@ export function VoiceCommandButton({
   return (
     <div className="relative group">
       <button
-        onClick={status === 'recording' ? stop : start}
+        onClick={status === 'recording' ? handleStop : handleStart}
         className={cn(
           'flex items-center gap-3 px-4 py-2 rounded-lg transition-all',
           status === 'recording'
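Note: the new optional onStart/onStop callbacks fire just before the underlying start()/stop() calls, so a parent can sync its own state when recording toggles. A hedged usage sketch (the parent-side handlers shown here are illustrative, not the actual page code):

    <VoiceCommandButton
      onTranscriptUpdate={setStoryContext}
      onStart={() => setError(null)}   // e.g. clear a stale error before a new recording
      onStop={() => console.log('recording stopped')}
      disabled={isGenerating}
    />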

View File

@@ -1,4 +1,5 @@
 import { useState, useEffect, useCallback } from 'react';
+import { SpeechRecognition } from '../../../types/speech';
 
 type RecognitionState = 'idle' | 'recording' | 'processing' | 'error';
 

View File

@@ -8,6 +8,7 @@ import { AdaptiveTitle, AdaptiveParagraph, AdaptiveText } from '../../components
 import { useUppercasePreference } from '../../hooks/useUppercasePreference';
 import { useSpeechRecognition } from '@/features/voice-commands/hooks/useSpeechRecognition';
 import { VoiceCommandButton } from '@/features/voice-commands/components/VoiceCommandButton';
+import type { StoryChoices } from '@/components/story/StoryGenerator';
 
 export function CreateStoryPage() {
   const navigate = useNavigate();
@@ -40,6 +41,23 @@ export function CreateStoryPage() {
     context: ''
   });
 
+  // Handlers for voice recording
+  const handleStartRecording = () => {
+    setError(null);
+    startRecording();
+  };
+
+  const handleStopRecording = () => {
+    stopRecording();
+  };
+
+  // Update the UI state based on the recording status
+  useEffect(() => {
+    if (recordingStatus === 'recording') {
+      setInputMode('voice');
+    }
+  }, [recordingStatus]);
+
   useEffect(() => {
     if (inputMode === 'voice' && voiceTranscript) {
       setStep(5);
@@ -142,6 +160,8 @@ export function CreateStoryPage() {
                   setInputMode('voice');
                   setStoryContext(transcript);
                 }}
+                onStart={handleStartRecording}
+                onStop={handleStopRecording}
                 disabled={isGenerating}
                 className="w-full justify-center py-3"
               />
@@ -167,6 +187,8 @@ export function CreateStoryPage() {
                     setStoryContext(newContext);
                   }
                 }}
+                inputMode={inputMode}
+                voiceTranscript={voiceTranscript || ''}
                 isGenerating={isGenerating}
                 setIsGenerating={setIsGenerating}
                 step={step}

src/types/speech.d.ts (vendored, new file, 35 additions)
View File

@@ -0,0 +1,35 @@
+interface SpeechRecognitionErrorEvent extends Event {
+  error: string;
+}
+
+interface SpeechRecognitionEvent extends Event {
+  results: {
+    [index: number]: {
+      [index: number]: {
+        transcript: string;
+      };
+    };
+  };
+}
+
+interface SpeechRecognition extends EventTarget {
+  continuous: boolean;
+  lang: string;
+  interimResults: boolean;
+  maxAlternatives: number;
+  start(): void;
+  stop(): void;
+  onstart: () => void;
+  onend: () => void;
+  onerror: (event: SpeechRecognitionErrorEvent) => void;
+  onresult: (event: SpeechRecognitionEvent) => void;
+}
+
+declare global {
+  interface Window {
+    SpeechRecognition: new () => SpeechRecognition;
+    webkitSpeechRecognition: new () => SpeechRecognition;
+  }
+}
+
+export function useSpeechRecognition(): SpeechRecognition;
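For context, these declarations type the browser's Web Speech API, whose constructor is exposed as window.SpeechRecognition or window.webkitSpeechRecognition depending on the engine. A minimal sketch of how a hook might obtain and configure a recognizer with these types; the actual useSpeechRecognition implementation is not part of this diff, and the language setting is an assumption:

    const Ctor = window.SpeechRecognition ?? window.webkitSpeechRecognition;
    const recognition: SpeechRecognition = new Ctor();

    recognition.lang = 'pt-BR';        // assumption: Portuguese-language UI
    recognition.continuous = true;
    recognition.interimResults = false;

    recognition.onresult = (event) => {
      const transcript = event.results[0][0].transcript;
      // hand the transcript back to React state, e.g. via onTranscriptUpdate
    };

    recognition.start();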