fix: SpeechRecognition import

Lucas Santana 2025-01-26 11:11:04 -03:00
parent ccacf76d9a
commit 59a7adfeee
5 changed files with 82 additions and 10 deletions


@@ -1,9 +1,9 @@
-import React, { useState, useEffect } from 'react';
+import React from 'react';
 import { useNavigate } from 'react-router-dom';
 import { supabase } from '../../lib/supabase';
 import { useSession } from '../../hooks/useSession';
 import { useStoryCategories } from '../../hooks/useStoryCategories';
-import { Wand2, ArrowLeft, ArrowRight } from 'lucide-react';
+import { Wand2, ArrowLeft } from 'lucide-react';
 import { useStudentTracking } from '../../hooks/useStudentTracking';

 interface Category {
@@ -21,7 +21,7 @@ interface StoryStep {
   isContextStep?: boolean;
 }

-interface StoryChoices {
+export interface StoryChoices {
   theme_id: string | null;
   subject_id: string | null;
   character_id: string | null;
@@ -32,12 +32,12 @@ interface StoryChoices {
 interface StoryGeneratorProps {
   initialContext?: string;
   onContextChange: (context: string) => void;
-  inputMode: 'voice' | 'text' | 'form';
+  inputMode: 'voice' | 'form';
   voiceTranscript: string;
   isGenerating: boolean;
   setIsGenerating: (value: boolean) => void;
   step: number;
-  setStep: (step: number) => void;
+  setStep: (value: number | ((prev: number) => number)) => void;
   choices: StoryChoices;
   setChoices: React.Dispatch<React.SetStateAction<StoryChoices>>;
 }
@@ -86,7 +86,7 @@ export function StoryGenerator({
   ];

   // 3. useEffect that depends on the data
-  useEffect(() => {
+  React.useEffect(() => {
     if (inputMode === 'voice' && voiceTranscript && themes) {
       setStep(steps.length);
       setChoices(prev => ({
@@ -99,7 +99,7 @@ export function StoryGenerator({
     }
   }, [inputMode, voiceTranscript, steps.length, themes, setStep, setChoices]);

-  useEffect(() => {
+  React.useEffect(() => {
     setChoices(prev => ({
       ...prev,
       context: inputMode === 'voice' ? voiceTranscript : initialContext
@@ -125,7 +125,7 @@ export function StoryGenerator({
   const handleSelect = (key: keyof StoryChoices, value: string) => {
     setChoices(prev => ({ ...prev, [key]: value }));
     if (step < steps.length) {
-      setStep(prev => prev + 1);
+      setStep((prev: number) => prev + 1);
     }
   };
@@ -327,7 +327,7 @@ export function StoryGenerator({
         {/* Navigation Buttons */}
         <div className="flex justify-between pt-6">
           <button
-            onClick={() => setStep(prev => prev - 1)}
+            onClick={() => setStep((prev: number) => prev - 1)}
             disabled={step === 1}
             className="flex items-center gap-2 px-4 py-2 text-gray-600 disabled:opacity-50"
           >
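Note on the setStep change above: the widened signature (value: number | ((prev: number) => number)) => void is what lets the later hunks call setStep((prev: number) => prev + 1). It is structurally the same as React.Dispatch<React.SetStateAction<number>>, so a parent that keeps the step in useState can pass the setter straight through. A minimal sketch, assuming the parent owns the step with useState; the hook name below is illustrative, not from the repository:

import { useState } from 'react';

// useState's setter already accepts number | ((prev: number) => number),
// so it satisfies StoryGeneratorProps.setStep without a wrapper function.
export function useWizardStep(initial = 1) {
  const [step, setStep] = useState<number>(initial);
  return { step, setStep };
}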


@@ -6,12 +6,16 @@ import { useEffect } from 'react';
 interface VoiceCommandButtonProps {
   className?: string;
   onTranscriptUpdate: (transcript: string) => void;
+  onStart?: () => void;
+  onStop?: () => void;
   disabled?: boolean;
 }

 export function VoiceCommandButton({
   className,
   onTranscriptUpdate,
+  onStart,
+  onStop,
   disabled = false
 }: VoiceCommandButtonProps) {
   const {
@@ -29,6 +33,16 @@ export function VoiceCommandButton({
     }
   }, [transcript, onTranscriptUpdate]);

+  const handleStart = () => {
+    onStart?.();
+    start();
+  };
+
+  const handleStop = () => {
+    onStop?.();
+    stop();
+  };
+
   useEffect(() => {
     if (status === 'recording') {
       onTranscriptUpdate(''); // Clear the context when starting a new recording
@@ -46,7 +60,7 @@ export function VoiceCommandButton({
   return (
     <div className="relative group">
       <button
-        onClick={status === 'recording' ? stop : start}
+        onClick={status === 'recording' ? handleStop : handleStart}
         className={cn(
           'flex items-center gap-3 px-4 py-2 rounded-lg transition-all',
           status === 'recording'
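Note on the VoiceCommandButton changes above: onStart and onStop run just before start() and stop(), so a parent can reset its own state the instant recording begins or ends. A hedged usage sketch; the component and its props come from the diff, while the surrounding component, state names and markup are illustrative assumptions:

import { useState } from 'react';
import { VoiceCommandButton } from '@/features/voice-commands/components/VoiceCommandButton';

// Illustrative parent: clears a previous error when recording starts and
// trims the captured transcript when it stops.
export function VoiceNoteField() {
  const [draft, setDraft] = useState('');
  const [error, setError] = useState<string | null>(null);

  return (
    <div>
      <VoiceCommandButton
        onTranscriptUpdate={setDraft}
        onStart={() => setError(null)}
        onStop={() => setDraft(d => d.trim())}
      />
      <p>{error ?? draft}</p>
    </div>
  );
}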


@@ -1,4 +1,5 @@
 import { useState, useEffect, useCallback } from 'react';
+import { SpeechRecognition } from '../../../types/speech';

 type RecognitionState = 'idle' | 'recording' | 'processing' | 'error';

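Note on the hook change above: the hunk only adds an explicit import of the SpeechRecognition type from the new declaration file. Based on what VoiceCommandButton destructures from the hook (status, transcript, start, stop) and the RecognitionState union, its public surface presumably looks like the sketch below; this is an inferred shape, not the repository's implementation:

import { SpeechRecognition } from '../../../types/speech';

type RecognitionState = 'idle' | 'recording' | 'processing' | 'error';

// Inferred from the consumers in this commit; the optional `recognition`
// field only illustrates where the imported SpeechRecognition type is used.
export interface UseSpeechRecognitionResult {
  status: RecognitionState;
  transcript: string;
  start: () => void;
  stop: () => void;
  recognition?: SpeechRecognition;
}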


@@ -8,6 +8,7 @@ import { AdaptiveTitle, AdaptiveParagraph, AdaptiveText } from '../../components
 import { useUppercasePreference } from '../../hooks/useUppercasePreference';
 import { useSpeechRecognition } from '@/features/voice-commands/hooks/useSpeechRecognition';
 import { VoiceCommandButton } from '@/features/voice-commands/components/VoiceCommandButton';
+import type { StoryChoices } from '@/components/story/StoryGenerator';

 export function CreateStoryPage() {
   const navigate = useNavigate();
@@ -40,6 +41,23 @@ export function CreateStoryPage() {
     context: ''
   });

+  // Handlers for voice recording
+  const handleStartRecording = () => {
+    setError(null);
+    startRecording();
+  };
+
+  const handleStopRecording = () => {
+    stopRecording();
+  };
+
+  // Update the UI state based on the recording status
+  useEffect(() => {
+    if (recordingStatus === 'recording') {
+      setInputMode('voice');
+    }
+  }, [recordingStatus]);
+
   useEffect(() => {
     if (inputMode === 'voice' && voiceTranscript) {
       setStep(5);
@@ -142,6 +160,8 @@ export function CreateStoryPage() {
                 setInputMode('voice');
                 setStoryContext(transcript);
               }}
+              onStart={handleStartRecording}
+              onStop={handleStopRecording}
               disabled={isGenerating}
               className="w-full justify-center py-3"
             />
@@ -167,6 +187,8 @@ export function CreateStoryPage() {
                  setStoryContext(newContext);
                }
              }}
+             inputMode={inputMode}
+             voiceTranscript={voiceTranscript || ''}
             isGenerating={isGenerating}
             setIsGenerating={setIsGenerating}
             step={step}
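Note on the CreateStoryPage changes above: handleStartRecording clears any previous error before starting, the recordingStatus effect switches inputMode to 'voice' as soon as recording begins, and passing voiceTranscript || '' keeps the voiceTranscript prop (typed as string) a plain string even before anything has been dictated. A hedged sketch of the page-level state these handlers assume; the destructuring and renaming below are guesses based on the identifiers used in the diff, not code from the repository:

import { useState } from 'react';
import { useSpeechRecognition } from '@/features/voice-commands/hooks/useSpeechRecognition';

// Assumed wiring: the page presumably renames the hook's fields to the
// names the diff references (recordingStatus, voiceTranscript,
// startRecording, stopRecording).
function useCreateStoryVoiceState() {
  const [inputMode, setInputMode] = useState<'voice' | 'form'>('form');
  const [error, setError] = useState<string | null>(null);

  const {
    status: recordingStatus,
    transcript: voiceTranscript,
    start: startRecording,
    stop: stopRecording,
  } = useSpeechRecognition();

  return {
    inputMode, setInputMode,
    error, setError,
    recordingStatus, voiceTranscript,
    startRecording, stopRecording,
  };
}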

src/types/speech.d.ts (new vendored file, 35 additions)

@@ -0,0 +1,35 @@
+interface SpeechRecognitionErrorEvent extends Event {
+  error: string;
+}
+
+interface SpeechRecognitionEvent extends Event {
+  results: {
+    [index: number]: {
+      [index: number]: {
+        transcript: string;
+      };
+    };
+  };
+}
+
+interface SpeechRecognition extends EventTarget {
+  continuous: boolean;
+  lang: string;
+  interimResults: boolean;
+  maxAlternatives: number;
+  start(): void;
+  stop(): void;
+  onstart: () => void;
+  onend: () => void;
+  onerror: (event: SpeechRecognitionErrorEvent) => void;
+  onresult: (event: SpeechRecognitionEvent) => void;
+}
+
+declare global {
+  interface Window {
+    SpeechRecognition: new () => SpeechRecognition;
+    webkitSpeechRecognition: new () => SpeechRecognition;
+  }
+}
+
+export function useSpeechRecognition(): SpeechRecognition;
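Note on the declaration file above: the declare global block is what lets browser code construct a recognizer through window.SpeechRecognition or the webkit-prefixed fallback without type errors, and the event interfaces give onresult/onerror typed payloads. A minimal consumption sketch, assuming a browser environment; the factory function and the 'pt-BR' default are illustrative, not code from the repository:

// Create a recognizer using the constructors declared above, preferring the
// standard name and falling back to the webkit-prefixed one.
export function createRecognizer(lang = 'pt-BR') {
  const Ctor = window.SpeechRecognition ?? window.webkitSpeechRecognition;
  if (!Ctor) return null; // browser without Web Speech API support

  const recognition = new Ctor();
  recognition.lang = lang;
  recognition.continuous = false;
  recognition.interimResults = false;
  recognition.maxAlternatives = 1;

  recognition.onresult = (event) => {
    // event.results[0][0].transcript matches the SpeechRecognitionEvent shape above.
    console.log('transcript:', event.results[0][0].transcript);
  };
  recognition.onerror = (event) => {
    console.error('speech recognition error:', event.error);
  };

  return recognition;
}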