// src/components/KeenSlider.tsx
  1  import React, { useState, useEffect, useRef } from 'react';
  2  import { useKeenSlider } from 'keen-slider/react';
  3  import 'keen-slider/keen-slider.min.css';
  4  import { ArrowTopRightOnSquareIcon, MicrophoneIcon } from '@heroicons/react/24/outline';
  5  import { useAccount } from 'wagmi';
  6  import { RingLoader } from 'react-spinners';
  7  import { Button } from './Button';
  8  import AuthButton from './AuthButton';
  9  import RecordingInterface from './RecordingInterface';
 10  import ScoreDisplay from './ScoreDisplay';
 11  
/**
 * A single timed word within a {@link Phrase}.
 * Times are numeric strings (parsed with `parseFloat`) and are compared against
 * `HTMLAudioElement.currentTime`, i.e. seconds from the start of the track.
 */
interface Word {
  id: number;
  /** Display text of the word. */
  text: string;
  /** Start of the word in the audio, seconds encoded as a string. */
  start_time: string;
  /** End of the word in the audio, seconds encoded as a string. */
  end_time: string;
}
 18  
/**
 * One practice unit of a song: a lyric fragment with translation, timing, and
 * per-word timestamps used for highlighting during playback.
 */
interface Phrase {
  /** Lyric text; may contain '\n' to split the phrase into display lines. */
  text: string;
  /** Japanese translation, line-aligned with `text` (same '\n' count expected). */
  text_jpn: string;
  /** Phrase start in the audio, seconds encoded as a string. */
  start_time: string;
  /** Phrase end in the audio, seconds encoded as a string. */
  end_time: string;
  /** NOTE(review): not referenced anywhere in this file — confirm still needed. */
  stop_marker?: string;
  /** Timed words for playhead-driven highlighting. */
  words: Word[];
}
 27  
/** A playable song rendered as one full-height slide in the carousel. */
interface Song {
  stream_id: string;
  /** Stable identity; used as the React key for the slide. */
  uuid: string;
  song_title_eng: string;
  artist_name_eng: string;
  /** Video id appended to a youtube.com/watch URL for the external link. */
  invidious_video_id: string;
  /** Audio source URL fed to the shared hidden <audio> element. */
  audioSrc: string;
  /** Ordered phrases the user practices one at a time. */
  phrases: Phrase[];
}
 37  
/** Props for the {@link KeenSlider} component. */
interface KeenSliderProps {
  /** Playlist; each song becomes one vertical slide. */
  initialSongs: Song[];
  /** Called with the new slide index whenever the active song changes. */
  onSongChange: (index: number) => void;
  /** NOTE(review): declared but never invoked in this file — confirm caller still needs it. */
  onVoiceButtonResponse: () => void;
  /** NOTE(review): declared but never invoked in this file — confirm caller still needs it. */
  onPlay: () => void;
}
 44  
/**
 * Vertical, looping song feed (one song per full-height slide) for karaoke-style
 * phrase practice: play the current phrase, record the user, show a simulated
 * score, then advance to the next phrase or song.
 *
 * A single hidden <audio> element (audioRef) is shared across all slides; its
 * `src` follows the active song, and word highlighting is driven by its
 * 'timeupdate' events.
 */
const KeenSlider: React.FC<KeenSliderProps> = (props) => {
  // Index of the active slide within props.initialSongs.
  const [currentSongIndex, setCurrentSongIndex] = useState(0);
  // Phrase currently being practiced inside the active song.
  const [currentPhraseIndex, setCurrentPhraseIndex] = useState(0);
  // Index into the current phrase's `words` array of the word to highlight.
  const [currentWordIndex, setCurrentWordIndex] = useState(0);
  const [hasMicrophonePermission, setHasMicrophonePermission] = useState(false);
  const [isRecording, setIsRecording] = useState(false);
  // True while the (simulated) scoring request is in flight.
  const [isProcessing, setIsProcessing] = useState(false);
  // Last score, or null when the ScoreDisplay should be hidden.
  const [score, setScore] = useState<number | null>(null);
  const [hasStartedRecording, setHasStartedRecording] = useState(false);
  const [sessionStarted, setSessionStarted] = useState(false); // Once true, recording auto-starts on every song change
  const { isConnected } = useAccount();
  const audioRef = useRef<HTMLAudioElement>(null);
  const [sliderRef, instanceRef] = useKeenSlider<HTMLDivElement>({
    loop: true,
    mode: "snap",
    vertical: true,
    slides: {
      perView: 1,
      spacing: 15,
    },
    // Reset per-song state whenever the user swipes to another slide.
    // NOTE(review): this options object is built once at mount, so everything the
    // callback closes over (`sessionStarted`, `currentPhrase` via handleStartSong,
    // the handler functions themselves) is frozen at its initial-render value —
    // `sessionStarted` reads as false here even mid-session. The effect keyed on
    // currentSongIndex near the bottom appears to compensate; confirm before
    // relying on this if/else branch.
    slideChanged: (slider) => {
      setCurrentSongIndex(slider.track.details.rel);
      setCurrentPhraseIndex(0);
      setCurrentWordIndex(0);
      setScore(null);
      setIsProcessing(false);
      setIsRecording(false);
      setHasStartedRecording(false);
      props.onSongChange(slider.track.details.rel);
      if (sessionStarted) {
        handleStartRecording(true); // Automatically start recording if session has started
      } else {
        handleStartSong();
      }
    },
  });

  const currentSong = props.initialSongs[currentSongIndex];
  // NOTE(review): undefined when the song list (or the song's phrase list) is
  // empty; several callers below dereference it without a guard.
  const currentPhrase = currentSong?.phrases[currentPhraseIndex];

  const handleAuthClick = () => {
    console.log('Auth button clicked');
  };

  // Seek the shared audio element to `startTime` (or the current phrase's start
  // when omitted) and play as soon as the browser has buffered enough.
  // NOTE(review): throws if `currentPhrase` is undefined and no startTime is given.
  const handleStartSong = (startTime?: string) => {
    if (audioRef.current) {
      audioRef.current.pause(); // Pause the audio before changing the source
      audioRef.current.currentTime = startTime ? parseFloat(startTime) : parseFloat(currentPhrase.start_time);
      const playAudio = () => {
        audioRef.current?.play().catch(error => {
          console.error('Error playing audio:', error);
        });
      };
      if (audioRef.current.readyState >= 3) { // Ready to play (HAVE_FUTURE_DATA or better)
        playAudio();
      } else {
        // Not buffered yet — play once, as soon as it can play through.
        audioRef.current.addEventListener('canplaythrough', playAudio, { once: true });
      }
    }
  };

  // Prompt the browser's microphone permission dialog; denial is only logged and
  // leaves hasMicrophonePermission false.
  const handleRequestMicrophonePermission = async () => {
    try {
      await navigator.mediaDevices.getUserMedia({ audio: true });
      setHasMicrophonePermission(true);
    } catch (error) {
      console.error('Error requesting microphone permission:', error);
    }
  };

  // Begin a recording pass over the current phrase, requesting mic permission
  // first if we don't have it yet.
  // NOTE(review): permission is not re-checked after the await, so recording
  // proceeds even when the request was denied — confirm intended. Also used
  // directly as a click handler below, so `_autoStart` may receive a MouseEvent
  // (harmless while unused).
  const handleStartRecording = async (_autoStart = false) => {
    console.log('Starting recording...');
    if (!hasMicrophonePermission) {
      await handleRequestMicrophonePermission();
    }
    setIsRecording(true);
    setHasStartedRecording(true);
    setSessionStarted(true); // Mark session as started
    setScore(null);
    setIsProcessing(false);
    handleStartSong(currentPhrase.start_time);
  };

  // Stop recording, pause playback, and fake a 2s scoring round-trip that
  // yields a random score in [0, 99].
  const handleStopRecording = () => {
    console.log('Stopping recording...');
    setIsRecording(false);
    setIsProcessing(true);
    if (audioRef.current) {
      audioRef.current.pause();
    }
    // Simulate API call
    setTimeout(() => {
      setIsProcessing(false);
      setScore(Math.floor(Math.random() * 100));
      console.log('Recording processed, score set.');
    }, 2000);
  };

  // "Again" on the score screen: retry the same phrase from its start.
  const handleAgain = () => {
    setScore(null);
    setIsRecording(true);
    handleStartSong(currentPhrase.start_time);
  };

  // "Good" on the score screen: advance to the next phrase, or — when the song
  // is finished — slide to the next song and re-arm recording.
  const handleGood = () => {
    setScore(null);
    setIsProcessing(false);
    if (currentPhraseIndex < currentSong.phrases.length - 1) {
      const nextPhrase = currentSong.phrases[currentPhraseIndex + 1];
      setCurrentPhraseIndex(currentPhraseIndex + 1);
      setCurrentWordIndex(0);
      setIsRecording(true);
      handleStartSong(nextPhrase.start_time);
    } else {
      // Move to next song
      setIsRecording(false);
      setHasStartedRecording(false);
      instanceRef.current?.next();
      // NOTE(review): waits out the slide animation, then re-arms recording by
      // hand; this races with slideChanged's own state reset and with the
      // auto-start effect below — confirm the ordering is what you want.
      setTimeout(() => {
        setCurrentPhraseIndex(0);
        setCurrentWordIndex(0);
        setIsRecording(true);
        setHasStartedRecording(true);
        handleStartSong();
      }, 500); // Adjust the delay as needed
    }
  };

  // Render a phrase as word <span>s (highlighting the active word) with the
  // line-aligned Japanese translation underneath each lyric line.
  // NOTE(review): the highlight index counts whitespace-split tokens across all
  // lines and assumes that numbering matches phrase.words ordering exactly —
  // confirm the data upstream guarantees this.
  const renderPhrase = (phrase: Phrase) => {
    const lines = phrase.text.split('\n');
    const translations = phrase.text_jpn.split('\n');
    return lines.map((line, lineIndex) => (
      <React.Fragment key={lineIndex}>
        {line.split(' ').map((word, wordIndex) => {
          // Flatten (lineIndex, wordIndex) into a single index over the phrase.
          const fullWordIndex = lines.slice(0, lineIndex).reduce((acc, l) => acc + l.split(' ').length, 0) + wordIndex;
          return (
            <span 
              key={`${lineIndex}-${wordIndex}`} 
              className={fullWordIndex === currentWordIndex ? 'text-blue-500 font-bold' : ''}
            >
              {word}{' '}
            </span>
          );
        })}
        <br />
        <span className="text-sm text-gray-600">{translations[lineIndex]}</span>
        {lineIndex < lines.length - 1 && <br />}
      </React.Fragment>
    ));
  };

  // Follow playback: highlight the word under the playhead, stop recording just
  // before the phrase ends, and pin the highlight to the last word once past the
  // end. Re-subscribes on phrase/recording changes so the listener sees fresh
  // values.
  useEffect(() => {
    const audio = audioRef.current;
    if (!audio) return;

    const updateWord = () => {
      const currentTime = audio.currentTime;
      const currentWord = currentPhrase.words.find(
        word => currentTime >= parseFloat(word.start_time) && currentTime < parseFloat(word.end_time)
      );
      if (currentWord) {
        setCurrentWordIndex(currentPhrase.words.indexOf(currentWord));
      }

      // Check if we're near the end of the phrase
      const timeUntilEnd = parseFloat(currentPhrase.end_time) - currentTime;
      if (timeUntilEnd <= 0.1 && isRecording) { // Adjusted cutoff value
        console.log('Near end of phrase, stopping recording...');
        handleStopRecording();
      }

      // Ensure the last word is reached
      if (currentTime >= parseFloat(currentPhrase.end_time)) {
        setCurrentWordIndex(currentPhrase.words.length - 1);
      }
    };

    audio.addEventListener('timeupdate', updateWord);
    return () => audio.removeEventListener('timeupdate', updateWord);
  }, [currentPhrase, isRecording]);

  // Auto-restart recording after a song change once a session is underway.
  // NOTE(review): deps omit sessionStarted/isRecording, so this fires only when
  // the song actually changes (reading fresh values at that moment) — confirm
  // that narrowing is intentional before "fixing" the dependency array.
  useEffect(() => {
    if (sessionStarted && !isRecording) {
      console.log('Session started, auto-starting recording...');
      handleStartRecording(true);
    }
  }, [currentSongIndex]);

  return (
    <div className="w-full h-full flex flex-col">
      <div ref={sliderRef} className="keen-slider flex-grow w-full overflow-hidden">
        {props.initialSongs.length === 0 ? (
          <div className="keen-slider__slide flex items-center justify-center bg-neutral-900 w-full h-full">
            <h2 className="text-2xl text-white">No Songs Available</h2>
          </div>
        ) : (
          props.initialSongs.map((song, songIndex) => (
            <div key={song.uuid} className="keen-slider__slide flex flex-col h-full p-4 w-full">
              <div className="flex items-center justify-end mb-4">
                <h2 className="text-xs font-bold mr-2">{song.song_title_eng} - {song.artist_name_eng}</h2>
                <a href={`https://www.youtube.com/watch?v=${song.invidious_video_id}`} target="_blank" rel="noopener noreferrer">
                  <ArrowTopRightOnSquareIcon className="w-4 h-4" />
                </a>
              </div>
              <div className="flex-grow flex items-center">
                <div className="w-full">
                  {songIndex === currentSongIndex && (
                    <div className="mb-4 text-left">
                      {/* NOTE(review): currentPhrase can be undefined for a song
                          with no phrases — renderPhrase would throw. */}
                      <p className="text-lg">
                        {renderPhrase(currentPhrase)}
                      </p>
                    </div>
                  )}
                </div>
              </div>
              <div className="mt-auto">
                {/* Bottom control strip: auth → mic button → recording UI → spinner → score */}
                {!isConnected && (
                  <AuthButton onAuthClick={handleAuthClick} />
                )}
                {isConnected && !hasStartedRecording && !sessionStarted && (
                  <Button
                    icon={<MicrophoneIcon className="w-5 h-5" />} // Only icon, no label
                    onClick={handleStartRecording}
                    fullWidth
                  />
                )}
                {isRecording && (
                  <RecordingInterface onStopRecording={handleStopRecording} />
                )}
                {isProcessing && (
                  <div className="flex justify-center items-center h-16">
                    <RingLoader color="#3B82F6" loading={true} size={50} />
                  </div>
                )}
                {score !== null && (
                  <ScoreDisplay score={score} onAgain={handleAgain} onGood={handleGood} />
                )}
              </div>
            </div>
          ))
        )}
      </div>
      {/* Shared audio element; its src tracks the active slide's song. */}
      <audio ref={audioRef} src={currentSong?.audioSrc} />
    </div>
  );
};
291  
292  export default KeenSlider;