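// AudioUI.tsx (file name assumed from the default export): full-screen voice
// chat overlay that records microphone audio and posts it to the audio chat
// endpoint.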
import { useEffect, useRef, useState } from 'react';
import { Backdrop, Box, IconButton, Tooltip } from '@mui/material';
import MicIcon from '@mui/icons-material/Mic';
import CloseIcon from '@mui/icons-material/Close';
import MicOffIcon from '@mui/icons-material/MicOff';
import { useAuthTenantId, useAuthUserName } from '@/hooks/useAuthDetail';
import { useGetApiToken } from '@/hooks/query/useApiKey';
import { storageKey } from '@/constants/storageKey';
import * as storage from '@/utils/storage';
import { useParams } from 'react-router-dom';
import { useAudioChat, useCreateConversation } from '@/hooks/query/useConversation';
import AudioRecorder, { type AudioRecorderHandle } from './AudioRecord';
interface AudioUIProps {
toggleOverlay: () => void;
showOverlay: boolean;
allowMicrophone: boolean;
setAllowMicrophone: React.Dispatch<React.SetStateAction<boolean>>;
}
const AudioUI = ({
toggleOverlay,
showOverlay,
allowMicrophone,
setAllowMicrophone
}: AudioUIProps) => {
const [audioBlob, setAudioBlob] = useState<Blob | null>(null);
const audioRecorderRef = useRef<AudioRecorderHandle | null>(null);
const authTenantId = useAuthTenantId();
const [conversationId, setConversationId] = useState('');
const [history, setHistory] = useState<any[]>([]);
const { projectId, ingestId } = useParams();
const { mutateAsync: conversationCreate } = useCreateConversation();
const authUserName = useAuthUserName();
const { data: apiToken } = useGetApiToken(authTenantId);
const { clientId, clientSecret } = (apiToken as any)?.tokens?.[0] ?? {};
// Base64-encode "clientId:clientSecret" only when both halves are present;
// btoa on an undefined value would otherwise encode the string "undefined".
const apiKey = clientId && clientSecret ? btoa(`${clientId}:${clientSecret}`) : '';
const knowledgeBaseIdentifier = storage.get(storageKey.PROJECT_IDENTIFIER);
const handleRecordingStop = (blob: Blob) => {
setAudioBlob(blob);
};
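// Create a server-side conversation once an API key is available; its id ties
// the uploaded audio to this chat session.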
const createConversation = async (apiKey: string) => {
try {
const response = await conversationCreate({
projectId,
api_key: apiKey,
projectIngestIdentifier: ingestId,
username: btoa(authUserName),
userType: 'botChat'
});
setConversationId(response.conversation_id as string);
} catch (err) {
console.error('Failed to create conversation:', err);
}
};
useEffect(() => {
if (apiKey) {
createConversation(apiKey);
}
}, [conversationCreate, projectId, apiKey, ingestId]);
const { mutateAsync: audioConversation } = useAudioChat();
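// End the call: hide the overlay, stop the recorder, wrap the captured audio
// in a File named after the knowledge base and conversation, and upload it.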
const handleEndClick = async () => {
toggleOverlay();
setAllowMicrophone(false);
let recordedBlob = null;
if (audioRecorderRef.current) {
recordedBlob = await audioRecorderRef.current.stopRecording();
}
if (!recordedBlob) {
console.error('No audioBlob available.');
return;
}
try {
console.log("recordedBlobrecordedBlob", recordedBlob)
const fileName = `audio_${knowledgeBaseIdentifier}_${conversationId}.wav`;
const file = new File([recordedBlob], fileName, { type: 'audio/wav' });
const response = await audioConversation({
projectId,
file,
api_key: apiKey,
conversation_id: conversationId,
history
});
console.log('API Response:', response);
setAudioBlob(null);
setHistory([]);
} catch (error) {
console.error('Error:', error);
}
};
return (
<>
<Backdrop
sx={{ color: '#fff', zIndex: (theme) => theme.zIndex.drawer + 1 }}
open={showOverlay}
>
<Box
sx={{
display: 'flex',
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'center',
background: '#000000cc',
width: '100%',
height: '100%',
position: 'fixed',
gap: 4
}}
>
<Tooltip
title={allowMicrophone ? 'Turn Off Microphone' : 'Turn On Microphone'}
placement="top"
>
<IconButton onClick={() => setAllowMicrophone(!allowMicrophone)}>
{allowMicrophone ? (
<MicIcon
sx={{
fontSize: 40,
cursor: 'pointer'
}}
/>
) : (
<MicOffIcon
sx={{
fontSize: 40,
cursor: 'pointer',
color: '#df4759'
}}
/>
)}
</IconButton>
</Tooltip>
<Tooltip title="End" placement="top">
<IconButton onClick={handleEndClick}>
<CloseIcon
sx={{
fontSize: 40,
cursor: 'pointer'
}}
/>
</IconButton>
</Tooltip>
</Box>
</Backdrop>
<AudioRecorder
allowMicrophone={allowMicrophone}
onRecordingStop={handleRecordingStop}
ref={audioRecorderRef}
/>
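{/* Debug playback of the last recording. The object URL created inline is
never revoked; a useMemo with a URL.revokeObjectURL cleanup would avoid
leaking on re-renders. */}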
{audioBlob && <audio controls src={URL.createObjectURL(audioBlob)} />}
</>
);
};
export default AudioUI;
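// --- AudioRecord.tsx (file name from the './AudioRecord' import above) ---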
import {
useState,
useEffect,
useRef,
forwardRef,
useImperativeHandle
} from 'react';
export interface AudioRecorderHandle {
stopRecording: () => Promise<Blob | null>;
}
interface AudioRecorderProps {
allowMicrophone: boolean;
onRecordingStop: (audioBlob: Blob) => void;
}
const AudioRecorder = forwardRef<AudioRecorderHandle, AudioRecorderProps>(
({ allowMicrophone, onRecordingStop }, ref) => {
const [isRecording, setIsRecording] = useState(false);
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const audioChunksRef = useRef<Blob[]>([]);
const audioStreamRef = useRef<MediaStream | null>(null);
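// Start or stop recording whenever the mic toggle flips. stopRecording() is
// safe to call when nothing is active: it simply resolves to null.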
useEffect(() => {
if (allowMicrophone && !isRecording) {
startRecording();
} else {
stopRecording();
}
}, [allowMicrophone]);
const startRecording = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
audioStreamRef.current = stream;
mediaRecorderRef.current = new MediaRecorder(stream);
mediaRecorderRef.current.ondataavailable = (e) => {
if (e.data.size > 0) {
audioChunksRef.current.push(e.data);
}
};
mediaRecorderRef.current.start();
setIsRecording(true);
} catch (error) {
console.error('Error accessing microphone:', error);
}
};
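// Stop the recorder and resolve with the assembled blob. Chunks are joined in
// onstop so the final dataavailable event is included; the mic tracks are then
// released so the browser's recording indicator turns off.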
const stopRecording = () => {
return new Promise<Blob | null>((resolve) => {
if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') {
mediaRecorderRef.current.onstop = () => {
const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
onRecordingStop(audioBlob);
resolve(audioBlob);
audioChunksRef.current = [];
if (audioStreamRef.current) {
audioStreamRef.current.getTracks().forEach((track) => track.stop());
audioStreamRef.current = null;
}
};
mediaRecorderRef.current.stop();
} else {
resolve(null);
}
});
};
useImperativeHandle(ref, () => ({
stopRecording
}));
return null;
}
);
export default AudioRecorder;
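// --- Usage sketch (hypothetical, not part of the original paste) ---
// AudioUI expects its parent to own the overlay and microphone state, per the
// AudioUIProps interface above; import paths are assumptions.
//
// import { useState } from 'react';
// import AudioUI from './AudioUI';
//
// const VoiceChatLauncher = () => {
//   const [showOverlay, setShowOverlay] = useState(false);
//   const [allowMicrophone, setAllowMicrophone] = useState(false);
//   return (
//     <>
//       <button onClick={() => { setShowOverlay(true); setAllowMicrophone(true); }}>
//         Start voice chat
//       </button>
//       <AudioUI
//         toggleOverlay={() => setShowOverlay((prev) => !prev)}
//         showOverlay={showOverlay}
//         allowMicrophone={allowMicrophone}
//         setAllowMicrophone={setAllowMicrophone}
//       />
//     </>
//   );
// };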