Overview
This tutorial walks you through building a fully functional voice chat application using NextEVI’s React SDK. You’ll learn how to set up voice connections, handle user input, and display conversation messages.
This example demonstrates the core concepts of NextEVI integration and can serve as a starting point for more complex applications.
Prerequisites
React 16.8+ with hooks support
NextEVI API credentials (API key, project ID, config ID)
Modern browser with microphone access (see the feature check below)
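You can verify microphone support before rendering the voice UI. This is a minimal sketch using only standard browser APIs, independent of the NextEVI SDK:

function supportsVoiceChat() {
  // getUserMedia requires a secure context (HTTPS or localhost)
  return Boolean(
    navigator.mediaDevices &&
    typeof navigator.mediaDevices.getUserMedia === 'function' &&
    window.isSecureContext
  );
}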
Complete Example
Here’s the full working example:
import React, { useState } from 'react';
import { VoiceProvider, useVoice } from '@nextevi/voice-react';
import './VoiceChat.css';

// Main App component with provider
function App() {
  return (
    <div className="app">
      <VoiceProvider debug={true}>
        <header className="app-header">
          <h1>NextEVI Voice Chat</h1>
          <p>Click connect and start speaking!</p>
        </header>
        <VoiceChat />
      </VoiceProvider>
    </div>
  );
}

// Voice chat component
function VoiceChat() {
  const {
    connect,
    disconnect,
    readyState,
    messages,
    isRecording,
    isTTSPlaying,
    error,
    clearMessages
  } = useVoice();

  const [isConnecting, setIsConnecting] = useState(false);

  const handleConnect = async () => {
    setIsConnecting(true);
    try {
      await connect({
        auth: {
          apiKey: process.env.REACT_APP_NEXTEVI_API_KEY || "oak_your_api_key_here",
          projectId: process.env.REACT_APP_NEXTEVI_PROJECT_ID || "your_project_id",
          configId: process.env.REACT_APP_NEXTEVI_CONFIG_ID || "your-config-id"
        },
        audioConfig: {
          sampleRate: 24000,
          channels: 1,
          echoCancellation: true,
          noiseSuppression: true
        }
      });
    } catch (error) {
      console.error('Failed to connect:', error);
      alert(`Connection failed: ${error.message}`);
    } finally {
      setIsConnecting(false);
    }
  };

  const handleDisconnect = () => {
    disconnect();
  };

  const getConnectionButtonText = () => {
    if (isConnecting) return 'Connecting...';
    if (readyState === 'connected') return 'Connected ✓';
    if (readyState === 'connecting') return 'Connecting...';
    return 'Connect to Voice AI';
  };

  const getStatusColor = () => {
    switch (readyState) {
      case 'connected': return '#22c55e';
      case 'connecting': return '#f59e0b';
      case 'error': return '#ef4444';
      default: return '#6b7280';
    }
  };

  return (
    <div className="voice-chat">
      {/* Connection Controls */}
      <div className="controls">
        <button
          className="connect-button"
          onClick={handleConnect}
          disabled={isConnecting || readyState === 'connecting'}
          style={{ backgroundColor: getStatusColor() }}
        >
          {getConnectionButtonText()}
        </button>

        {readyState === 'connected' && (
          <button className="disconnect-button" onClick={handleDisconnect}>
            Disconnect
          </button>
        )}

        {messages.length > 0 && (
          <button className="clear-button" onClick={clearMessages}>
            Clear Chat
          </button>
        )}
      </div>

      {/* Status Display */}
      <div className="status">
        <div className="status-item">
          <span className="label">Status:</span>
          <span className="value" style={{ color: getStatusColor() }}>
            {readyState}
          </span>
        </div>
        <div className="status-item">
          <span className="label">Audio:</span>
          <span className="value">
            {isRecording && '🎤 Listening'}
            {isTTSPlaying && '🔊 Speaking'}
            {!isRecording && !isTTSPlaying && '💤 Idle'}
          </span>
        </div>
      </div>

      {/* Error Display */}
      {error && (
        <div className="error">
          <strong>Error:</strong> {error.message}
          {error.code && <span className="error-code">({error.code})</span>}
        </div>
      )}

      {/* Messages */}
      <div className="messages">
        {messages.length === 0 ? (
          <div className="empty-state">
            <p>No messages yet. Connect and start speaking!</p>
          </div>
        ) : (
          messages.map(message => (
            <Message key={message.id} message={message} />
          ))
        )}
      </div>

      {/* Instructions */}
      {readyState === 'connected' && (
        <div className="instructions">
          <p>💡 <strong>Tips:</strong></p>
          <ul>
            <li>Speak clearly into your microphone</li>
            <li>Wait for the AI to finish speaking before responding</li>
            <li>You can interrupt the AI by speaking</li>
          </ul>
        </div>
      )}
    </div>
  );
}

// Message component
function Message({ message }) {
  const isUser = message.type === 'user';
  const isError = message.type === 'error';

  return (
    <div className={`message ${message.type}`}>
      <div className="message-header">
        <span className="sender">
          {isUser ? '👤 You' : '🤖 Assistant'}
        </span>
        <span className="timestamp">
          {message.timestamp.toLocaleTimeString()}
        </span>
      </div>
      <div className="message-content">
        {message.content}
      </div>

      {/* Show emotion data if available */}
      {message.metadata?.emotions && (
        <div className="emotions">
          <EmotionDisplay emotions={message.metadata.emotions} />
        </div>
      )}

      {/* Show error details */}
      {isError && message.metadata?.errorCode && (
        <div className="error-details">
          Error Code: {message.metadata.errorCode}
        </div>
      )}
    </div>
  );
}

// Emotion display component
function EmotionDisplay({ emotions }) {
  // Pick the emotion with the highest score
  const dominantEmotion = Object.entries(emotions)
    .reduce((a, b) => (a[1] > b[1] ? a : b))[0];

  const emotionEmojis = {
    joy: '😊',
    sadness: '😢',
    anger: '😠',
    fear: '😰',
    surprise: '😲',
    disgust: '🤢',
    neutral: '😐'
  };

  return (
    <div className="emotion-display">
      <span className="dominant-emotion">
        {emotionEmojis[dominantEmotion]} {dominantEmotion}
      </span>
    </div>
  );
}

export default App;
Styling (VoiceChat.css)
.app {
  max-width: 800px;
  margin: 0 auto;
  padding: 20px;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

.app-header {
  text-align: center;
  margin-bottom: 30px;
}

.app-header h1 {
  color: #7c3aed;
  margin-bottom: 8px;
}

.app-header p {
  color: #6b7280;
  margin: 0;
}

.voice-chat {
  background: white;
  border-radius: 12px;
  box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
  padding: 24px;
}

/* Controls */
.controls {
  display: flex;
  gap: 12px;
  margin-bottom: 20px;
  flex-wrap: wrap;
}

.connect-button {
  padding: 12px 24px;
  color: white;
  border: none;
  border-radius: 8px;
  font-weight: 600;
  cursor: pointer;
  transition: all 0.2s;
}

.connect-button:disabled {
  opacity: 0.6;
  cursor: not-allowed;
}

.connect-button:hover:not(:disabled) {
  transform: translateY(-1px);
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
}

.disconnect-button, .clear-button {
  padding: 8px 16px;
  background: #6b7280;
  color: white;
  border: none;
  border-radius: 6px;
  cursor: pointer;
  transition: background 0.2s;
}

.disconnect-button:hover {
  background: #ef4444;
}

.clear-button:hover {
  background: #374151;
}

/* Status */
.status {
  display: flex;
  gap: 24px;
  margin-bottom: 20px;
  padding: 16px;
  background: #f9fafb;
  border-radius: 8px;
  flex-wrap: wrap;
}

.status-item {
  display: flex;
  align-items: center;
  gap: 8px;
}

.status-item .label {
  font-weight: 600;
  color: #374151;
}

.status-item .value {
  font-weight: 500;
}

/* Error */
.error {
  background: #fef2f2;
  color: #dc2626;
  padding: 12px;
  border-radius: 6px;
  margin-bottom: 20px;
  border-left: 4px solid #dc2626;
}

.error-code {
  font-family: monospace;
  font-size: 0.875rem;
  opacity: 0.8;
  margin-left: 8px;
}

/* Messages */
.messages {
  max-height: 400px;
  overflow-y: auto;
  border: 1px solid #e5e7eb;
  border-radius: 8px;
  padding: 16px;
  margin-bottom: 20px;
}

.empty-state {
  text-align: center;
  color: #6b7280;
  font-style: italic;
  padding: 40px 20px;
}

/* Message */
.message {
  margin-bottom: 16px;
  padding: 12px;
  border-radius: 8px;
  position: relative;
}

.message.user {
  background: #eff6ff;
  border-left: 4px solid #3b82f6;
}

.message.assistant {
  background: #f0fdf4;
  border-left: 4px solid #22c55e;
}

.message.error {
  background: #fef2f2;
  border-left: 4px solid #ef4444;
}

.message-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 8px;
}

.sender {
  font-weight: 600;
  font-size: 0.875rem;
}

.timestamp {
  font-size: 0.75rem;
  color: #6b7280;
}

.message-content {
  line-height: 1.5;
}

/* Emotions */
.emotions {
  margin-top: 8px;
  padding-top: 8px;
  border-top: 1px solid rgba(0, 0, 0, 0.1);
}

.emotion-display {
  font-size: 0.875rem;
}

.dominant-emotion {
  background: rgba(0, 0, 0, 0.05);
  padding: 4px 8px;
  border-radius: 4px;
  display: inline-block;
}

/* Instructions */
.instructions {
  background: #f0f9ff;
  padding: 16px;
  border-radius: 8px;
  border-left: 4px solid #0ea5e9;
}

.instructions p {
  margin: 0 0 8px 0;
  color: #0c4a6e;
  font-weight: 600;
}

.instructions ul {
  margin: 0;
  color: #075985;
}

.instructions li {
  margin-bottom: 4px;
}

/* Responsive design */
@media (max-width: 768px) {
  .app {
    padding: 12px;
  }

  .voice-chat {
    padding: 16px;
  }

  .controls {
    flex-direction: column;
  }

  .status {
    flex-direction: column;
    gap: 12px;
  }

  .messages {
    max-height: 300px;
  }
}
Environment Variables
Create a .env.local file in your React project:
REACT_APP_NEXTEVI_API_KEY=oak_your_api_key_here
REACT_APP_NEXTEVI_PROJECT_ID=your_project_id_here
REACT_APP_NEXTEVI_CONFIG_ID=your_config_id_here
Never commit API keys to version control. Use environment variables or secure configuration management.
Step-by-Step Breakdown
1. Set Up the Provider
The VoiceProvider component manages the voice connection state and provides context to child components:
<VoiceProvider debug={true}>
  <VoiceChat />
</VoiceProvider>
2. Use the Voice Hook
The useVoice hook gives you access to all voice functionality:
const {
  connect,        // Function to connect to voice AI
  disconnect,     // Function to disconnect
  readyState,     // Connection status
  messages,       // Array of conversation messages
  isRecording,    // Boolean: is microphone active
  isTTSPlaying,   // Boolean: is AI speaking
  error,          // Error object if any
  clearMessages   // Function to clear chat history
} = useVoice();
3. Handle Connection
Connect to the voice AI with your credentials:
const handleConnect = async () => {
  try {
    await connect({
      auth: {
        apiKey: "oak_your_api_key",
        projectId: "your_project_id",
        configId: "your-config-id"
      },
      audioConfig: {
        sampleRate: 24000,
        echoCancellation: true,
        noiseSuppression: true
      }
    });
  } catch (error) {
    console.error('Connection failed:', error);
  }
};
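It's also good hygiene to close the connection when the component unmounts. A minimal sketch using a React cleanup effect, assuming disconnect is stable across renders and safe to call while not connected (worth confirming against the SDK reference):

useEffect(() => {
  // Disconnect when the component unmounts
  return () => {
    disconnect();
  };
}, [disconnect]);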
4. Display Messages
Render the conversation messages with proper styling:
{messages.map(message => (
  <div key={message.id} className={`message ${message.type}`}>
    <div className="sender">
      {message.type === 'user' ? '👤 You' : '🤖 Assistant'}
    </div>
    <div className="content">{message.content}</div>
    <div className="timestamp">
      {message.timestamp.toLocaleTimeString()}
    </div>
  </div>
))}
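As the conversation grows, you will usually want the newest message kept in view. A small sketch using standard React refs; MessageList and bottomRef are illustrative names, and Message is the component from the full example:

import React, { useRef, useEffect } from 'react';

function MessageList({ messages }) {
  const bottomRef = useRef(null);

  // Scroll to the newest message whenever the list changes
  useEffect(() => {
    bottomRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [messages]);

  return (
    <div className="messages">
      {messages.map(message => (
        <Message key={message.id} message={message} />
      ))}
      <div ref={bottomRef} />
    </div>
  );
}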
5. Handle Audio States
Show visual feedback for recording and playback states:
<div className="audio-status">
  {isRecording && '🎤 Listening'}
  {isTTSPlaying && '🔊 Speaking'}
  {!isRecording && !isTTSPlaying && '💤 Idle'}
</div>
Enhancements
Add Emotion Display
Show detected emotions in messages:
function MessageWithEmotions({ message }) {
  const emotions = message.metadata?.emotions;

  return (
    <div className="message">
      <div className="content">{message.content}</div>
      {emotions && (
        <div className="emotions">
          {Object.entries(emotions).map(([emotion, score]) => (
            <span key={emotion} className="emotion-tag">
              {emotion}: {(score * 100).toFixed(0)}%
            </span>
          ))}
        </div>
      )}
    </div>
  );
}
Add Connection Recovery
Handle network interruptions gracefully:
import { useState, useEffect, useCallback, useRef } from 'react';
import { useVoice } from '@nextevi/voice-react';

function useConnectionRecovery() {
  const { readyState, connect } = useVoice();
  const [retryCount, setRetryCount] = useState(0);
  // Keep the last config in a ref so retries never read a stale value
  const lastConfigRef = useRef(null);

  const scheduleRetry = useCallback(() => {
    if (retryCount < 3 && lastConfigRef.current) {
      const delay = Math.pow(2, retryCount) * 1000; // Exponential backoff: 1s, 2s, 4s
      setTimeout(() => {
        setRetryCount(prev => prev + 1);
        connect(lastConfigRef.current).catch(err => {
          console.error('Retry failed:', err);
        });
      }, delay);
    }
  }, [retryCount, connect]);

  const connectWithRetry = async (config) => {
    lastConfigRef.current = config;
    setRetryCount(0);
    try {
      await connect(config);
    } catch (error) {
      console.error('Connection failed:', error);
      scheduleRetry();
    }
  };

  // If a retry attempt also ends in an error state, schedule another one
  useEffect(() => {
    if (readyState === 'error' && retryCount > 0) {
      scheduleRetry();
    }
  }, [readyState, retryCount, scheduleRetry]);

  return { connectWithRetry, retryCount };
}
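A minimal usage sketch; RecoveringConnectButton is an illustrative name, and the config shape matches the main example:

function RecoveringConnectButton() {
  const { connectWithRetry, retryCount } = useConnectionRecovery();

  return (
    <button onClick={() => connectWithRetry({ auth: { /* credentials as in the main example */ } })}>
      {retryCount > 0 ? `Reconnecting (attempt ${retryCount} of 3)` : 'Connect'}
    </button>
  );
}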
Add Voice Visualization
Show voice activity with visual feedback:
import React, { useState, useEffect } from 'react';
import { useVoice } from '@nextevi/voice-react';

function VoiceVisualizer() {
  const { isRecording } = useVoice();
  const [audioLevel, setAudioLevel] = useState(0);

  // Simulate the audio level (in a real app, read it from an audio context)
  useEffect(() => {
    if (!isRecording) {
      setAudioLevel(0);
      return;
    }
    const interval = setInterval(() => {
      setAudioLevel(Math.random() * 100);
    }, 100);
    return () => clearInterval(interval);
  }, [isRecording]);

  return (
    <div className="voice-visualizer">
      <div className="audio-bars">
        {Array.from({ length: 5 }, (_, i) => (
          <div
            key={i}
            className="bar"
            style={{
              height: `${Math.max(10, audioLevel - i * 20)}%`,
              backgroundColor: isRecording ? '#22c55e' : '#e5e7eb'
            }}
          />
        ))}
      </div>
    </div>
  );
}
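To replace the simulated level with a real one, you can tap the microphone with the Web Audio API. This is a browser-level sketch independent of the NextEVI SDK; useMicLevel is an illustrative name, and it opens its own microphone stream, so check whether your SDK lets you reuse its stream instead:

import { useState, useEffect } from 'react';

// Hypothetical helper: reports a 0-100 mic level while `active` is true
function useMicLevel(active) {
  const [level, setLevel] = useState(0);

  useEffect(() => {
    if (!active) {
      setLevel(0);
      return;
    }
    let rafId;
    let audioContext;
    let stream;

    navigator.mediaDevices.getUserMedia({ audio: true }).then(micStream => {
      stream = micStream;
      audioContext = new AudioContext();
      const source = audioContext.createMediaStreamSource(stream);
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      source.connect(analyser);

      const data = new Uint8Array(analyser.frequencyBinCount);
      const tick = () => {
        analyser.getByteFrequencyData(data);
        const avg = data.reduce((sum, v) => sum + v, 0) / data.length;
        setLevel((avg / 255) * 100); // Normalize to a 0-100 range
        rafId = requestAnimationFrame(tick);
      };
      tick();
    });

    return () => {
      cancelAnimationFrame(rafId);
      stream?.getTracks().forEach(track => track.stop());
      audioContext?.close();
    };
  }, [active]);

  return level;
}

VoiceVisualizer could then replace its simulated state with const audioLevel = useMicLevel(isRecording);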
Common Issues & Solutions
Problem: Browser blocks microphone access
Solution:
Ensure an HTTPS connection (required for microphone access)
Guide the user to allow microphone permissions
Provide a clear error message with instructions (see the sketch below)
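One way to produce clearer error messages is to request the microphone yourself before calling connect, so a permission problem is distinguishable from a connection failure. A sketch using plain browser APIs, not part of the SDK:

async function checkMicrophone() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    stream.getTracks().forEach(track => track.stop()); // Release the test stream
    return { ok: true };
  } catch (err) {
    if (err.name === 'NotAllowedError') {
      return { ok: false, reason: 'Microphone permission was denied. Allow access in your browser settings.' };
    }
    if (err.name === 'NotFoundError') {
      return { ok: false, reason: 'No microphone was found on this device.' };
    }
    return { ok: false, reason: `Microphone error: ${err.message}` };
  }
}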
Problem: Connection fails or times out
Solution:
Implement retry logic with exponential backoff (see the useConnectionRecovery hook above)
Check network connectivity (a sketch follows this list)
Verify API credentials
Show helpful error messages
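You can also listen for the browser's online event to trigger a reconnect when the network returns. Here reconnect is whatever connect wrapper you use, for example connectWithRetry from the hook above:

import { useEffect } from 'react';

// Re-attempt the connection when the browser regains network access
function useReconnectOnline(reconnect) {
  useEffect(() => {
    window.addEventListener('online', reconnect);
    return () => window.removeEventListener('online', reconnect);
  }, [reconnect]);
}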
Problem: Poor transcription or audio quality
Solution:
Enable noise suppression and echo cancellation
Guide the user to use headphones
Check microphone quality (see the device-listing sketch below)
Adjust the audio configuration
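To help users switch to a better microphone, you can enumerate audio inputs with the standard enumerateDevices API. Whether the selected device ID can be passed through audioConfig depends on the SDK, so treat that step as an assumption and check the SDK reference:

// List available microphones so the user can pick a better one.
// Note: device labels are only populated after microphone permission is granted.
async function listMicrophones() {
  const devices = await navigator.mediaDevices.enumerateDevices();
  return devices
    .filter(device => device.kind === 'audioinput')
    .map(device => ({ id: device.deviceId, label: device.label || 'Microphone' }));
}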
Next Steps
This example provides a solid foundation for voice applications. Customize the UI, add your business logic, and enhance with additional NextEVI features as needed.