Important: This little demo only works in a recent Chrome or Firefox browser with microphone access allowed. In Firefox, go to about:config and enable the media.webspeech.recognition.enable and media.webspeech.synth.enabled flags.
A voice interface to navigate and interact with a narrated adventure game similar to the old command line games.
The little game makes use of the built-in speech synthesis and recognition as well as getUserMedia provided by current browsers.
To interact and navigate, use:
Simple drawing routine:
// Draws a damped sine pulse across the full width of the canvas, centred
// vertically, then fills and strokes the resulting path.
//
// phase       - horizontal phase offset of the wave (radians)
// freq        - angular frequency of the wave
// damping     - extra scale factor applied to the amplitude
// amplitude   - base amplitude as a fraction of the canvas height
// fillColor   - fill style for the enclosed path
// strokeColor - stroke style for the curve
// lineWidth   - stroke width in pixels
//
// NOTE(review): relies on the file-level `ctx` and `canvas` globals.
drawSine = function( phase, freq, damping, amplitude, fillColor, strokeColor, lineWidth )
{
	// BUG FIX: the original called ctx.moveTo( 0, 0 ) BEFORE beginPath();
	// beginPath() discards the current path, so that moveTo was dead code.
	// It is removed rather than moved: the first lineTo() on an empty path
	// behaves like moveTo(), which is what produced the intended curve.
	ctx.beginPath();
	ctx.strokeStyle = strokeColor;
	ctx.fillStyle = fillColor;
	ctx.lineWidth = lineWidth;
	let w = canvas.width;
	let h = canvas.height;
	let s = 0.02; // sampling step along x
	let b = 2.8;  // half-width of the sampled interval [-b, b]
	for( let x = -b; x <= b; x += s )
	{
		// Bell-shaped envelope (the Math.pow term) fades the wave out towards
		// the edges; h * 0.5 shifts it to the vertical centre. Not optimized.
		ctx.lineTo(
			w * ( x + b ) / ( b * 2 ),
			amplitude * damping * h *
			Math.pow( ( b * 2 ) / ( ( b * 2 ) + Math.pow( x, 4 ) ), b ) *
			Math.sin( freq * x - phase ) +
			h * 0.5
		);
	}
	ctx.fill();
	ctx.stroke();
};
A very simple setup to handle speech recognition and synthesis:
// Request microphone access and wire the stream into an AnalyserNode so the
// visualiser can read live frequency data into `audioData`.
navigator.mediaDevices.getUserMedia( { audio: true } ).then( function( stream )
{
	// Build the analyser chain lazily, once; later grants reuse it.
	if( !audioAnalyser )
	{
		let audioContext = new AudioContext();
		audioAnalyser = audioContext.createAnalyser();
		audioAnalyser.smoothingTimeConstant = 0.8;
		audioAnalyser.fftSize = 32; // tiny FFT: 16 bins, enough for a VU-style display
		audioData = new Uint8Array( audioAnalyser.frequencyBinCount );
		audioStream = audioContext.createMediaStreamSource( stream );
		audioStream.connect( audioAnalyser );
	}
} )
.catch( function( err )
{
	// BUG FIX: the error was silently swallowed; surface it so a denied or
	// missing microphone is visible during development.
	console.error( "Error connecting to audio:", err );
} );
// --- Speech-recognition setup -------------------------------------------
// `Recognition` is assumed to be the (possibly vendor-prefixed)
// SpeechRecognition constructor captured elsewhere in the file.
voiceCommand = "";
recognition = new Recognition();
recognition.continuous = false;     // one phrase per push-to-talk press
recognition.interimResults = true;  // deliver partial transcripts while speaking
recognition.maxAlternatives = 3;
recognition.lang = "en-IN";
recognition.addEventListener( "start", function()
{
	recognizing = true;
} );
recognition.addEventListener( "end", function()
{
	// The engine has already stopped when "end" fires, so the original
	// recognition.stop() call here was a no-op and has been removed.
	recognizing = false;
	// Feed the accumulated final transcript to the game, then reset it.
	if( voiceCommand && voiceCommand.length > 1 )
	{
		speak( adventure.next( voiceCommand ) );
		voiceCommand = "";
	}
} );
recognition.addEventListener( "error", function( event )
{
	// BUG FIX: errors were silently ignored; log them (e.g. "no-speech",
	// "not-allowed") so failures are diagnosable.
	console.error( "Speech recognition error:", event.error );
} );
recognition.addEventListener( "result", function( event )
{
	let interim = "";
	for( let i = event.resultIndex; i < event.results.length; i++ )
	{
		let result = event.results[i];
		// BUG FIX: replace( "\n", " " ) with a string pattern only replaces
		// the FIRST newline; use a global regex to replace them all.
		let transcript = result[0].transcript.replace( /\n/g, " " );
		if( result.isFinal )
		{
			voiceCommand += transcript;
		}
		else
		{
			interim += transcript;
		}
	}
	// display voiceCommand and interim if you like
} );
// Queue `text` on the speech synthesiser with a fixed en-US voice profile
// (full volume, normal rate, slightly lowered pitch).
// NOTE(review): the utterance is also retained in the module-level
// `utterances` array — presumably to keep it from being garbage-collected
// before playback finishes; confirm against the rest of the file.
function speak( text )
{
	const utterance = new SpeechSynthesisUtterance();
	Object.assign( utterance, {
		text,
		lang: "en-US",
		volume: 1.0,
		rate: 1.0,
		pitch: 0.8,
	} );
	utterances.push( utterance );
	Synthesis.speak( utterance );
}
// Push-to-talk press: cut off any ongoing narration so it does not talk
// over the user, clear the pending command, and start listening.
function onButtonDown()
{
	if( Synthesis.speaking )
	{
		Synthesis.cancel(); // BUG FIX: missing statement semicolon
	}
	voiceCommand = "";
	// BUG FIX: start() throws an InvalidStateError if recognition is already
	// running (e.g. a double press), so guard with the `recognizing` flag.
	if( !recognizing )
	{
		recognition.start();
	}
} // BUG FIX: stray ";" after the function declaration removed
// Push-to-talk release: stop capturing immediately. abort() discards any
// pending result (unlike stop(), which would still deliver one); the
// accumulated `voiceCommand`, if any, is handled by the "end" listener.
function onButtonUp()
{
	recognition.abort();
} // BUG FIX: the declaration was closed with "} );" — a syntax error