!7

cmodule CruddieSpeechDemo > DynPrintLogAndEnabled {
  set flag NoNanoHTTPD.

  !include #1029545 // API for Eleu

  switchable int vadUpdateInterval = 100;
  switchable double listenTime = 3.0; // listen for 3 seconds after voice activity
  switchable double listenTimeAfterActualSpeech = 10.0; // listen for 10 seconds after actual speech recognized
  switchable double transcriptTitleShowTime = 5.0; // how long to show recognized text in window title
  switchable bool showVadStatus;
  switchable int initialHumVolume = 0;

  S myLink() { ret "https://cruddie.site/"; }

  S controls() {
    ret " " + tag("button", "...", onclick := lineBreaksToSpaces([[
        startOrStopSpeechRecog();
        if (bigOn) {
          lastHadVoice = Date.now();
          startVAD();
          startUpdater();
          humOn();
        } else
          stopVAD();
      ]]), type := 'button, class := 'speechOnBtn, disabled := 'disabled, display := 'inline)
      + hdiv(hsnippetimg(#1102938, width := 24, height := 24, title := "Streaming audio to cloud"),
          style := "display: inline; visibility: hidden; margin-left: 10px", class := "listenStatus")
      + (!showVadStatus ? "" : hdiv(hsnippetimg(#1102908, width := 24, height := 24, title := "Someone is speaking (either me or you)"),
          style := "display: inline; visibility: hidden; margin-left: 10px", class := "vadStatus"));
  }

  O html(virtual Request request) { try {
    S uri = cast get(request, 'uri);
    SS params = cast get(request, 'params);
    print(+params);

    S jsOnSpeech = [[
      if (transcript == 'stop listening')
        stopVAD();
      else
        window.submitAMsg(transcript);
      lastHeard = transcript;
      lastHeardWhen = Date.now();
    ]];

    int humVolume = initialHumVolume;

    ret hhtml(hmobilefix() + hhead(
        htitle("CRUDDIE Speech Recog Demo")
      + hLoadJQuery2()
      + hjs_humWithFade(humVolume/100.0))
      + hbody(
        hdiv(controls())
      + hjs_focusEnd()
      + hjs([[
          window.submitAMsg = function(msg) {
            var ta = $("textarea[name=speechText]");
            var text = ta.val();
            ta.val((text && !text.endsWith("\n") ? text + "\n" : text) + msg);
            ta.focusEnd(true); // true = noFocus
            if (ta[0]) ta[0].scrollTop = ta[0].scrollHeight;
          };
        ]])
      + hdiv(htextarea("", name := "speechText", cols := 60, rows := 4))
      + p("Hum volume (sound when listening): " + htextinput("", humVolume, style := "width: 5ch",
          id := "humVolumeInput", onInput := "updateHumVolume(parseInt(this.value)/100.0)"))
      + hSpeechRecognition(jsOnSpeech, true, "en-US", false,
          noWebKit := p("Use Chrome if you want speech recognition"))
      + hjavascript([[
          function say(text) {
            console.log("Saying: " + text);
            var u = new SpeechSynthesisUtterance(text);
            u.lang = 'en-US';
            u.onstart = function() { console.log("speech start"); meSpeaking = true; };
            u.onend = function() { meSpeaking = false; };
            window.speechSynthesis.speak(u);
          }
        ]])
      + hVAD(
          [[console.log("voice start"); $(".vadStatus").css("visibility", "visible");]],
          [[console.log("voice stop"); $(".vadStatus").css("visibility", "hidden");]],
          false)
      + hjs_setTitleStatus()
      + hjs(replaceDollarVars([[
          var updater;
          var lastHadVoice = 0;
          var lastHeard, lastHeardWhen = 0;
          var meSpeaking = false;
          //audioMeterDebug = true;

          function startUpdater() {
            if (updater) return;
            console.log("Starting updater");
            updater = setInterval(vadMagicUpdate, $interval);
            srPause = true;
          }

          function stopUpdater() {
            if (!updater) return;
            console.log("Stopping updater");
            clearInterval(updater);
            updater = null;
            window.resetTitle();
          }

          function vadMagicUpdate() {
            var now = Date.now();
            var hasVoice = vadHasVoice();
            var clipping = vadHasClipping();
            if (hasVoice) lastHadVoice = now;
            var shouldListen1 = bigOn && (lastHadVoice >= now-$listenTime || lastHeardWhen >= now-$listenTimeAfterActualSpeech);
            var shouldListen = !meSpeaking && shouldListen1;

            var titleStatus = "";
            if (lastHeardWhen >= now-$transcriptTitleShowTime)
              titleStatus = lastHeard + " |";
            else if (shouldListen)
              titleStatus = $listeningSymbol;
            else if (bigOn)
              titleStatus = $ear;
            if (clipping) titleStatus = "! " + titleStatus;
            window.setTitleStatus(titleStatus);

            if (srPause != !shouldListen) {
              console.log(shouldListen ? "Listening" : "Not listening");
              srPause = !shouldListen;
              srUpdate();
            }

            if (shouldListen1) humOn(); else humOff();

            if (!bigOn) { stopUpdater(); return; }
          }

          // debug mic level
          /*setInterval(function() {
            if (audioMeter)
              console.log("Mic level: " + audioMeter.absLevel);
          }, 1000);*/
        ]],
        interval := vadUpdateInterval,
        listenTime := toMS(listenTime),
        listenTimeAfterActualSpeech := toMS(listenTimeAfterActualSpeech),
        transcriptTitleShowTime := toMS(transcriptTitleShowTime),
        listeningSymbol := jsQuote(/*"[LISTENING]"*/unicode_cloud()),
        ear := jsQuote(unicode_ear())))
    ));
  } catch e { printStackTrace(e); throw rethrow(e); } }
}
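Note on the timing switches: the embedded vadMagicUpdate() keeps the recognizer active while voice activity was seen within listenTime, or while an actual transcript arrived within listenTimeAfterActualSpeech, and never while the page itself is speaking. A minimal standalone JavaScript sketch of that rule, extracted as a pure function for clarity (the name shouldListen and the state/config objects are illustrative, not part of the snippet; times are in milliseconds):

  // Sketch only: mirrors the shouldListen decision in vadMagicUpdate().
  function shouldListen(now, state, config) {
    // Voice activity detected recently, or an actual transcript heard recently?
    var voiceRecent  = state.lastHadVoice >= now - config.listenTime;
    var speechRecent = state.lastHeardWhen >= now - config.listenTimeAfterActualSpeech;
    var wantListen = state.bigOn && (voiceRecent || speechRecent);
    // Never listen while the page's own speech synthesis is talking (avoids feedback).
    return !state.meSpeaking && wantListen;
  }

  // Example: 2 s after the last voice activity, with listenTime = 3 s -> still listening.
  console.log(shouldListen(10000,
    { bigOn: true, meSpeaking: false, lastHadVoice: 8000, lastHeardWhen: 0 },
    { listenTime: 3000, listenTimeAfterActualSpeech: 10000 })); // true

In the snippet itself this value is inverted into srPause (pausing the speech recognizer), while the pre-meSpeaking value additionally gates the "hum" sound.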
Began life as a copy of #1028961
Travelled to 4 computer(s): bhatertpkbcr, mqqgnosmbjvj, pyentgdyhuwx, vouqrxazstgt
Snippet ID: #1030310
Snippet name: Cruddie, only the speech recognition part [OK]
Eternal ID of this version: #1030310/19
Text MD5: 2bbc8952c69d37324e03db1d1e472baa
Transpilation MD5: d570a1bc1a63ae45714c87ddfe0bf95d
Author: stefan
Category: javax
Type: JavaX source code (Dynamic Module)
Public (visible to everyone): Yes
Archived (hidden from active list): No
Created/modified: 2020-11-30 15:15:46
Source code size: 5725 bytes / 142 lines
Pitched / IR pitched: No / No
Views / Downloads: 207 / 22551
Version history: 18 change(s)