!7

// Collects speech recognition results ("interpretations") for the most
// recent audio recording and correlates them by recording start time.
cmodule SpeechCorrelator1 > DynPrintLog {
  switchable bool react; // react to new recordings?
  long audioReceived; // timestamp of when the current audio arrived
  S startTime /*, endTime*/; // start time of current recording, extracted from the audio file name
  new L<Interpretation> interpretations_raw;
  transient L<Interpretation> interpretations; // synchronized view of interpretations_raw
  //Interpretation bestInterpretation;

  srecord Interpretation(S text, S info, long received) {}

  start {
    interpretations = dm_synchroList(interpretations_raw);
    dm_vmBus_answerToMessage dontApplyRecognizedSpeech(() -> react);

    dm_onNewRecording(voidfunc(File f) enter {
      if (!react) ret;
      print("Have WAV: " + f);
      //print(renderFileDateWithSeconds(f));
      setStartTime(extractYMDminusHMS(fileName(f)));
      triggerSpeechRecognitions("wav", f);
    });

    dm_vmBus_onMessage_q newMP3Recording(voidfunc(File f) enter {
      print("Have MP3: " + f);
      setStartTime(extractYMDminusHMS(fileName(f)));
      triggerSpeechRecognitions("mp3", f);
    });

    dm_vmBus_onMessage_q speechRecognized_raw(voidfunc(Map map) enter {
      // map contains: module := module(), +info, +text, audioFile := f
      File f = cast map.get("audioFile");
      S date = f == null ? ymdMinusHms() : extractYMDminusHMS(fileName(f));
      if (f == null) setStartTime(date); // e.g. for Chrome recognizer (no audio file)
      if (!eq(date, startTime))
        ret with print("Ignoring speech result for time " + date);
      Interpretation i;
      interpretations.add(i = nu Interpretation(
        text := map.get("text"),
        info := map.get("info"),
        received := now()-audioReceived)); // latency since audio arrived
      print("New interpretation (total: " + l(interpretations) + "): " + i);
    });
  }

  visual centerAndSouthWithMargins(super, jrightaligned(dm_checkBox react()));

  // type = "mp3" or "wav"
  void triggerSpeechRecognitions(S type, File audioFile) {
  }

  // Remember the new recording's start time; if it changed,
  // reset the received-timestamp and drop old interpretations.
  void setStartTime(S startTime) {
    if (setField(+startTime)) {
      print(+startTime);
      setField(audioReceived := now());
      clear(interpretations);
    }
  }
}
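
// Note: triggerSpeechRecognitions above is intentionally left as a stub.
// Below is a minimal sketch of what it might do, assuming recognizer
// modules listen for a "recognizeSpeech" VM bus message and answer with
// the speechRecognized_raw message handled in start. The message name
// and the litmap payload are assumptions for illustration, not part of
// this module's actual protocol:
//
//   void triggerSpeechRecognitions(S type, File audioFile) {
//     // broadcast the recording so any interested recognizer module
//     // can process it and reply via speechRecognized_raw
//     vmBus_send recognizeSpeech(litmap(type := type, audioFile := audioFile));
//   }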