BotCompany Repo | #1009816 // Continuous Recognition Module (just logs, German/English, with "bigOn" switch)

JavaX source code (desktop) [tags: use-pretranspiled] - run with: x30.jar

Uses 4758K of libraries. A Pure Java version (17855L/138K) is also available.
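
The program registers a local bot named "Chrome Speech." (see makeBot below) and is controlled through the text commands of its answer block ("start recognition", "stop recognition", "just once", "is on", "language \"de-DE\"", ...). Below is a minimal, hypothetical caller sketch; it assumes the module is already running and that the standard sendToLocalBot helper is available. Neither the caller nor that exact helper call is part of this snippet:

!7

// Hypothetical external caller (sketch) - controls the running module via its bot interface.
p {
  print(sendToLocalBot("Chrome Speech.", "is on"));               // -> "Yes" or "No"
  sendToLocalBot("Chrome Speech.", "language " + quote("de-DE")); // switch recognition to German
  sendToLocalBot("Chrome Speech.", "start recognition");
  sleepSeconds(30);
  sendToLocalBot("Chrome Speech.", "stop recognition");
}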

!7

sbool autoClose = true; // close JavaScript window on server lost
sbool autoTurnOn = true; // turn on recognition on program start
static int initialDelay = 0, autoCloseDelay = 5000;
static double firstWindowDelay = 0;
sbool infoBox, doLog = true;
sbool blipOnLanguageSwitch = true; // instead of "switching to..."
sbool blipOnActivation;
sbool hideChromeWindow;
sbool usePassiveLog; // use "passive" log (listening in background)

static int port;
static O onUtterance; // voidfunc(S)
sO onFirstWebSocket; // voidfunc()
static L<WebSocket> webSockets = synchroList();
sbool startRecognition;
static java.util.Timer stopper;
sS language = "en-US";
sS myURL;
static JButton btn;
sbool hadAnyWebSockets; // Does Chrome work at all?
sbool bigOn = true, justOnce = false;
static long speechRecognizerOpened;
static volatile S lastGlobalID;

p {
  load('language);
  if (isMainProgram()) {
    if (isMain()) substance();
    infoBox = true;
  }
  print("Logging speech to: " + speechRecognitionLog());

  NanoHTTPD.SOCKET_READ_TIMEOUT = 24*3600*1000; // not long enough - TODO: Fix in NanoHTTPD

  adjustMicrophoneLevel();

  port = serveHttpWithWebSockets(findFreePortAbove(9999), func(NanoHTTPD.IHTTPSession handshake) {
    print("New WebSocket.");
    WebSocket ws = new WebSocket(handshake) {
      protected void onPong(WebSocketFrame pongFrame) { print("pong"); }
      protected void onMessage(WebSocketFrame messageFrame) ctex {
        fS s = messageFrame.getTextPayload();
        //print("WebSocket message: " + s);
        cancelTimeout();
        infoBoxOrPrint(/*"User said: " +*/ s, infoBox);
          //send("stop");
        //print("Sending start."); send("start");
        new Matches m;
        if (swic_trim(s, "Heard ", m))
          handleUtterance(decensor($1), false);
        else if (eqic(s, "big on")) bigOn = true;
        else if (eqic(s, "big off")) bigOn = false;
      }
      protected void onClose(WebSocketFrame.CloseCode code, String reason, boolean initiatedByRemote) {
        print("WebSocket closed.");
        webSockets.remove(this);
      }
      protected void onException(IOException e) { printStackTrace(e); }
    };
    if (startRecognition) {
      startRecognition = false;
      pcall { ws.send(justOnce ? "just once" : "big on"); }
      justOnce = false;
    }

    // close any other recognizers
    for (WebSocket ws2 : cloneList(webSockets)) {
      print("Closing websocket.");
      pcall { ws2.close(WebSocketFrame.CloseCode.NormalClosure, ""); }
      webSockets.remove(ws2);
    }

    if (!hadAnyWebSockets) {
      hadAnyWebSockets = true;
      pcallF(onFirstWebSocket);
    }

    ret addAndReturn(webSockets, ws);
  });
  myURL = print("http://localhost:" + port + "/popup");
  makeBot("Chrome Speech.");

  thread { sleepSeconds(firstWindowDelay); openSpeechRecognizer(); }

  if (isMain())
    showControls(jcenteredline(btn = jbutton("Open Speech Recognizer", f openSpeechRecognizer)));
  awtEvery(btn, 500, r { setEnabled(btn, empty(webSockets)) });

  /*thread "Chrome Re-Starter" {
    sleepSeconds(20);
    repeat with sleep 5 {
      if (hadAnyWebSockets && empty(webSockets)) {
        openSpeechRecognizer();
        sleepSeconds(15);
      }
    }
  });*/

  //if (autoTurnOn) startRecognition();
}
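
// WebSocket text protocol between this process and the recognizer page
// (summarized from the handlers above and the JavaScript below):
//   page -> server: "Heard <transcript>", "big on", "big off"
//   server -> page: "just once", "big on", "big off", "language <code>"
//     (the page additionally understands "pause"/"unpause", which this module does not send)
// Only the newest WebSocket is kept; older recognizer windows are closed when a new one connects.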

html {
  if (eqic(uri, "/favicon.ico"))
    ret serveFile(loadLibrary(#1013028), "image/x-icon");

  if (neq(uri, "/popup"))
    ret hbody("Opening popup..." + hjavascript([[
      window.open('/popup', 'speech_recognizer', 'width=300,height=300,location=no');
      setTimeout(function() { window.close(); }, 10000);
    ]]));

  // Serve Popup

  ret hhtml(hhead(htitle("Continuous Speech Recognizer")) + hbody(div(
    h3("Continuous Speech Recognizer")
    + [[<link id="favicon" rel="shortcut icon" type="image/png" href="/favicon.png" />]]
    + loadJQuery()
    + hdiv("Language: " + language, id := 'lang, style := "font-size: 10px")
    + hdiv("Results come here", id := 'results, style := "margin: 10px")
  + hjavascript([[
    var websocket;
    var bigOn = #BIGON#, pause = false, listening = false, language = "#LANGUAGE#", justOnce = #JUSTONCE#;
    //var stopUntil = 0;

    window.onfocus = function(event) {
      //if (event.explicitOriginalTarget === window)
        $("#btn").focus();
    };

    function update() {
      if (bigOn) {
        $("#btn").html("Turn off");
        document.title = (pause ? "Paused" : language.substring(3) /*"On"*/) + " - Speech Recognizer";
      } else {
        $("#btn").html("Turn on");
        document.title = "[OFF] Speech Recognizer";
      }

      var should = bigOn && !pause;
      if (should && !listening) startRecognition();
      else if (!should && listening) stopRecognition();
    }

    function stopRecognition() {
      listening = false;
      recognition.stop();
      update();
    }

    function startRecognition() {
      listening = true;
      //if (Date.now() < stopUntil) return;
      recognition.start();
      update();
    }

    function openWebSocket() {
      websocket = new WebSocket("ws://localhost:#PORT#/");
      websocket.onopen = function(event) {
        $("#btn").prop('disabled', false);
        $("#btn").focus();
        $("#results").html(bigOn ? "Listening." : "Click to turn me on.");
        if (bigOn)
          startRecognition();
      };

      websocket.onmessage = function(event) {
        if (event.data == 'just once') { justOnce = bigOn = true; update(); }
        if (event.data == 'big on') { bigOn = true; justOnce = false; update(); }
        if (event.data == 'big off') { bigOn = false; update(); }
        if (event.data == 'pause') { pause = true; update(); }
        if (event.data == 'unpause') { pause = false; update(); }
        if (event.data.substring(0, 9) == 'language ') {
          var l = event.data.substring(9);
          recognition.lang = language = l;
          $("#lang").html("Language: " + l);
        }
      };

      websocket.onclose = function(event) {
        $("#results").html("WebSocket closed");
        if (#AUTOCLOSE#) setTimeout(function() { window.close(); }, autoCloseDelay);
      };
    }

    setTimeout(openWebSocket, #INITIALDELAY#);

    var recognition = new webkitSpeechRecognition();
    recognition.lang = "#LANGUAGE#";

    recognition.onerror = function(event) {
      var s = "&nbsp;";
      if (event.error != "no-speech") s = "Error: " + event.error;
      $("#results").html(s);
      //stopRecognition(); // do we get onEnd later?
      //setTimeout(startRecognition, 1000); // safety delay
    }

    recognition.onresult = function(event) {
      var result = event.results[0];
      var transcript = result[0].transcript;
      var s = "Transcript: " + transcript;
      if (event.results.length > 1) s += " ." + event.results.length;
      if (result.length > 1) s += " #" + result.length;
      $("#results").html(s);
      websocket.send("Heard " + transcript);
      //stopUntil = Date.now()+200;
      //stopRecognition(); setTimeout(startRecognition, 100);
    }

    recognition.onnomatch = function(event) {
      $("#results").html("-");
      //stopRecognition(); setTimeout(startRecognition, 100);
    }

    recognition.onend = function(event) {
      //$("#results").html("-end-");
      //stopRecognition();
      if (justOnce) justOnce = bigOn = false;
      listening = false; setTimeout(update, 100);
    }

    function startOrStop() {
      bigOn = !bigOn;
      websocket.send(bigOn ? "big on" : "big off");
      update();
    }

    window.resizeTo(300, 300);
  ]]) // end of JavaScript, variables follow
    .replace("#BIGON#", str(autoTurnOn))
    .replace("#JUSTONCE#", str(justOnce))
    .replace("#PORT#", str(port))
    .replace("#AUTOCLOSE#", autoClose ? "true" : "false")
    .replace("#INITIALDELAY#", str(initialDelay))
    .replace("#LANGUAGE#", language)
    .replace("autoCloseDelay", str(autoCloseDelay))
    + tag('button, "...", onclick := "startOrStop()", type := 'button, id := 'btn, disabled := 'disabled)
    + hdiv("", id := 'msgs, style := "margin: 10px; font-size: 10px")
    //+ p(ahref("#", "Popup", onClick := "window.open('/', 'speech_recognizer', 'width=300,height=300,location=no'); return false;"));
  , style := "text-align: center"));
}
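
// Note on the recognizer page above: webkitSpeechRecognition is not switched to continuous
// mode, so every result (or stretch of silence) ends the session via onend; update() then
// restarts recognition as long as bigOn is set and the page is not paused. In "just once"
// mode the page turns itself off again after the first result. Errors other than
// "no-speech" are shown in the #results div.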

svoid justOnce() { startRecognition(true); }
svoid startRecognition() { startRecognition(false); }

svoid startRecognition(bool justOnce) {
  main.justOnce = justOnce;
  bigOn = true;
  L<WebSocket> l = cloneList(webSockets);
  if (empty(l)) startRecognition = true;
  else {
    //print("Starting recognition." + (l(l) > 1 ? "Weird: Have " + l(l) + " websockets" : ""));
    pcall {
      first(l).send(justOnce ? "just once" : "big on");
    }
    main.justOnce = false; // reset the static flag (a plain "justOnce = false" would only clear the parameter)
  }
}
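
// Note: if no recognizer page is connected yet, startRecognition(bool) only sets the static
// startRecognition flag; the WebSocket handler in p {} picks it up and sends
// "just once" / "big on" as soon as the next page connects.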

svoid stopRecognition() {
  bigOn = false;
  if (startRecognition) startRecognition = false;
  if (nempty(webSockets)) pcall {
    first(webSockets).send("big off");
  }
}

sS hotCommands(S s) {
  if (ai_isStopListeningCommand(s)) { stopRecognition(); playBlip(); ret "OK"; }
  S language = ai_extractChangeLanguageCommand(s);
  if (eq(language, 'english)) ret answer("language " + quote("en-US"));
  if (eq(language, 'german)) ret switchToGerman();
  null;
}

answer {
  try answer hotCommands(s);
  if "start recognition timeout *" {
    final int seconds = parseInt($1);
    startRecognition();
    stopper = timerOnce(toMS(seconds), f stopRecognition);
    ret "OK";
  }
  if "is on" ret yesno(bigOn);
  if "has recognizer" ret yesno(nempty(webSockets));

  if "just once" {
    if (nempty(webSockets)) justOnce();
    else {
      justOnce = true;
      openSpeechRecognizerIfNone();
    }
    ret "OK";
  }

  if "start recognition" {
    openSpeechRecognizerIfNone();
    if (nempty(webSockets)) {
      bool on = bigOn;
      startRecognition();
      if (!on && blipOnActivation) blip();
    }
    ret "OK";
  }

  if "language *" {
    if (eq(language, $1)) ret "OK";
    setAndSave('language, $1);
    if (blipOnLanguageSwitch) blip();
    else if (eq(language, "de-DE")) william("Switching to German");
    else william("Switching to English");
    pcall { if (nempty(webSockets)) first(webSockets).send("language " + $1); }
    stopRecognition();
    sleep(500);
    startRecognition();
    ret "OK";
  }

  if "user typed *" ret "OK" with handleUtterance($1, true);

  if "stop recognition" { stopRecognition(); ret "OK"; }

  if "use passive log" { usePassiveLog = true; ret "OK"; }
  if "use active log" { usePassiveLog = false; ret "OK"; }

  if "log on" { doLog = true; ret "OK"; }
  if "log off" { doLog = false; ret "OK"; }
}

svoid cancelTimeout {
  if (stopper != null) { stopper.cancel(); stopper = null; }
}

sS switchToGerman {
  ret answer("language " + quote("de-DE"));
}

svoid handleUtterance(fS s, final bool typed) {
  if (isStefanReichsPC()) {
    mechAppendQ_noUniq("Katze Speech Recognition Log With Date", "[" + localDateWithMilliseconds() + (typed ? ", typed" : "") + "] " + s);
    Map map = litorderedmap(
      where := typed ? "typed" : "voice",
      type := 'heard,
      date := localDateWithMilliseconds(),
      globalID := lastGlobalID = aGlobalID(),
      text := s);
    mechAppendQ_noUniq("Voice I/O Log", struct(map));
  }

  S info = typed ? "User typed" : "Chrome Speech";
  vmBus_send googleSpeechRecognized(s, info);

  if (doLog)
    logQuoted(usePassiveLog ? passiveSpeechRecognitionLog() : speechRecognitionLog(), now() + " [" + info + "] " + s);

  thread {
    pcallF(onUtterance, s);
  }
  hotCommands(s);
}
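
// handleUtterance fans a recognized (or typed) utterance out to: the mech lists above
// (only when isStefanReichsPC()), the vmBus event "googleSpeechRecognized", the active or
// passive speech recognition log (if doLog), the onUtterance hook, and hotCommands().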

svoid openSpeechRecognizer {
  speechRecognizerOpened = sysNow();
  if (hideChromeWindow)
    startInvisibleChromeAppForSpeech(myURL);
  else
    startChromeAppForSpeech(myURL);
}

svoid openSpeechRecognizerIfNone {
  if (empty(webSockets) && sysNow() >= speechRecognizerOpened + 5000)
    openSpeechRecognizer();
}

sS decensor(S s) { ret googleDecensor_static(s); }
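
When this snippet is included in a larger JavaX program instead of being run standalone, the onUtterance and onFirstWebSocket fields above are the intended hooks (both are invoked via pcallF). A minimal embedding sketch, using only names that appear in the code above; the surrounding program itself is hypothetical:

// Hypothetical embedding program (sketch)
onFirstWebSocket = r { print("Recognizer page connected."); };
onUtterance = voidfunc(S utterance) {
  print("Recognized: " + utterance);
};
startRecognition(); // or justOnce() for a single utterance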

Author comment

Began life as a copy of #1009555


Travelled to 18 computer(s): aoiabmzegqzx, bhatertpkbcr, cbybwowwnfue, cfunsshuasjs, gwrvuhgaqvyk, irmadwmeruwu, ishqpsrjomds, lpdgvwnxivlt, mqqgnosmbjvj, onxytkatvevr, pyentgdyhuwx, pzhvpgtvlbxg, triorysbatvj, tslmcundralx, tvejysmllsmz, unoaxrwscvea, vouqrxazstgt, xrpafgyirdlv


Snippet ID: #1009816
Snippet name: Continuous Recognition Module (just logs, German/English, with "bigOn" switch)
Eternal ID of this version: #1009816/136
Text MD5: 208b2053dce2ff58ef2ebf8adf14ef81
Transpilation MD5: a3736bd4369a787a35e97399206d0cce
Author: stefan
Category: javax
Type: JavaX source code (desktop)
Public (visible to everyone): Yes
Archived (hidden from active list): No
Created/modified: 2020-07-21 21:44:01
Source code size: 12356 bytes / 378 lines
Pitched / IR pitched: No / No
Views / Downloads: 1087 / 6840
Version history: 135 change(s)