svoid trainVAD() {
  // Spectrogram images for the positive (voice) and negative (non-voice) mega-mixes
  final BWImage img1 = frequencyImage(voiceMegaMix());
  final BWImage img2 = frequencyImage(nonVoiceMegaMix());

  // Cut each spectrogram into 2-second clips, stepping by a quarter clip (75% overlap)
  int clipLength = iround(2.0*spectro_pixelsPerSecond());
  int stepSize = clipLength/4;
  print(+clipLength);

  L<BWImage> images1 = map(func(IntRange r) -> BWImage { bwHorizontalClip(img1, r) },
    stepIntRange(clipLength, intRange(0, img1.getWidth()), stepSize));
  L<BWImage> images2 = map(func(IntRange r) -> BWImage { bwHorizontalClip(img2, r) },
    stepIntRange(clipLength, intRange(0, img2.getWidth()), stepSize));
  print(allImageSizes(concatLists(images1, images2)));
  print(l(images1) + " + " + l(images2) + " images");

  // Pair voice clips with true and non-voice clips with false
  L<Pair<BWImage, Bool>> trainingList = trueFalseBPairs(images1, images2);

  // TODO: replace Recognizer with F1.
  new Best<Recognizer> best;

  // Baseline recognizer that always answers false; score it over the training pairs
  Recognizer recognizer = func(BWImage img) -> bool { false };
  new Scorer scorer;
  for (Pair<BWImage, Bool> p : trainingList)
    scorer.add(recognizer.get(p.a) == p.b);
  best.putAndPrintIfNewBest(recognizer, scorer!);
}