!7 cm LiveFFT > DynImageSurface {
  // Live spectrogram module: collects incoming audio into fixed-length windows
  // and renders each window as a frequency image on this DynImageSurface.

  volatile bool enabled = true;          // master on/off; audio chunks are dropped while false
  switchable bool normalize = false;     // normalize each incoming chunk before buffering
  switchable double windowSize = 1; // seconds
  switchable double preMult = 3.5;       // pre-multiplier applied to each sample before log scaling
  switchable int brightness = -300;      // additive term in the colorizer formula below
  switchable int contrast = 187;         // multiplicative term in the colorizer formula below

  transient Q q;                         // presumably a background task queue (filled via dm_startQ / q.add) — verify
  transient L buffer = synchroList();    // synchronized list of short[] chunks accumulated toward one window
  transient volatile int bufferSize;     // total number of samples currently in buffer
  transient int neededSamples;           // samples per window = windowSize * sample rate (see onNewWindowSize)
  transient Clip clip;                   // most recently computed spectrogram clip
  transient short[] fromData;            // raw chunk that triggered the most recent image (see NOTE in start)

  // Maps a spectrogram value x to a 0..1-ish pixel intensity:
  // brightness + contrast * log1p(|preMult * x|), scaled by 1/255.
  transient IF1 colorizer = x -> (brightness+contrast*log1p(abs(preMult*x)))/255;

  visualize {
    // Show a placeholder white image sized for the current window until real data arrives.
    if (!hasImage()) setImage(
      whiteImage(audio_estimatedFrequencyImageWidth(windowSize), audio_frequencyImageHeight()));
    // Image on top; sliders for the tunable fields plus the enable checkbox below.
    ret centerAndSouthWithMargins(super.visualizeWithDoubleBuffering(),
      jvstackWithSpacing(
        makeForm(
          Seconds := dm_doubleSlider windowSize(0, 2),
          PreMult := dm_doubleSlider preMult(0, 50),
          Brightness := dm_intSlider brightness(-300, 200),
          Contrast := dm_intSlider contrast(0, 300)),
        jrightalignedline(dm_fieldCheckBox enabled())
      ));
  }

  // Recompute the window length in samples and discard partially filled data
  // whenever windowSize changes.
  // NOTE(review): 44100 Hz sample rate is hard-coded here — assumes the audio
  // input runs at 44.1 kHz; confirm against dm_addAudioListener's source.
  void onNewWindowSize {
    neededSamples = iround(windowSize*44100);
    buffer.clear();
    bufferSize = 0;
  }

  start {
    dm_watchFieldAndNow windowSize(r onNewWindowSize); // also runs once immediately
    q = dm_startQ();
    dm_addAudioListener(voidfunc(short[] _data) {
      if (!enabled) ret;
      short[] data = _data;
      // Down-mix stereo to mono; clone in either case so we own the array.
      data = dm_audioInputIsStereo()
        ? mixStereoShortArrayToMono(data)
        : cloneShortArray(data);
      if (normalize) data = normalizeShortArray(data);
      buffer.add(data);
      bufferSize += l(data);
      // Once a full window has accumulated, hand it off for rendering and
      // carry the overflow (samples beyond neededSamples) into the next window.
      if (bufferSize >= neededSamples) {
        bufferSize -= neededSamples; // bufferSize now = number of leftover samples
        final L salvaged = cloneList(buffer);
        buffer.clear();
        if (bufferSize > 0) {
          // Keep the trailing leftover samples of the last chunk for the next
          // window, and trim them off the window being rendered.
          buffer.add(lastNShorts(bufferSize, last(salvaged)));
          replaceLastElement(salvaged, dropLastNShorts(bufferSize, last(salvaged)));
        }
        // Heavy work (FFT + image) runs on the background queue, off the audio thread.
        q.add(r {
          MultiShortArrayInputStream_resettable stream = new(salvaged);
          stream.bigEndian = true; // NOTE(review): spectro_clipFromMonoInputStream apparently expects big-endian — confirm
          clip = spectro_clipFromMonoInputStream(stream);
          vmBus_send newAudioFrequencyClip(clip);
          BWImage img = clipToFrequencyImage(clip, colorizer);
          vmBus_send newAudioFrequencyImage(img);
          // NOTE(review): _data is only the chunk that completed the window,
          // not the whole window's samples — verify consumers expect that.
          vmBus_send newAudioFrequencyImageFromData(img, _data);
          setImage(img);
          fromData = _data;
        });
      }
    });
  }
}