!7 cm LiveFFT > DynImageSurface {
  // Live spectrogram module: collects incoming audio into windows and renders
  // each window as a frequency image (FFT via spectro_clipFromMonoInputStream).

  volatile bool enabled = true;                 // master on/off switch for audio processing
  switchable bool normalize = false;            // normalize each incoming audio chunk before buffering
  switchable double windowSize = 1; // seconds  (length of audio accumulated per rendered image)
  switchable int overlap = 2;                   // FFT frame overlap factor passed to the spectro helper
  switchable int frameBits = 10; // frameSize 1024  (actual frame size is 1 << frameBits)
  switchable double preMult = 3.5;              // pre-multiplier applied to sample magnitude before log scaling
  switchable int brightness = -300;             // additive term in the colorizer formula below
  switchable int contrast = 187;                // multiplicative term in the colorizer formula below
  switchable int downSampling = 1;              // divide sample count by this before the FFT (1 = off)

  transient Q q;                                // background queue for the FFT/render work
  transient L buffer = synchroList();           // synchronized list of short[] chunks not yet processed
  transient volatile int bufferSize;            // total number of samples currently in 'buffer'
  transient int neededSamples;                  // samples per window = windowSize * 44100 (see onNewWindowSize)
  transient Clip clip;                          // most recently computed frequency clip
  transient short[] fromData;                   // last raw audio chunk that triggered a render
  // Maps a spectrogram magnitude x to a 0..1-ish brightness via log compression.
  transient IF1 colorizer = x -> (brightness+contrast*log1p(abs(preMult*x)))/255;

  // Builds the UI: the spectrogram image (double-buffered) plus sliders for
  // all tunable parameters and an "enabled" checkbox.
  visualize {
    if (!hasImage())
      // Start with a blank white image sized to the expected spectrogram dimensions
      setImage(whiteImage(audio_estimatedFrequencyImageWidth(windowSize),
        audio_frequencyImageHeight()));
    ret centerAndSouthWithMargins(super.visualizeWithDoubleBuffering(),
      jvstackWithSpacing(
        makeForm(
          "Window Size" := dm_doubleSlider windowSize(0, 2),
          "Frame Bits" := dm_intSlider frameBits(1, 12),
          Downsampling := dm_intSlider downSampling(1, 8),
          Overlap := dm_intSlider overlap(1, 16),
          PreMult := dm_doubleSlider preMult(0, 50),
          Brightness := dm_intSlider brightness(-300, 200),
          Contrast := dm_intSlider contrast(0, 300)),
        jrightalignedline(dm_fieldCheckBox enabled())));
  }

  // Recomputes the window length in samples and drops any partially-collected
  // audio. NOTE(review): 44100 implies a fixed 44.1 kHz input rate — confirm
  // this matches what dm_addAudioListener actually delivers.
  void onNewWindowSize {
    neededSamples = iround(windowSize*44100);
    buffer.clear();
    bufferSize = 0;
  }

  // Module startup: watch windowSize for changes, start the work queue, and
  // register the audio listener that accumulates samples into windows.
  start {
    dm_watchFieldAndNow windowSize(r onNewWindowSize);
    q = dm_startQ();
    dm_addAudioListener(voidfunc(short[] _data) {
      if (!enabled) ret;
      short[] data = _data;
      // Down-mix stereo to mono; clone either way so we own the array
      data = dm_audioInputIsStereo() ? mixStereoShortArrayToMono(data) : cloneShortArray(data);
      if (normalize) data = normalizeShortArray(data);
      buffer.add(data);
      bufferSize += l(data);
      if (bufferSize >= neededSamples) {
        // A full window is available. After this, bufferSize holds the
        // number of surplus samples beyond the window boundary.
        bufferSize -= neededSamples;
        final L salvaged = cloneList(buffer);
        buffer.clear();
        if (bufferSize > 0) {
          // Split the last chunk at the window boundary: the tail (surplus)
          // goes back into 'buffer' for the next window, the head stays in
          // 'salvaged' so this window is exactly neededSamples long.
          buffer.add(lastNShorts(bufferSize, last(salvaged)));
          replaceLastElement(salvaged, dropLastNShorts(bufferSize, last(salvaged)));
        }
        // Heavy work (FFT + image rendering) runs on the background queue
        q.add(r {
          short[] data = concatShortArrays(salvaged);
          // Optional decimation to reduce FFT input size
          data = linearlyShortenArray(data, iround(l(data)/downSampling));
          ShortArrayInputStream stream = new(data);
          // NOTE(review): big-endian is presumably what
          // spectro_clipFromMonoInputStream expects — confirm in the helper.
          stream.bigEndian = true;
          clip = spectro_clipFromMonoInputStream(stream, 1 << frameBits, overlap);
          vmBus_send newAudioFrequencyClip(clip);
          BWImage img = clipToFrequencyImage(clip, colorizer);
          vmBus_send newAudioFrequencyImage(img);
          // _data is the raw (pre-mixdown) chunk that completed this window
          vmBus_send newAudioFrequencyImageFromData(img, _data);
          setImage(img);
          fromData = _data;
        });
      }
    });
  }
}