!7

// Interactive "jump cut" tool: loads a video, extracts a mono 16 kHz audio
// track, computes a volume profile, derives the speech time ranges, and
// splices only those ranges into a new output video via ffmpeg.
// (JavaX cmodule — a dynamic module with a Swing UI; DynPrintLog gives it
// a print log area.)
cmodule VideoSplicer > DynPrintLog {
  S inputFile, outputFile;  // paths of the source video and the spliced result
  S timestamps;             // user-editable list of time ranges to keep (one per line, per countLines below)
  int volumeThresholdPercent = 15;  // UI parameter: loudness cutoff in percent
  S minSilence = "0.2", leadIn = "0.2", leadOut = "0.2";  // UI parameters in seconds (kept as strings for the text fields)

  transient double profileSamplingInterval = 0.05; // 50 ms
  // Audio samples per volume-profile window, assuming 16 kHz mono audio
  // (matches ffmpeg_toMonoAudio_16k used in loadVideo)
  transient int audioWindowSize = iround(profileSamplingInterval*16000);
  transient ImageSurface isPreview;  // shows the preview frame of the loaded video
  transient JAmplitudeGraph graph;   // displays the volume profile (foreground set to red below)
  transient float[] profile;         // one loudness value per sampling interval; null until loadVideo succeeds

  // UI layout: file fields + action buttons and the parameters section on top;
  // below, a horizontal split with the timestamp list (left, 20% width) and
  // the audio graph + preview image + log (right).
  // NOTE(review): the "Jump cut parameters" fields (volumeThresholdPercent,
  // minSilence, leadIn, leadOut) are edited here but never visibly passed to
  // audio_findSpeechPartsFromVolumeProfile in autoJumpCuts — verify whether
  // that helper picks them up through another channel.
  visual northAndCenterWithMargins(
    vstackWithSpacing(
      withLabel("Input video:", filePathInputWithBrowseButton(dm_textField('inputFile), onChoose := rThread loadVideo)),
      withLabel("Output video:", filePathInputWithBrowseButton(dm_textField('outputFile))),
      centeredLine(
        jbutton("Load video", rThread loadVideo),
        jbutton("Auto jump cuts!", rThread autoJumpCuts),
        jBoldButton("Make video", rThread makeVideo)),
      jSection("Jump cut parameters",
        centeredLine(
          withLabelLeftAndRight("Volume threshold:", dm_spinner volumeThresholdPercent(0, 100), "%"),
          withLabelLeftAndRight("Minimum silence:", dm_textField minSilence(), "s"),
          withLabelLeftAndRight("Lead-in:", dm_textField leadIn(), "s"),
          withLabelLeftAndRight("Lead-out:", dm_textField leadOut(), "s")
        ))),
    jhsplit(0.2,
      jLiveValueSection(dm_calculatedLiveValue(S, () -> "Timestamps (" + countLines(timestamps) + ")"),
        dm_textArea('timestamps)),
      northAndCenterWithMargins(
        jsection("Audio", jMinHeight(50, graph = setForeground(Color.red, swingNu(JAmplitudeGraph)))),
        hgridWithSpacing(
          jsection("Preview", jscroll_center(isPreview = jImageSurface())),
          super))));

  // Loads the input video: grabs a preview frame and a mono 16 kHz WAV
  // (both cached per MD5 of the file path), then computes the volume
  // profile and shows it in the graph. Runs in a background thread
  // (rThread) when triggered from the UI.
  void loadVideo enter {
    File f = newFile(inputFile);
    if (!fileExists(f)) ret with infoBox("File not found: " + f2s(f));
    // Cache key is the MD5 of the path string (not the file contents),
    // so a changed file at the same path reuses stale cache entries.
    S id = md5(f2s(f));
    File previewFile = prepareCacheProgramFile("preview-" + id + ".jpg");
    File audioFile = prepareCacheProgramFile("preview-" + id + ".wav");
    if (!fileExists(previewFile)) {
      print("Getting preview image...");
      ffmpeg_getSingleFrame(f, previewFile, 0.0); // frame at t=0
      print("Done");
    } else print("Have preview image");
    isPreview.setImageAndZoomToDisplay(loadImage2(previewFile));
    if (!fileExists(audioFile)) {
      print("Extracting audio...");
      ffmpeg_toMonoAudio_16k(f, audioFile); // mono, 16 kHz — matches audioWindowSize above
      print("Done - " + fileInfo(audioFile));
    } else print("Have audio");
    print("Getting volume profile...");
    // One float per audioWindowSize samples (i.e. per 50 ms of audio)
    profile = decodeWAVToMonoSamples_floatVolumeProfile(audioFile, audioWindowSize);
    print("Have volume profile (" + nEntries(l(profile)) + ")");
    //printStruct(takeFirstOfFloatArray(100, profile));
    graph.setValues(profile);
  }

  // Splices the kept time ranges into the output file by shelling out to
  // ffmpeg. Auto-computes timestamps first if the user hasn't provided any.
  void makeVideo enter {
    if (empty(outputFile)) ret with infoBox("Need output file path");
    File in = newFile(inputFile), out = newFile(outputFile);
    if (empty(timestamps)) autoJumpCuts(); // fills the timestamps field
    L ranges = parseTimestampRanges(timestamps);
    if (empty(ranges)) ret with infoBox("No timestamps");
    print(+ranges); // JavaX: prints "ranges=" + value
    if (fileExists(out) && !confirmOKCancel("Overwrite " + fileName(out) + "?")) ret;
    temp tempInfoBox_noHide("Splicing video..."); // progress box, auto-closed when scope exits
    // -y: let ffmpeg overwrite the output (user already confirmed above)
    backtickToConsole(ffmpegCmd() + " -y " + ffmpeg_argsForSplice(in, out, ranges));
    // Success is judged only by output-file existence, not ffmpeg's exit code.
    // NOTE(review): no space between "!" and fileInfo(out) — message likely
    // runs together unless fileInfo returns a leading space; verify.
    if (fileExists(out)) infoBox("Done splicing video!" + fileInfo(out));
    else infoBox("Something went wrong...");
  }

  // Computes speech time ranges from the volume profile and writes them into
  // the timestamps field. Loads the video first if no profile exists yet.
  void autoJumpCuts enter {
    if (profile == null) loadVideo();
    if (profile == null) ret; // loadVideo failed (e.g. file not found) — give up quietly
    L ranges = audio_findSpeechPartsFromVolumeProfile(profile, profileSamplingInterval);
    setField(timestamps := formatTimestampRanges(ranges)); // setField: updates field + UI binding
    infoBox("Created " + nSlices(ranges) + ", ready to make video");
  }
}