|
@@ -2,6 +2,11 @@
|
|
|
# Copyright 2022, 2024 Pavel Machek, GPLv2+
|
|
|
|
|
|
import os, sys, time, copy, subprocess
|
|
|
+import gi
|
|
|
+gi.require_version('Gst', '1.0')
|
|
|
+from gi.repository import Gst, GLib
|
|
|
+import os
|
|
|
+import time
|
|
|
|
|
|
# https://stackoverflow.com/questions/11779490/how-to-add-a-new-audio-not-mixing-into-a-video-using-ffmpeg
|
|
|
# https://ottverse.com/create-video-from-images-using-ffmpeg/
|
|
@@ -13,6 +18,349 @@ import os, sys, time, copy, subprocess
|
|
|
# Usage: mpegize convert
|
|
|
# head -c 1000 < /dev/zero > /tmp/delme.sm/1.foo.sa
|
|
|
|
|
|
+def gst_convert(mega_dir, out_file, use_jpeg):
|
|
|
+ def sa_read(name, t):
|
|
|
+ with open(name, "rb") as file:
|
|
|
+ rgb_data = file.read(10*1024*1024)
|
|
|
+
|
|
|
+ caps_string = "audio/x-raw,format=U16LE,channels=2,rate=48000,layout=interleaved,channel-mask=3"
|
|
|
+ caps = Gst.Caps.from_string(caps_string)
|
|
|
+
|
|
|
+ buffer = Gst.Buffer.new_wrapped(rgb_data)
|
|
|
+ if False:
|
|
|
+ time.sleep(1/30.)
|
|
|
+ # nanoseconds
|
|
|
+ buffer.pts = time.time() * 1000*1000*1000
|
|
|
+ buffer.dts = time.time() * 1000*1000*1000
|
|
|
+ elif True:
|
|
|
+ buffer.pts = t
|
|
|
+ buffer.dts = t
|
|
|
+ buffer.duration = (1000*1000*1000)/10.
|
|
|
+
|
|
|
+ return buffer, caps
|
|
|
+
|
|
|
+ def sa_src(appsrc):
|
|
|
+ def on_need_data(appsrc, data, length):
|
|
|
+ name = audio.get_path()
|
|
|
+ if name == None or name[-22:] != ".48000-s16le-stereo.sa":
|
|
|
+ appsrc.emit("end-of-stream")
|
|
|
+ print("End of audio stream")
|
|
|
+ return
|
|
|
+ t = audio.get_time()
|
|
|
+ #print("Audio: ", name, " need ", data, t)
|
|
|
+ audio.pop()
|
|
|
+ buffer, caps = sa_read(name, t)
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ appsrc.set_property("format", Gst.Format.TIME)
|
|
|
+ appsrc.set_property("is-live", False)
|
|
|
+ appsrc.set_property("block", True)
|
|
|
+
|
|
|
+ name = audio.get_path()
|
|
|
+ buffer, caps = sa_read(name, 0)
|
|
|
+
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ #appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ s = appsrc.connect("need-data", on_need_data, "")
|
|
|
+ print("Connect", s)
|
|
|
+
|
|
|
+ class grwBase:
|
|
|
+ def init(m, dir):
|
|
|
+ m.dir = dir
|
|
|
+ m.list = os.listdir(dir)
|
|
|
+ m.list.sort()
|
|
|
+ m.slen = len(m.suffix)
|
|
|
+ m.start_time = 0
|
|
|
+ print("Movie", len(m.list))
|
|
|
+
|
|
|
+ def get_path(m):
|
|
|
+ s = m.get_name()
|
|
|
+ if s: return m.dir + s
|
|
|
+ return s
|
|
|
+
|
|
|
+ def get_name(m):
|
|
|
+ #print("Get path -- ")
|
|
|
+ while True:
|
|
|
+ if (len(m.list)) == 0:
|
|
|
+ return None
|
|
|
+ #print("Get path: ", m.list[0], m.suffix)
|
|
|
+ if m.list[0][-m.slen:] != m.suffix:
|
|
|
+ m.pop()
|
|
|
+ continue
|
|
|
+ return m.list[0]
|
|
|
+
|
|
|
+ def get_time(m):
|
|
|
+ s = m.get_name()
|
|
|
+ s = s[:-m.slen]
|
|
|
+ return int(s) * 1000 - m.start_time
|
|
|
+
|
|
|
+ def pop(m):
|
|
|
+ m.list = m.list[1:]
|
|
|
+
|
|
|
+ class grwVideo(grwBase):
|
|
|
+ suffix = ".grw"
|
|
|
+ def __init__(m, dir):
|
|
|
+ m.init(dir)
|
|
|
+
|
|
|
+ class grwJPEG(grwBase):
|
|
|
+ suffix = ".jpeg.sv"
|
|
|
+ def __init__(m, dir):
|
|
|
+ m.init(dir + "sm/")
|
|
|
+
|
|
|
+ class grwAudio(grwVideo):
|
|
|
+ suffix = ".48000-s16le-stereo.sa"
|
|
|
+ def __init__(m, dir):
|
|
|
+ m.init(dir + "sm/")
|
|
|
+
|
|
|
+ def grw_read(name, t):
|
|
|
+ with open(name, "rb") as file:
|
|
|
+ rgb_data = file.read(10*1024*1024)
|
|
|
+ i = len(rgb_data)
|
|
|
+ i -= 1
|
|
|
+ while rgb_data[i] != 0:
|
|
|
+ i -= 1
|
|
|
+ footer = rgb_data[i+1:]
|
|
|
+ sp = str(footer, 'ascii').split('\n')
|
|
|
+ # Create caps for the file
|
|
|
+ caps_string = sp[0][6:]
|
|
|
+ caps = Gst.Caps.from_string(caps_string)
|
|
|
+ if sp[0][:6] != "Caps: ":
|
|
|
+ print("Bad footer")
|
|
|
+ if sp[1][:6] != "Size: ":
|
|
|
+ print("Bad footer")
|
|
|
+ if sp[-1] != "GRW":
|
|
|
+ print("Missing GRW footer")
|
|
|
+
|
|
|
+ buffer = Gst.Buffer.new_wrapped(rgb_data)
|
|
|
+ # This does not work for interactive use.
|
|
|
+ if False:
|
|
|
+ time.sleep(1/30.)
|
|
|
+ # nanoseconds
|
|
|
+ buffer.pts = time.time() * 1000*1000*1000
|
|
|
+ buffer.dts = time.time() * 1000*1000*1000
|
|
|
+ elif True:
|
|
|
+ buffer.pts = t
|
|
|
+ buffer.dts = t
|
|
|
+ buffer.duration = (1000*1000*1000)/30.
|
|
|
+
|
|
|
+ return buffer, caps
|
|
|
+
|
|
|
+ def grwsrc(appsrc):
|
|
|
+ def on_need_data(appsrc, data, length):
|
|
|
+ name = movie.get_path()
|
|
|
+ if name == None or name[-4:] != ".grw":
|
|
|
+ appsrc.emit("end-of-stream")
|
|
|
+ print("End of video stream")
|
|
|
+ return
|
|
|
+ t = movie.get_time()
|
|
|
+ #print("Video: ", name, t)
|
|
|
+ movie.pop()
|
|
|
+ buffer, caps = grw_read(name, t)
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ appsrc.set_property("format", Gst.Format.TIME)
|
|
|
+ appsrc.set_property("is-live", False)
|
|
|
+ appsrc.set_property("block", True)
|
|
|
+
|
|
|
+ name = movie.get_path()
|
|
|
+ buffer, caps = grw_read(name, 0)
|
|
|
+
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ #appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ s = appsrc.connect("need-data", on_need_data, "")
|
|
|
+ print("Connect", s)
|
|
|
+
|
|
|
+ def jpeg_read(name, t):
|
|
|
+ with open(name, "rb") as file:
|
|
|
+ rgb_data = file.read(10*1024*1024)
|
|
|
+ i = len(rgb_data)
|
|
|
+ buffer = Gst.Buffer.new_wrapped(rgb_data)
|
|
|
+
|
|
|
+ caps_string = "image/jpeg"
|
|
|
+ caps = Gst.Caps.from_string(caps_string)
|
|
|
+
|
|
|
+ # This does not work for interactive use.
|
|
|
+ if False:
|
|
|
+ time.sleep(1/30.)
|
|
|
+ # nanoseconds
|
|
|
+ buffer.pts = time.time() * 1000*1000*1000
|
|
|
+ buffer.dts = time.time() * 1000*1000*1000
|
|
|
+ elif True:
|
|
|
+ buffer.pts = t
|
|
|
+ buffer.dts = t
|
|
|
+ buffer.duration = (1000*1000*1000)/30.
|
|
|
+
|
|
|
+ return buffer, caps
|
|
|
+
|
|
|
+ def jpeg_src(appsrc):
|
|
|
+ def on_need_data(appsrc, data, length):
|
|
|
+ name = movie.get_path()
|
|
|
+ if name == None or name[-8:] != ".jpeg.sv":
|
|
|
+ appsrc.emit("end-of-stream")
|
|
|
+ print("End of video stream")
|
|
|
+ return
|
|
|
+ t = movie.get_time()
|
|
|
+ #print("Video: ", name, t)
|
|
|
+ movie.pop()
|
|
|
+ buffer, caps = jpeg_read(name, t)
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ appsrc.set_property("format", Gst.Format.TIME)
|
|
|
+ appsrc.set_property("is-live", False)
|
|
|
+ appsrc.set_property("block", True)
|
|
|
+
|
|
|
+ name = movie.get_path()
|
|
|
+ buffer, caps = jpeg_read(name, 0)
|
|
|
+
|
|
|
+ appsrc.set_property("caps", caps)
|
|
|
+ #appsrc.emit("push-buffer", buffer)
|
|
|
+
|
|
|
+ s = appsrc.connect("need-data", on_need_data, "")
|
|
|
+ print("Connect", s)
|
|
|
+
|
|
|
+ def v_src(appsrc):
|
|
|
+ if not use_jpeg:
|
|
|
+ grwsrc(appsrc)
|
|
|
+ else:
|
|
|
+ jpeg_src(appsrc)
|
|
|
+
|
|
|
+ count = 0
|
|
|
+ path = mega_dir
|
|
|
+ if use_jpeg:
|
|
|
+ movie = grwJPEG(path)
|
|
|
+ else:
|
|
|
+ movie = grwVideo(path)
|
|
|
+ audio = grwAudio(path)
|
|
|
+ t1 = movie.get_time()
|
|
|
+ t2 = audio.get_time()
|
|
|
+ tm = min(t1,t2)
|
|
|
+ print("Time base is", tm)
|
|
|
+ movie.start_time = tm
|
|
|
+ audio.start_time = tm
|
|
|
+
|
|
|
+ def pipeline_video():
|
|
|
+ if True:
|
|
|
+ s = "appsrc name=source"
|
|
|
+ if use_jpeg:
|
|
|
+ s += " ! jpegdec "
|
|
|
+ else:
|
|
|
+ s = "videotestsrc"
|
|
|
+ s += " ! video/x-raw,width=(int)640,height=(int)480,format=(string)RGB "
|
|
|
+ if False:
|
|
|
+ s += " ! videoconvert ! jpegenc"
|
|
|
+ s += " ! appsink name=sink"
|
|
|
+ elif True:
|
|
|
+ s += " ! videoconvert ! autovideosink"
|
|
|
+ else:
|
|
|
+ s += " ! videoconvert ! x264enc bitrate=3072 speed-preset=ultrafast ! matroskamux ! filesink location=" + out_file
|
|
|
+
|
|
|
+ pipeline = Gst.parse_launch(s)
|
|
|
+
|
|
|
+ p = pipeline.get_by_name("source")
|
|
|
+ if p:
|
|
|
+ if False:
|
|
|
+ mysrc(p)
|
|
|
+ else:
|
|
|
+ v_src(p)
|
|
|
+ p = pipeline.get_by_name("sink")
|
|
|
+ if p:
|
|
|
+ mysink(p)
|
|
|
+ return pipeline
|
|
|
+
|
|
|
+ def pipeline_audio():
|
|
|
+ # audiotestsrc ! audioconvert ! audioresample ! autoaudiosink
|
|
|
+ if True:
|
|
|
+ s = "appsrc name=source"
|
|
|
+ else:
|
|
|
+ s = "audiotestsrc"
|
|
|
+
|
|
|
+ if True:
|
|
|
+ s += " ! audiobuffersplit ! audioconvert ! audioresample ! autoaudiosink"
|
|
|
+ else:
|
|
|
+ s += " ! ! ! "
|
|
|
+
|
|
|
+ pipeline = Gst.parse_launch(s)
|
|
|
+
|
|
|
+ p = pipeline.get_by_name("source")
|
|
|
+ if p:
|
|
|
+ sa_src(p)
|
|
|
+ p = pipeline.get_by_name("sink")
|
|
|
+ if p:
|
|
|
+ mysink(p)
|
|
|
+ return pipeline
|
|
|
+
|
|
|
+ def pipeline_both():
|
|
|
+ if True:
|
|
|
+ s = "appsrc name=asrc"
|
|
|
+ else:
|
|
|
+ s = "audiotestsrc"
|
|
|
+ # Audiobuffersplit creates problems with A/V synchronization, avoid.
|
|
|
+ #s += "! audiobuffersplit"
|
|
|
+ s += " ! audioconvert ! vorbisenc ! mux. "
|
|
|
+
|
|
|
+ if True:
|
|
|
+ s += "appsrc name=vsrc"
|
|
|
+ if use_jpeg:
|
|
|
+ s += " ! jpegdec "
|
|
|
+ else:
|
|
|
+ s += "videotestsrc"
|
|
|
+ s += " ! video/x-raw,width=(int)640,height=(int)480,format=(string)RGB "
|
|
|
+
|
|
|
+ s += " ! videoconvert ! x264enc bitrate=3072 speed-preset=ultrafast ! matroskamux name=mux"
|
|
|
+ if False:
|
|
|
+ s += " ! decodebin ! playsink"
|
|
|
+ else:
|
|
|
+ s += " ! filesink location="+out_file
|
|
|
+
|
|
|
+ pipeline = Gst.parse_launch(s)
|
|
|
+
|
|
|
+ p = pipeline.get_by_name("asrc")
|
|
|
+ if p:
|
|
|
+ sa_src(p)
|
|
|
+ p = pipeline.get_by_name("vsrc")
|
|
|
+ if p:
|
|
|
+ v_src(p)
|
|
|
+ return pipeline
|
|
|
+
|
|
|
+ Gst.init(None)
|
|
|
+ Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
|
|
|
+ if False:
|
|
|
+ Gst.debug_set_default_threshold(Gst.DebugLevel.INFO)
|
|
|
+
|
|
|
+ if False:
|
|
|
+ pipeline = pipeline_video()
|
|
|
+ elif False:
|
|
|
+ pipeline = pipeline_audio()
|
|
|
+ else:
|
|
|
+ pipeline = pipeline_both()
|
|
|
+
|
|
|
+ # Function to handle end of stream
|
|
|
+ def on_eos(bus, message):
|
|
|
+ print("End of stream")
|
|
|
+ pipeline.set_state(Gst.State.NULL)
|
|
|
+ loop.quit()
|
|
|
+
|
|
|
+ # Set up bus to handle messages
|
|
|
+ bus = pipeline.get_bus()
|
|
|
+ bus.add_signal_watch()
|
|
|
+ bus.connect("message::eos", on_eos)
|
|
|
+
|
|
|
+ # Set the pipeline to the playing state
|
|
|
+ pipeline.set_state(Gst.State.PLAYING)
|
|
|
+
|
|
|
+ # Run the main loop to handle GStreamer events
|
|
|
+ loop = GLib.MainLoop()
|
|
|
+ try:
|
|
|
+ loop.run()
|
|
|
+ except KeyboardInterrupt:
|
|
|
+ pipeline.set_state(Gst.State.NULL)
|
|
|
+ loop.quit()
|
|
|
+
|
|
|
class Mpegize:
|
|
|
base = '/tmp/delme.'
|
|
|
fps = 30.5
|
|
@@ -48,14 +396,22 @@ class Mpegize:
|
|
|
return s[2], s[1], i
|
|
|
|
|
|
def help(m):
|
|
|
- print("mpegize command base-dir")
|
|
|
+ print("mpegize command base-dir destination-movie fps")
|
|
|
|
|
|
def run(m, argv):
|
|
|
if len(argv) > 2:
|
|
|
m.base = argv[2]
|
|
|
mode = argv[1]
|
|
|
- if mode == "stat" or mode == "convert" or mode == "gc" or mode == "convertall":
|
|
|
- m.process(mode)
|
|
|
+ if mode == "convert":
|
|
|
+ print("Phase 1: jpegize")
|
|
|
+            print("Message: 0%")
|
|
|
+ sys.stdout.flush()
|
|
|
+ m.prepare()
|
|
|
+ m.jpegize()
|
|
|
+ print("Phase 2: mpegize -- ", argv[3])
|
|
|
+ print("Message: enc")
|
|
|
+ sys.stdout.flush()
|
|
|
+ gst_convert(m.base, argv[3], True)
|
|
|
return
|
|
|
if mode == "gaps":
|
|
|
print("Video gaps")
|
|
@@ -87,131 +443,12 @@ class Mpegize:
|
|
|
print("Total", num, "gaps of", total)
|
|
|
print("Expected", (1000000 / m.fps) / 1000., "msec, limit", limit / 1000., "msec")
|
|
|
|
|
|
- def process(m, mode):
|
|
|
- m.prepare_source()
|
|
|
- photos = 0
|
|
|
- video_frames = 0
|
|
|
- start = 0
|
|
|
- for n in m.frames:
|
|
|
- ext, mid, i = m.parse_frame(n)
|
|
|
- if ext != "mark":
|
|
|
- continue
|
|
|
- print(n)
|
|
|
- if mid == "start":
|
|
|
- start = i
|
|
|
- if mid == "stop":
|
|
|
- video_frames += m.extract_video(start, i, mode)
|
|
|
- start = 0
|
|
|
- if mid == "wow":
|
|
|
- if start:
|
|
|
- start -= 5000000
|
|
|
- else:
|
|
|
- photos += 5
|
|
|
- m.extract_photo(i - 1000000, mode)
|
|
|
- m.extract_photo(i - 2000000, mode)
|
|
|
- m.extract_photo(i - 3000000, mode)
|
|
|
- m.extract_photo(i - 4000000, mode)
|
|
|
- m.extract_photo(i - 5000000, mode)
|
|
|
- if mid == "photo":
|
|
|
- photos += 1
|
|
|
- m.extract_photo(i, mode)
|
|
|
- if mode == "convertall":
|
|
|
- video_frames += m.extract_video(0, 9999999999999999, "convert")
|
|
|
- return
|
|
|
- print("Total", photos, "photos and", m.display_frames(video_frames))
|
|
|
- print(len(m.unused_frames), "/", len(m.frames))
|
|
|
- if mode == "gc":
|
|
|
- os.chdir(m.source)
|
|
|
- for n in m.unused_frames:
|
|
|
- os.unlink(n)
|
|
|
- print(m.unused_frames)
|
|
|
-
|
|
|
def display_usec(m, v):
|
|
|
return "%.2f sec" % (v/1000000.)
|
|
|
|
|
|
def display_frames(m, v):
|
|
|
return "%d frames %s" % (v, m.display_usec(v * 1000000 / 30.))
|
|
|
|
|
|
- def frame_used(m, n):
|
|
|
- if n in m.unused_frames:
|
|
|
- m.unused_frames.remove(n)
|
|
|
-
|
|
|
- def extract_photo(m, around, mode):
|
|
|
- print("Photo:", around)
|
|
|
- best = None
|
|
|
- for n in m.frames:
|
|
|
- ext, mid, i = m.parse_frame(n)
|
|
|
- if ext != "sv":
|
|
|
- continue
|
|
|
- if i < around:
|
|
|
- best = n
|
|
|
- continue
|
|
|
- best = n
|
|
|
- break
|
|
|
-
|
|
|
- m.frame_used(best)
|
|
|
- out_file = m.output+"/image-%04d.jpeg" % m.out_index
|
|
|
- m.out_index += 1
|
|
|
- if mode == "convert":
|
|
|
- os.system("ln "+m.source+"/"+best+" "+out_file)
|
|
|
-
|
|
|
- def extract_video(m, start, end, mode):
|
|
|
- print("Searching video", start, end, "--", m.display_usec(end-start))
|
|
|
- if mode == "convert":
|
|
|
- m.prepare_work()
|
|
|
- t1 = time.time()
|
|
|
- seen_audio = False
|
|
|
- seen_video = False
|
|
|
- count = 0
|
|
|
- skip_audio = 0
|
|
|
- skip_video = 0
|
|
|
- num = 0
|
|
|
- for n in m.frames:
|
|
|
- num += 1
|
|
|
- if not num % 1000:
|
|
|
- print("Frame", num)
|
|
|
- ext, mid, i = m.parse_frame(n)
|
|
|
- if ext != "sa" and ext != "sv":
|
|
|
- m.frame_used(n)
|
|
|
- continue
|
|
|
- if i < start - 1000000 or i > end:
|
|
|
- continue
|
|
|
- if ext == "sa":
|
|
|
- seen_audio = True
|
|
|
- if not seen_video:
|
|
|
- continue
|
|
|
- if mode == "convert":
|
|
|
- os.system("cat "+m.source+"/"+n+" >> "+m.work+"/output.raw")
|
|
|
- if ext == "sv":
|
|
|
- if not seen_video:
|
|
|
- first_video = i
|
|
|
- seen_video = True
|
|
|
- if mode == "convert":
|
|
|
- os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
|
|
|
- count += 1
|
|
|
- while i >= first_video + count * 1000000 / m.fps:
|
|
|
- print("Duplicating video frame at", i)
|
|
|
- if mode == "convert":
|
|
|
- os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
|
|
|
- count += 1
|
|
|
- m.frame_used(n)
|
|
|
-
|
|
|
- if mode == "convert":
|
|
|
- os.chdir(m.work)
|
|
|
- print("Converting", m.display_frames(count), "skipped", skip_audio, "audio and", skip_video, "video frames")
|
|
|
- os.system("ffmpeg -f s16le -ac 2 -ar 48000 -i output.raw output.wav")
|
|
|
- options = "-b:v 4096k -c:v libx264 -preset ultrafast"
|
|
|
- os.system("ffmpeg -framerate %d -i image-%%06d.jpeg -i output.wav %s output.mp4" % (m.fps, options))
|
|
|
- os.system("rm output.raw")
|
|
|
- out_file = m.output+"/video-%04d.mp4" % m.out_index
|
|
|
- m.out_index += 1
|
|
|
- os.system("mv output.mp4 "+out_file)
|
|
|
- print("Converted", m.display_frames(count), "in", "%.1f" % (time.time()-t1), "seconds")
|
|
|
- if mode == "convert":
|
|
|
- print("Original size -> new size")
|
|
|
- os.system("du -sh .; du -sh "+out_file)
|
|
|
- return count
|
|
|
-
|
|
|
def jpegize(m):
|
|
|
i = 0
|
|
|
os.chdir(m.base)
|
|
@@ -225,7 +462,8 @@ class Mpegize:
|
|
|
print("Something went terribly wrong")
|
|
|
continue
|
|
|
i += 1
|
|
|
- print(i, '/', len(l))
|
|
|
+ print("Message: %.0f%%" % ((100*i) / len(l)))
|
|
|
+ sys.stdout.flush()
|
|
|
base = n[:-4]
|
|
|
subprocess.run(['dcraw',
|
|
|
'-w', # -w Use camera white balance
|