  1. #!/usr/bin/python3
  2. # Copyright 2022, 2024 Pavel Machek, GPLv2+
  3. import os, sys, time, copy, subprocess
  4. import gi
  5. gi.require_version('Gst', '1.0')
  6. from gi.repository import Gst, GLib
  7. import os
  8. import time
  9. # https://stackoverflow.com/questions/11779490/how-to-add-a-new-audio-not-mixing-into-a-video-using-ffmpeg
  10. # https://ottverse.com/create-video-from-images-using-ffmpeg/
  11. # https://github.com/kkroening/ffmpeg-python/issues/95
  12. # sudo apt install ffmpeg
  13. # Usage: mpegize convert
  14. # head -c 1000 < /dev/zero > /tmp/delme.sm/1.foo.sa
def gst_convert(mega_dir, out_file, use_jpeg):
    """Mux captured audio (.sa) and video (.grw or .jpeg.sv) frame files
    from mega_dir into a single Matroska movie written to out_file.

    mega_dir -- capture directory; frame files live in mega_dir + "sm/"
    out_file -- path of the resulting movie file (x264 + vorbis in mkv)
    use_jpeg -- True: video frames are JPEG (.jpeg.sv, from DNG
                conversion); False: raw .grw frames with embedded caps

    Frame file names start with a numeric timestamp (microseconds,
    judging by get_time() below -- TODO confirm with the recorder side)
    which is turned into GStreamer buffer pts/dts.  Consumed frame
    files are deleted as they are pushed into the pipeline.
    """
    def sa_read(name, t):
        # Read one raw audio chunk and wrap it in a Gst.Buffer stamped
        # with timestamp t (nanoseconds).
        with open(name, "rb") as file:
            rgb_data = file.read(10*1024*1024)
        caps_string = "audio/x-raw,format=U16LE,channels=2,rate=48000,layout=interleaved,channel-mask=3"
        caps = Gst.Caps.from_string(caps_string)
        buffer = Gst.Buffer.new_wrapped(rgb_data)
        if False:
            # Disabled experiment: wall-clock stamping for live use.
            time.sleep(1/30.)
            # nanoseconds
            buffer.pts = time.time() * 1000*1000*1000
            buffer.dts = time.time() * 1000*1000*1000
        elif True:
            buffer.pts = t
            buffer.dts = t
            # assumes each .sa file holds 0.1 s of audio -- TODO confirm
            buffer.duration = (1000*1000*1000)/10.
        return buffer, caps
    def sa_src(appsrc):
        # Configure `appsrc` to stream audio files from the `audio`
        # closure variable (a grwAudio, bound later in gst_convert).
        def on_need_data(appsrc, data, length):
            name = audio.get_path()
            # Stop at end of directory or at the first non-audio file.
            if name == None or name[-22:] != ".48000-s16le-stereo.sa":
                appsrc.emit("end-of-stream")
                print("End of audio stream")
                return
            t = audio.get_time()
            #print("Audio: ", name, " need ", data, t)
            buffer, caps = sa_read(name, t)
            os.unlink(name)  # file is consumed once pushed
            appsrc.set_property("caps", caps)
            appsrc.emit("push-buffer", buffer)
        appsrc.set_property("format", Gst.Format.TIME)
        appsrc.set_property("is-live", False)
        appsrc.set_property("block", True)
        # Prime the caps from the first file (buffer itself is not pushed).
        name = audio.get_path()
        buffer, caps = sa_read(name, 0)
        appsrc.set_property("caps", caps)
        #appsrc.emit("push-buffer", buffer)
        s = appsrc.connect("need-data", on_need_data, "")
        print("Connect", s)
    class grwBase:
        # Sorted directory walker over timestamp-named frame files.
        # Subclasses set `suffix` and call init() with the directory.
        def init(m, dir):
            m.dir = dir
            m.slen = len(m.suffix)
            m.start_time = 0  # timestamp base (ns), set by caller
            m.scan()
            print("Movie", len(m.list))
        def scan(m):
            # Re-read and sort the directory listing.
            m.list = os.listdir(m.dir)
            m.list.sort()
            m.length = len(m.list)
        def get_path(m):
            # Full path of the next matching file, or None/"" passthrough.
            s = m.get_name()
            if s: return m.dir + s
            return s
        def get_name(m):
            # Next file name ending in `suffix`; skips foreign files.
            m.scan()
            #print("Get path -- ")
            while True:
                if (len(m.list)) == 0:
                    return None
                #print("Get path: ", m.list[0], m.suffix)
                if m.list[0][-m.slen:] != m.suffix:
                    m.pop()
                    continue
                return m.list[0]
        def get_time(m):
            # Timestamp of the next file relative to start_time, in ns
            # (file name carries microseconds; *1000 -> nanoseconds).
            s = m.get_name()
            s = s[:-m.slen]
            t = int(s)
            res = t * 1000 - m.start_time
            t = t / (1000*1000.)
            # Throttle: do not consume files younger than 1 s, so the
            # recorder has finished writing them.
            while (time.time() - t < 1):
                print("Too fast: ", time.time(), t, file=sys.stderr)
                print("Message: WA")
                sys.stdout.flush()
                time.sleep(.1)
            return res
        def pop(m):
            # Drop the head of the listing.
            m.list = m.list[1:]
        def progress(m):
            # Report remaining-file count to the UI on stdout.
            i = len(m.list)
            print("Message: %d" % i)
            sys.stdout.flush()
    class grwVideo(grwBase):
        # Raw .grw video frames, located directly in `dir`.
        suffix = ".grw"
        def __init__(m, dir):
            m.init(dir)
    class grwJPEG(grwBase):
        # JPEG video frames, located in the "sm/" subdirectory.
        suffix = ".jpeg.sv"
        def __init__(m, dir):
            m.init(dir + "sm/")
    class grwAudio(grwVideo):
        # Raw s16le stereo 48 kHz audio chunks in "sm/".
        suffix = ".48000-s16le-stereo.sa"
        def __init__(m, dir):
            m.init(dir + "sm/")
    def grw_read(name, t):
        """Read one .grw frame; parse its trailing NUL-delimited ASCII
        footer ("Caps: ...\\n...\\nGRW") for caps, and return a Gst.Buffer
        stamped with timestamp t (ns)."""
        with open(name, "rb") as file:
            rgb_data = file.read(10*1024*1024)
        # Scan backwards to the NUL byte that precedes the footer.
        i = len(rgb_data)
        i -= 1
        while rgb_data[i] != 0:
            i -= 1
        footer = rgb_data[i+1:]
        sp = str(footer, 'ascii').split('\n')
        # Create caps for the file
        caps_string = sp[0][6:]
        caps = Gst.Caps.from_string(caps_string)
        # Footer sanity checks -- diagnostic only, never fatal.
        if sp[0][:6] != "Caps: ":
            print("Bad footer")
        if sp[1][:6] != "Size: ":
            print("Bad footer")
        if sp[-1] != "GRW":
            print("Missing GRW footer")
        buffer = Gst.Buffer.new_wrapped(rgb_data)
        # This does not work for interactive use.
        if False:
            time.sleep(1/30.)
            # nanoseconds
            buffer.pts = time.time() * 1000*1000*1000
            buffer.dts = time.time() * 1000*1000*1000
        elif True:
            buffer.pts = t
            buffer.dts = t
            # assumes ~30 fps video -- TODO confirm with recorder
            buffer.duration = (1000*1000*1000)/30.
        return buffer, caps
    def grwsrc(appsrc):
        # Configure `appsrc` to stream .grw frames from the `movie`
        # closure variable (bound later in gst_convert).
        def on_need_data(appsrc, data, length):
            name = movie.get_path()
            if name == None or name[-4:] != ".grw":
                appsrc.emit("end-of-stream")
                print("End of video stream")
                return
            t = movie.get_time()
            #print("Video: ", name, t)
            movie.progress()
            buffer, caps = grw_read(name, t)
            os.unlink(name)  # file is consumed once pushed
            appsrc.set_property("caps", caps)
            appsrc.emit("push-buffer", buffer)
        appsrc.set_property("format", Gst.Format.TIME)
        appsrc.set_property("is-live", False)
        appsrc.set_property("block", True)
        # Prime the caps from the first frame (buffer not pushed).
        name = movie.get_path()
        buffer, caps = grw_read(name, 0)
        appsrc.set_property("caps", caps)
        #appsrc.emit("push-buffer", buffer)
        s = appsrc.connect("need-data", on_need_data, "")
        print("Connect", s)
    def jpeg_read(name, t):
        # Read one JPEG frame and wrap it with image/jpeg caps,
        # stamped with timestamp t (ns).
        with open(name, "rb") as file:
            rgb_data = file.read(10*1024*1024)
        i = len(rgb_data)
        buffer = Gst.Buffer.new_wrapped(rgb_data)
        caps_string = "image/jpeg"
        caps = Gst.Caps.from_string(caps_string)
        # This does not work for interactive use.
        if False:
            time.sleep(1/30.)
            # nanoseconds
            buffer.pts = time.time() * 1000*1000*1000
            buffer.dts = time.time() * 1000*1000*1000
        elif True:
            buffer.pts = t
            buffer.dts = t
            # assumes ~30 fps video -- TODO confirm with recorder
            buffer.duration = (1000*1000*1000)/30.
        return buffer, caps
    def jpeg_src(appsrc):
        # Configure `appsrc` to stream .jpeg.sv frames from `movie`.
        def on_need_data(appsrc, data, length):
            name = movie.get_path()
            if name == None or name[-8:] != ".jpeg.sv":
                appsrc.emit("end-of-stream")
                print("End of video stream")
                return
            t = movie.get_time()
            #print("Video: ", name, t)
            buffer, caps = jpeg_read(name, t)
            os.unlink(name)  # file is consumed once pushed
            appsrc.set_property("caps", caps)
            appsrc.emit("push-buffer", buffer)
        appsrc.set_property("format", Gst.Format.TIME)
        appsrc.set_property("is-live", False)
        appsrc.set_property("block", True)
        # Prime the caps from the first frame (buffer not pushed).
        name = movie.get_path()
        buffer, caps = jpeg_read(name, 0)
        appsrc.set_property("caps", caps)
        #appsrc.emit("push-buffer", buffer)
        s = appsrc.connect("need-data", on_need_data, "")
        print("Connect", s)
    def v_src(appsrc):
        # Dispatch video appsrc setup according to the frame format.
        if not use_jpeg:
            grwsrc(appsrc)
        else:
            jpeg_src(appsrc)
    count = 0
    path = mega_dir
    if use_jpeg:
        movie = grwJPEG(path)
    else:
        movie = grwVideo(path)
    audio = grwAudio(path)
    # Rebase both streams to the earlier of the two first timestamps so
    # the movie starts at t=0 and A/V stay aligned.
    t1 = movie.get_time()
    t2 = audio.get_time()
    tm = min(t1,t2)
    print("Time base is", tm)
    movie.start_time = tm
    audio.start_time = tm
    def pipeline_video():
        # Debug pipeline: video only.  The if True/False ladders are
        # hand-toggled variants kept from development.
        if True:
            s = "appsrc name=source"
            if use_jpeg:
                s += " ! jpegdec "
        else:
            s = "videotestsrc"
        s += " ! video/x-raw,width=(int)640,height=(int)480,format=(string)RGB "
        if False:
            s += " ! videoconvert ! jpegenc"
            s += " ! appsink name=sink"
        elif True:
            s += " ! videoconvert ! autovideosink"
        else:
            s += " ! videoconvert ! x264enc bitrate=3072 speed-preset=ultrafast ! matroskamux ! filesink location=" + out_file
        pipeline = Gst.parse_launch(s)
        p = pipeline.get_by_name("source")
        if p:
            if False:
                mysrc(p)  # dead branch; mysrc is not defined here
            else:
                v_src(p)
        p = pipeline.get_by_name("sink")
        if p:
            mysink(p)  # only reachable with the disabled appsink variant
        return pipeline
    def pipeline_audio():
        # Debug pipeline: audio only.
        # audiotestsrc ! audioconvert ! audioresample ! autoaudiosink
        if True:
            s = "appsrc name=source"
        else:
            s = "audiotestsrc"
        if True:
            s += " ! audiobuffersplit ! audioconvert ! audioresample ! autoaudiosink"
        else:
            s += " ! ! ! "
        pipeline = Gst.parse_launch(s)
        p = pipeline.get_by_name("source")
        if p:
            sa_src(p)
        p = pipeline.get_by_name("sink")
        if p:
            mysink(p)  # no element named "sink" above; effectively dead
        return pipeline
    def pipeline_both():
        # Production pipeline: vorbis audio + x264 video muxed into
        # matroska and written to out_file.
        if True:
            s = "appsrc name=asrc"
        else:
            s = "audiotestsrc"
        # Audiobuffersplit creates problems with A/V synchronization, avoid.
        #s += "! audiobuffersplit"
        s += " ! audioconvert ! vorbisenc ! mux. "
        if True:
            s += "appsrc name=vsrc"
            if use_jpeg:
                s += " ! jpegdec "
        else:
            s += "videotestsrc"
        s += " ! video/x-raw,width=(int)640,height=(int)480,format=(string)RGB "
        s += " ! videoconvert ! x264enc bitrate=3072 speed-preset=ultrafast ! matroskamux name=mux"
        if False:
            s += " ! decodebin ! playsink"
        else:
            s += " ! filesink location="+out_file
        pipeline = Gst.parse_launch(s)
        p = pipeline.get_by_name("asrc")
        if p:
            sa_src(p)
        p = pipeline.get_by_name("vsrc")
        if p:
            v_src(p)
        return pipeline
    Gst.init(None)
    Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
    if False:
        Gst.debug_set_default_threshold(Gst.DebugLevel.INFO)
    # Pipeline selection: video-only / audio-only variants are kept for
    # debugging; the combined pipeline is the live path.
    if False:
        pipeline = pipeline_video()
    elif False:
        pipeline = pipeline_audio()
    else:
        pipeline = pipeline_both()
    # Function to handle end of stream
    def on_eos(bus, message):
        print("End of stream")
        pipeline.set_state(Gst.State.NULL)
        loop.quit()
    # Set up bus to handle messages
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message::eos", on_eos)
    # Set the pipeline to the playing state
    pipeline.set_state(Gst.State.PLAYING)
    # Run the main loop to handle GStreamer events
    loop = GLib.MainLoop()
    try:
        loop.run()
    except KeyboardInterrupt:
        pipeline.set_state(Gst.State.NULL)
        loop.quit()
  321. class Mpegize:
  322. base = '/tmp/delme.'
  323. fps = 30.5
  324. def prepare(m):
  325. m.source = m.base+'sm'
  326. m.work = m.base+'smt'
  327. m.output = m.base+'smo'
  328. def prepare_work(m):
  329. m.prepare()
  330. if not os.path.exists(m.output):
  331. os.mkdir(m.output)
  332. if not os.path.exists(m.work):
  333. os.mkdir(m.work)
  334. os.chdir(m.work)
  335. os.system("rm *.jpeg output.*")
  336. def prepare_source(m):
  337. m.prepare()
  338. m.out_index = 0
  339. l = os.listdir(m.source)
  340. print("Have", m.display_frames(len(l)), "frames")
  341. l.sort()
  342. m.frames = l
  343. m.unused_frames = copy.deepcopy(l)
  344. def parse_frame(m, n):
  345. if n[-5:] != ".mark" and n[-3:] != ".sa" and n[-3:] != ".sv":
  346. return "", "", 0,
  347. s = n.split(".")
  348. i = int(s[0])
  349. return s[2], s[1], i
  350. def help(m):
  351. print("mpegize command base-dir destination-movie fps dng|grw")
  352. def cleanup(m):
  353. os.system("rmdir %s/sm/" % m.base)
  354. os.system("rmdir %s/" % m.base)
  355. print("Message: Rec")
  356. sys.stdout.flush()
  357. def run(m, argv):
  358. if len(argv) > 2:
  359. m.base = argv[2]
  360. mode = argv[1]
  361. fps = argv[4]
  362. ext = argv[5]
  363. if mode == "start":
  364. print("Phase 0: start, mode ", ext, file=sys.stderr)
  365. if ext!="grw":
  366. return
  367. print("Phase 0: wait", file=sys.stderr)
  368. print("Message: W1")
  369. sys.stdout.flush()
  370. time.sleep(1)
  371. print("Phase 1: parallel fun", file=sys.stderr)
  372. print("Message: proc")
  373. sys.stdout.flush()
  374. gst_convert(m.base, argv[3], argv[4]=="dng")
  375. m.cleanup()
  376. return
  377. if mode == "convert" or mode == "stop":
  378. if ext=="grw":
  379. return
  380. print("Phase 1: jpegize", file=sys.stderr)
  381. print("Message: 0%%")
  382. sys.stdout.flush()
  383. m.prepare()
  384. m.jpegize()
  385. print("Phase 2: mpegize -- ", argv[3], file=sys.stderr)
  386. print("Message: enc")
  387. sys.stdout.flush()
  388. gst_convert(m.base, argv[3], argv[4]=="dng")
  389. m.cleanup()
  390. return
  391. if mode == "gaps":
  392. print("Video gaps")
  393. m.stat_gaps("sv")
  394. print("Audio gaps")
  395. m.stat_gaps("sa")
  396. return
  397. if mode == "jpegize":
  398. m.prepare()
  399. m.jpegize()
  400. return
  401. m.help()
  402. def stat_gaps(m, e):
  403. m.prepare_source()
  404. last = 0
  405. num = 0
  406. total = 0
  407. limit = 1000000 / m.fps + 15000
  408. for n in m.frames:
  409. ext, mid, i = m.parse_frame(n)
  410. if ext != e:
  411. continue
  412. if i - last > limit:
  413. print("Gap at", i, (i - last) / 1000., "msec")
  414. num += 1
  415. last = i
  416. total += 1
  417. print("Total", num, "gaps of", total)
  418. print("Expected", (1000000 / m.fps) / 1000., "msec, limit", limit / 1000., "msec")
  419. def display_usec(m, v):
  420. return "%.2f sec" % (v/1000000.)
  421. def display_frames(m, v):
  422. return "%d frames %s" % (v, m.display_usec(v * 1000000 / 30.))
  423. def jpegize(m):
  424. i = 0
  425. os.chdir(m.base)
  426. l = os.listdir(m.base)
  427. l = filter(lambda n: n[-4:] == ".dng", l)
  428. l = list(l)
  429. l.sort()
  430. print("Have", m.display_frames(len(l)), "dngs")
  431. for n in l:
  432. if n[-4:] != ".dng":
  433. print("Something went terribly wrong")
  434. continue
  435. i += 1
  436. print("Message: %.0f%%" % ((100*i) / len(l)))
  437. sys.stdout.flush()
  438. base = n[:-4]
  439. subprocess.run(['dcraw',
  440. '-w', # -w Use camera white balance
  441. '+M', # +M use embedded color matrix
  442. '-H', '2', # -H 2 Recover highlights by blending them
  443. '-o', '1', # -o 1 Output in sRGB colorspace
  444. '-q', '0', # -q 0 Debayer with fast bi-linear interpolation
  445. '-f', # -f Interpolate RGGB as four colors
  446. '-T', n]) # -T Output TIFF
  447. subprocess.run(['convert', base+'.tiff', base+'.jpeg'])
  448. os.unlink(base+'.tiff')
  449. os.rename(base+'.jpeg', m.source+"/"+base+'.jpeg.sv')
  450. m = Mpegize()
  451. m.run(sys.argv)