#!/usr/bin/python3
# Copyright 2022 Pavel Machek, GPLv2+
import os, sys, time, copy, subprocess

# https://stackoverflow.com/questions/11779490/how-to-add-a-new-audio-not-mixing-into-a-video-using-ffmpeg
# https://ottverse.com/create-video-from-images-using-ffmpeg/
# https://github.com/kkroening/ffmpeg-python/issues/95
# sudo apt install ffmpeg
# Usage: mpegize convert
# head -c 1000 < /dev/zero > /tmp/delme.sm/1.foo.sa
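#
# Input layout (as consumed by parse_frame() and process() below): the source
# directory <base>sm holds one file per captured chunk, named
# <timestamp-in-usec>.<label>.<ext>, where <ext> is:
#   .sa   - raw audio chunk (signed 16-bit little-endian stereo, 48 kHz)
#   .sv   - a single JPEG video frame
#   .mark - event marker; <label> is start, stop, photo or wow
# Output goes to <base>smo, with <base>smt used as a scratch directory.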

class Mpegize:
    base = '/tmp/delme.'
    fps = 30.5

    def prepare(m):
        m.source = m.base+'sm'
        m.work = m.base+'smt'
        m.output = m.base+'smo'

    def prepare_work(m):
        m.prepare()
        if not os.path.exists(m.output):
            os.mkdir(m.output)
        if not os.path.exists(m.work):
            os.mkdir(m.work)
        os.chdir(m.work)
        os.system("rm *.jpeg output.*")

    def prepare_source(m):
        m.prepare()
        m.out_index = 0
        l = os.listdir(m.source)
        print("Have", m.display_frames(len(l)), "frames")
        l.sort()
        m.frames = l
        m.unused_frames = copy.deepcopy(l)

    def parse_frame(m, n):
        if n[-5:] != ".mark" and n[-3:] != ".sa" and n[-3:] != ".sv":
            return "", "", 0
        s = n.split(".")
        i = int(s[0])
        return s[2], s[1], i

    def help(m):
        print("mpegize command base-dir")
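
    # Command-line dispatcher: argv[1] selects the mode (stat, convert,
    # convertall, gc, gaps or jpegize), optional argv[2] overrides the
    # base directory.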
    def run(m, argv):
        if len(argv) < 2:
            m.help()
            return
        if len(argv) > 2:
            m.base = argv[2]
        mode = argv[1]
        if mode in ("stat", "convert", "gc", "convertall"):
            m.process(mode)
            return
        if mode == "gaps":
            print("Video gaps")
            m.stat_gaps("sv")
            print("Audio gaps")
            m.stat_gaps("sa")
            return
        if mode == "jpegize":
            m.jpegize()
            return
        m.help()
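
    # Report recording gaps: any distance between consecutive frames of the
    # given type ("sv" or "sa") above one frame period plus 15 msec of slack
    # is printed.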
    def stat_gaps(m, e):
        m.prepare_source()
        last = 0
        num = 0
        total = 0
        limit = 1000000 / m.fps + 15000
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != e:
                continue
            if i - last > limit:
                print("Gap at", i, (i - last) / 1000., "msec")
                num += 1
            last = i
            total += 1
        print("Total", num, "gaps of", total)
        print("Expected", (1000000 / m.fps) / 1000., "msec, limit", limit / 1000., "msec")
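
    # Walk the .mark files in timestamp order and extract what they delimit:
    # "start"/"stop" bracket a video clip, "photo" saves the nearest frame as
    # a still, and "wow" either extends a running clip 5 seconds into the past
    # or saves one still per second for the last 5 seconds.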
    def process(m, mode):
        m.prepare_source()
        photos = 0
        video_frames = 0
        start = 0
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != "mark":
                continue
            print(n)
            if mid == "start":
                start = i
            if mid == "stop":
                video_frames += m.extract_video(start, i, mode)
                start = 0
            if mid == "wow":
                if start:
                    start -= 5000000
                else:
                    photos += 5
                    m.extract_photo(i - 1000000, mode)
                    m.extract_photo(i - 2000000, mode)
                    m.extract_photo(i - 3000000, mode)
                    m.extract_photo(i - 4000000, mode)
                    m.extract_photo(i - 5000000, mode)
            if mid == "photo":
                photos += 1
                m.extract_photo(i, mode)
        if mode == "convertall":
            video_frames += m.extract_video(0, 9999999999999999, "convert")
            return
        print("Total", photos, "photos and", m.display_frames(video_frames))
        print(len(m.unused_frames), "/", len(m.frames))
        if mode == "gc":
            os.chdir(m.source)
            for n in m.unused_frames:
                os.unlink(n)
            print(m.unused_frames)

    def display_usec(m, v):
        return "%.2f sec" % (v / 1000000.)

    def display_frames(m, v):
        return "%d frames %s" % (v, m.display_usec(v * 1000000 / 30.))

    def frame_used(m, n):
        if n in m.unused_frames:
            m.unused_frames.remove(n)
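
    # Save a still: pick the first .sv frame at or after the requested
    # timestamp (or the last one overall if none follows) and hard-link it
    # into the output directory as a numbered .jpeg.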
    def extract_photo(m, around, mode):
        print("Photo:", around)
        best = None
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != "sv":
                continue
            if i < around:
                best = n
                continue
            best = n
            break
        m.frame_used(best)
        out_file = m.output+"/image-%04d.jpeg" % m.out_index
        m.out_index += 1
        if mode == "convert":
            os.system("ln "+m.source+"/"+best+" "+out_file)
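
    # Assemble one clip from the frames between start and end: .sa chunks are
    # concatenated into output.raw, .sv frames are hard-linked as a numbered
    # JPEG sequence (with late frames duplicated to keep the frame rate
    # constant), then ffmpeg muxes both into an .mp4 in the output directory.
    # Audio arriving before the first video frame is dropped.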
    def extract_video(m, start, end, mode):
        print("Searching video", start, end, "--", m.display_usec(end-start))
        if mode == "convert":
            m.prepare_work()
        t1 = time.time()
        seen_audio = False
        seen_video = False
        count = 0
        skip_audio = 0
        skip_video = 0
        num = 0
        for n in m.frames:
            num += 1
            if not num % 1000:
                print("Frame", num)
            ext, mid, i = m.parse_frame(n)
            if ext != "sa" and ext != "sv":
                m.frame_used(n)
                continue
            if i < start - 1000000 or i > end:
                continue
            if ext == "sa":
                seen_audio = True
                if not seen_video:
                    continue
                if mode == "convert":
                    os.system("cat "+m.source+"/"+n+" >> "+m.work+"/output.raw")
            if ext == "sv":
                if not seen_video:
                    first_video = i
                seen_video = True
                if mode == "convert":
                    os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
                count += 1
                while i >= first_video + count * 1000000 / m.fps:
                    print("Duplicating video frame at", i)
                    if mode == "convert":
                        os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
                    count += 1
            m.frame_used(n)
        if mode == "convert":
            os.chdir(m.work)
            print("Converting", m.display_frames(count), "skipped", skip_audio, "audio and", skip_video, "video frames")
            os.system("ffmpeg -f s16le -ac 2 -ar 48000 -i output.raw output.wav")
            options = "-b:v 4096k -c:v libx264 -preset ultrafast"
            os.system("ffmpeg -framerate %d -i image-%%06d.jpeg -i output.wav %s output.mp4" % (m.fps, options))
            os.system("rm output.raw")
            out_file = m.output+"/video-%04d.mp4" % m.out_index
            m.out_index += 1
            os.system("mv output.mp4 "+out_file)
            print("Converted", m.display_frames(count), "in", "%.1f" % (time.time()-t1), "seconds")
            print("Original size -> new size")
            os.system("du -sh .; du -sh "+out_file)
        return count
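
    # Develop raw .dng files in the source directory into JPEGs with dcraw
    # and ImageMagick convert, then rename them to .jpeg.sv so they are
    # treated as video frames by the code above.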
    def jpegize(m):
        m.prepare()  # set m.source; run() calls jpegize() without prepare_source()
        i = 0
        os.chdir(m.source)
        l = os.listdir(m.source)
        l = filter(lambda n: n[-4:] == ".dng", l)
        l = list(l)
        l.sort()
        print("Have", m.display_frames(len(l)), "dngs")
        for n in l:
            if n[-4:] != ".dng":
                print("Something went terribly wrong")
                continue
            i += 1
            print(i, '/', len(l))
            base = n[:-4]
            subprocess.run(['dcraw',
                            '-w',       # -w Use camera white balance
                            '+M',       # +M use embedded color matrix
                            '-H', '2',  # -H 2 Recover highlights by blending them
                            '-o', '1',  # -o 1 Output in sRGB colorspace
                            '-q', '0',  # -q 0 Debayer with fast bi-linear interpolation
                            '-f',       # -f Interpolate RGGB as four colors
                            '-T', n])   # -T Output TIFF
            subprocess.run(['convert', base+'.tiff', base+'.jpeg'])
            os.unlink(base+'.tiff')
            os.rename(base+'.jpeg', base+'.jpeg.sv')
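
# Example: ./mpegize.py convert /tmp/delme.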
if __name__ == "__main__":
    m = Mpegize()
    m.run(sys.argv)