#!/usr/bin/python3
# Copyright 2022 Pavel Machek, GPLv2+
import os, sys, time, copy, subprocess
# https://stackoverflow.com/questions/11779490/how-to-add-a-new-audio-not-mixing-into-a-video-using-ffmpeg
# https://ottverse.com/create-video-from-images-using-ffmpeg/
# https://github.com/kkroening/ffmpeg-python/issues/95
# sudo apt install ffmpeg
# Usage: mpegize convert
# head -c 1000 < /dev/zero > /tmp/delme.sm/1.foo.sa
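# Layout assumed by the code below: the recorder drops one file per frame into
# <base>sm, named "<microsecond timestamp>.<tag>.<extension>".  Extensions are
# .sv (a JPEG video frame), .sa (a chunk of raw audio) and .mark (an event
# marker whose tag is start/stop/photo/wow).  <base>smt is scratch space and
# <base>smo receives the finished photos and clips.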
class Mpegize:
    base = '/tmp/delme.'
    fps = 30.5

    def prepare(m):
        m.source = m.base+'sm'
        m.work = m.base+'smt'
        m.output = m.base+'smo'
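    # Create the output and work directories if missing and clear leftovers
    # from a previous run.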
    def prepare_work(m):
        m.prepare()
        if not os.path.exists(m.output):
            os.mkdir(m.output)
        if not os.path.exists(m.work):
            os.mkdir(m.work)
        os.chdir(m.work)
        os.system("rm *.jpeg output.*")
    def prepare_source(m):
        m.prepare()
        m.out_index = 0
        l = os.listdir(m.source)
        print("Have", m.display_frames(len(l)), "frames")
        l.sort()
        m.frames = l
        m.unused_frames = copy.deepcopy(l)
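    # Split a frame filename into (extension, tag, microsecond timestamp);
    # anything that is not a .mark/.sa/.sv file yields empty values.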
    def parse_frame(m, n):
        if n[-5:] != ".mark" and n[-3:] != ".sa" and n[-3:] != ".sv":
            return "", "", 0,
        s = n.split(".")
        i = int(s[0])
        return s[2], s[1], i
    def help(m):
        print("mpegize command base-dir")
    def run(m, argv):
        if len(argv) > 2:
            m.base = argv[2]
        mode = argv[1]
        if mode == "stat" or mode == "convert" or mode == "gc" or mode == "convertall":
            m.process(mode)
            return
        if mode == "gaps":
            print("Video gaps")
            m.stat_gaps("sv")
            print("Audio gaps")
            m.stat_gaps("sa")
            return
        if mode == "jpegize":
            m.jpegize()
            return
        m.help()  # unknown command: print usage
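    # Report gaps between consecutive frames of one type ("sv" or "sa") that
    # exceed the nominal frame period plus 15 msec of slack.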
    def stat_gaps(m, e):
        m.prepare_source()
        last = 0
        num = 0
        total = 0
        limit = 1000000 / m.fps + 15000
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != e:
                continue
            if i - last > limit:
                print("Gap at", i, (i - last) / 1000., "msec")
                num += 1
            last = i
            total += 1
        print("Total", num, "gaps of", total)
        print("Expected", (1000000 / m.fps) / 1000., "msec, limit", limit / 1000., "msec")
    def process(m, mode):
        m.prepare_source()
        photos = 0
        video_frames = 0
        start = 0
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != "mark":
                continue
            print(n)
            if mid == "start":
                start = i
            if mid == "stop":
                video_frames += m.extract_video(start, i, mode)
                start = 0
            if mid == "wow":
                if start:
                    start -= 5000000
                else:
                    photos += 5
                    m.extract_photo(i - 1000000, mode)
                    m.extract_photo(i - 2000000, mode)
                    m.extract_photo(i - 3000000, mode)
                    m.extract_photo(i - 4000000, mode)
                    m.extract_photo(i - 5000000, mode)
            if mid == "photo":
                photos += 1
                m.extract_photo(i, mode)
        if mode == "convertall":
            video_frames += m.extract_video(0, 9999999999999999, "convert")
            return
        print("Total", photos, "photos and", m.display_frames(video_frames))
        print(len(m.unused_frames), "/", len(m.frames))
        if mode == "gc":
            os.chdir(m.source)
            for n in m.unused_frames:
                os.unlink(n)
            print(m.unused_frames)
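    # Helpers: timestamps are in microseconds.  Note that display_frames
    # assumes 30 fps for its duration estimate even though fps above is 30.5.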
    def display_usec(m, v):
        return "%.2f sec" % (v/1000000.)

    def display_frames(m, v):
        return "%d frames %s" % (v, m.display_usec(v * 1000000 / 30.))

    def frame_used(m, n):
        if n in m.unused_frames:
            m.unused_frames.remove(n)
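    # Save a still: pick the first .sv frame at or after the requested
    # timestamp (falling back to the last one before it) and hardlink it into
    # the output directory.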
    def extract_photo(m, around, mode):
        print("Photo:", around)
        best = None
        for n in m.frames:
            ext, mid, i = m.parse_frame(n)
            if ext != "sv":
                continue
            if i < around:
                best = n
                continue
            best = n
            break
        m.frame_used(best)
        out_file = m.output+"/image-%04d.jpeg" % m.out_index
        m.out_index += 1
        if mode == "convert":
            os.system("ln "+m.source+"/"+best+" "+out_file)
    def extract_video(m, start, end, mode):
        print("Searching video", start, end, "--", m.display_usec(end-start))
        if mode == "convert":
            m.prepare_work()
        t1 = time.time()
        seen_audio = False
        seen_video = False
        count = 0
        skip_audio = 0
        skip_video = 0
        num = 0
        for n in m.frames:
            num += 1
            if not num % 1000:
                print("Frame", num)
            ext, mid, i = m.parse_frame(n)
            if ext != "sa" and ext != "sv":
                m.frame_used(n)
                continue
            if i < start - 1000000 or i > end:
                continue
            if ext == "sa":
                seen_audio = True
                if not seen_video:
                    continue
                if mode == "convert":
                    os.system("cat "+m.source+"/"+n+" >> "+m.work+"/output.raw")
            if ext == "sv":
                if not seen_video:
                    first_video = i
                seen_video = True
                if mode == "convert":
                    os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
                count += 1
                while i >= first_video + count * 1000000 / m.fps:
                    print("Duplicating video frame at", i)
                    if mode == "convert":
                        os.system("ln "+m.source+"/"+n+" "+m.work+"/image-%06d.jpeg" % count)
                    count += 1
            m.frame_used(n)
        if mode == "convert":
            os.chdir(m.work)
            print("Converting", m.display_frames(count), "skipped", skip_audio, "audio and", skip_video, "video frames")
            os.system("ffmpeg -f s16le -ac 2 -ar 48000 -i output.raw output.wav")
            options = "-b:v 4096k -c:v libx264 -preset ultrafast"
            os.system("ffmpeg -framerate %d -i image-%%06d.jpeg -i output.wav %s output.mp4" % (m.fps, options))
            os.system("rm output.raw")
            out_file = m.output+"/video-%04d.mp4" % m.out_index
            m.out_index += 1
            os.system("mv output.mp4 "+out_file)
            print("Converted", m.display_frames(count), "in", "%.1f" % (time.time()-t1), "seconds")
        if mode == "convert":
            print("Original size -> new size")
            os.system("du -sh .; du -sh "+out_file)
        return count
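    # Convert raw .dng captures in the source directory into JPEG video
    # frames: dcraw writes a TIFF, ImageMagick's convert turns it into a JPEG,
    # which is then renamed to .jpeg.sv so it enters the normal pipeline.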
    def jpegize(m):
        m.prepare()  # set up m.source before the chdir below
        i = 0
        os.chdir(m.source)
        l = os.listdir(m.source)
        l = filter(lambda n: n[-4:] == ".dng", l)
        l = list(l)
        l.sort()
        print("Have", m.display_frames(len(l)), "dngs")
        for n in l:
            if n[-4:] != ".dng":
                print("Something went terribly wrong")
                continue
            i += 1
            print(i, '/', len(l))
            base = n[:-4]
            subprocess.run(['dcraw',
                            '-w',       # -w  Use camera white balance
                            '+M',       # +M  Use embedded color matrix
                            '-H', '2',  # -H 2  Recover highlights by blending them
                            '-o', '1',  # -o 1  Output in sRGB colorspace
                            '-q', '0',  # -q 0  Debayer with fast bi-linear interpolation
                            '-f',       # -f  Interpolate RGGB as four colors
                            '-T', n])   # -T  Output TIFF
            subprocess.run(['convert', base+'.tiff', base+'.jpeg'])
            os.unlink(base+'.tiff')
            os.rename(base+'.jpeg', base+'.jpeg.sv')
m = Mpegize()
m.run(sys.argv)
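# Example invocations (the base directory defaults to /tmp/delme.):
#   mpegize gaps                - report dropped audio/video frames
#   mpegize jpegize             - turn .dng captures into .jpeg.sv frames
#   mpegize convert             - cut and encode the marked clips and photos
#   mpegize convert /data/rec.  - the same, with a hypothetical base directory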