dev #2

Merged
lovinervy merged 2 commits from dev into main 2026-04-03 18:28:31 +05:00
2 changed files with 74 additions and 22 deletions
Showing only changes of commit 61f8e0abe1 - Show all commits

42
main.py
View File

@@ -98,9 +98,8 @@ class InterpolationPipeline:
def run(self, video_path: Path, output_video: str): def run(self, video_path: Path, output_video: str):
prev_frames = tuple() prev_frames = tuple()
interpolated_frames = [] interpolated_frames: list["np.ndarray"] = []
part = 0 part = 0
source_frame_length = 0
chunk_seconds = 10 chunk_seconds = 10
length = self.video_maker.get_video_duration(video_path) length = self.video_maker.get_video_duration(video_path)
last_part_seconds = 1 if length % chunk_seconds else 0 last_part_seconds = 1 if length % chunk_seconds else 0
@@ -108,6 +107,7 @@ class InterpolationPipeline:
fps = self.video_maker.get_fps(video_path) fps = self.video_maker.get_fps(video_path)
logging.info(f"Video FPS: {fps}") logging.info(f"Video FPS: {fps}")
fps *= 2 # Doubling FPS fps *= 2 # Doubling FPS
width, height = self.video_maker.get_size(video_path)
for frames in self.video_maker.video_to_frames_generator( for frames in self.video_maker.video_to_frames_generator(
video_path, self.fs.frames_path, chunk_seconds video_path, self.fs.frames_path, chunk_seconds
): ):
@@ -117,13 +117,10 @@ class InterpolationPipeline:
img2 = frames[0] img2 = frames[0]
img1_2 = self.interpolator.interpolate(img1, img2) img1_2 = self.interpolator.interpolate(img1, img2)
interpolated_frames.append(img1_2) interpolated_frames.append(img1_2)
self.fs.clear_directory(self.fs.moved_path) generator = self._frame_generator(prev_frames, interpolated_frames)
self._save_images(prev_frames, interpolated_frames) part_path = self.fs.video_part_path / f"video_{part:08d}.mp4"
self.video_maker.images_to_video_pipeline(
self._merge_frames_to_video( generator, part_path, width, height, fps
self.fs.video_part_path / f"video_{part:08d}.mp4",
fps,
source_frame_length,
) )
interpolated_frames = [] interpolated_frames = []
logging.info(f"Finished processing part {part:08d}") logging.info(f"Finished processing part {part:08d}")
@@ -136,17 +133,13 @@ class InterpolationPipeline:
img2 = frames[i + 1] img2 = frames[i + 1]
img1_2 = self.interpolator.interpolate(img1, img2) img1_2 = self.interpolator.interpolate(img1, img2)
interpolated_frames.append(img1_2) interpolated_frames.append(img1_2)
source_frame_length = len(frames)
prev_frames = frames prev_frames = frames
self.fs.clear_directory(self.fs.moved_path) generator = self._frame_generator(prev_frames, interpolated_frames)
self._save_images(prev_frames, interpolated_frames) part_path = self.fs.video_part_path / f"video_{part:08d}.mp4"
self._merge_frames_to_video( self.video_maker.images_to_video_pipeline(
self.fs.video_part_path / f"video_{part:08d}.mp4", generator, part_path, width, height, fps
fps,
source_frame_length,
) )
self.fs.clear_directory(self.fs.moved_path)
logging.info(f"Finished processing part {part:08d}") logging.info(f"Finished processing part {part:08d}")
self._merge_video_parts(self.fs.output_path / output_video) self._merge_video_parts(self.fs.output_path / output_video)
logging.info( logging.info(
@@ -159,6 +152,7 @@ class InterpolationPipeline:
interpolated: list["np.ndarray"], interpolated: list["np.ndarray"],
): ):
logging.info("Saving images...") logging.info("Saving images...")
self.fs.clear_directory(self.fs.moved_path)
index = 0 index = 0
for i, frame in enumerate(source): for i, frame in enumerate(source):
name = self.fs.moved_path / f"img_{index:08d}.png" name = self.fs.moved_path / f"img_{index:08d}.png"
@@ -170,15 +164,23 @@ class InterpolationPipeline:
imwrite(name, interpolated[i]) imwrite(name, interpolated[i])
logging.info("Success...") logging.info("Success...")
def _merge_frames_to_video( def _merge_frames_to_video(self, output_video: Path, fps: float):
self, output_video: Path, fps: float, source_frame_length: int = 0
):
self.video_maker.images_to_video(self.fs.moved_path, output_video, fps) self.video_maker.images_to_video(self.fs.moved_path, output_video, fps)
def _merge_video_parts(self, output_video: Path): def _merge_video_parts(self, output_video: Path):
self.video_maker.concatenate_videos(self.fs.video_part_path, output_video) self.video_maker.concatenate_videos(self.fs.video_part_path, output_video)
self.fs.clear_directory(self.fs.video_part_path) self.fs.clear_directory(self.fs.video_part_path)
def _frame_generator(
self,
source: tuple["np.ndarray", ...],
interpolated: list["np.ndarray"],
):
for i, frame in enumerate(source):
yield frame
if i < len(interpolated):
yield interpolated[i]
def runner( def runner(
base_path: Path, base_path: Path,

View File

@@ -2,7 +2,7 @@ import os
import logging import logging
import subprocess import subprocess
from pathlib import Path from pathlib import Path
from typing import Generator from typing import Generator, Iterable
import cv2 import cv2
import numpy as np import numpy as np
@@ -67,7 +67,13 @@ class VideoMaker:
def run_command(self, cmd: str) -> int: def run_command(self, cmd: str) -> int:
try: try:
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) subprocess.run(
cmd,
shell=True,
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
return 0 return 0
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
logging.error(f"Command failed with error: {e}") logging.error(f"Command failed with error: {e}")
@@ -95,3 +101,47 @@ class VideoMaker:
return return
paths.append(frame) paths.append(frame)
yield tuple(paths) yield tuple(paths)
def images_to_video_pipeline(
    self,
    frames: Iterable[np.ndarray],
    output_path: Path,
    width: int,
    height: int,
    fps: float,
) -> None:
    """Stream raw BGR24 frames into ffmpeg, encoding them to an H.264 MP4.

    Each frame is written as raw bytes to ffmpeg's stdin, so frames must
    be ``height x width x 3`` uint8 BGR arrays matching the declared size.

    Args:
        frames: iterable of BGR frames (e.g. a lazy generator).
        output_path: destination video file (overwritten if present).
        width: frame width in pixels.
        height: frame height in pixels.
        fps: output frame rate.

    Raises:
        RuntimeError: if ffmpeg's stdin pipe is unavailable or ffmpeg
            exits with a non-zero status.
    """
    cmd = [
        "ffmpeg",
        "-y",
        "-f", "rawvideo",
        "-vcodec", "rawvideo",
        "-pix_fmt", "bgr24",
        "-s", f"{width}x{height}",
        "-r", str(fps),
        "-i", "-",  # read frame bytes from stdin
        "-an",
        "-vcodec", "libx264",
        "-pix_fmt", "yuv420p",
        str(output_path),
    ]
    # Context manager guarantees the process is reaped even if a write fails.
    with subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    ) as pipeline:
        if pipeline.stdin is None:
            # RuntimeError is an Exception subclass, so existing callers
            # catching the old bare Exception still work.
            raise RuntimeError("STDIN closed")
        try:
            for frame in frames:
                pipeline.stdin.write(frame.tobytes())
        finally:
            # Always close stdin so ffmpeg sees EOF and can finalize the
            # file instead of hanging, even when a write raised.
            pipeline.stdin.close()
    # Original code ignored the exit status; surface encoding failures.
    if pipeline.returncode != 0:
        raise RuntimeError(
            f"ffmpeg exited with code {pipeline.returncode} for {output_path}"
        )
def get_size(self, video_path: Path) -> tuple[int, int]:
    """Return the frame size of a video as ``(width, height)`` in pixels.

    Args:
        video_path: path to the video file to probe.

    Raises:
        ValueError: if the video cannot be opened.
    """
    cap = cv2.VideoCapture(str(video_path))
    # try/finally: the original leaked the capture handle when the
    # ValueError below was raised before cap.release() was reached.
    try:
        if not cap.isOpened():
            raise ValueError(f"Cannot open video: {video_path}")
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    finally:
        cap.release()
    return width, height