15 Commits

Author SHA1 Message Date
Viner Abubakirov
1615cbc60d Попытка оптимизировать модель для более быстрого расчёта 2026-04-19 11:57:11 +05:00
Viner Abubakirov
c7acd66974 переименовал runner на run 2026-04-04 22:06:27 +05:00
Viner Abubakirov
2d67b72128 Перевел импорты модулей в относительные пути 2026-04-04 11:57:41 +05:00
c91cf6b53a Merge pull request 'dev' (#2) from dev into main
Reviewed-on: #2
2026-04-03 18:28:31 +05:00
Viner Abubakirov
61f8e0abe1 Поменял, метод формирования видео 2026-04-03 17:03:10 +05:00
Viner Abubakirov
c72e34f9dc checkout presets.py from dev 2026-04-02 18:31:54 +05:00
Viner Abubakirov
faf7aa8e81 Начал менять логику pipeline 2026-04-02 18:26:36 +05:00
359f20c3c4 Merge pull request 'dev' (#1) from dev into main
Reviewed-on: #1
2026-04-02 12:17:05 +05:00
Viner Abubakirov
bc09cd7b6c Убрал спам от ffmpeg 2026-04-02 11:53:02 +05:00
Viner Abubakirov
be794539ac Обновил video.py
Команда выполняющейся  при склейке видео по разному ввели в (sh, bash, zsh)
2026-04-02 10:47:41 +05:00
Viner Abubakirov
28e51d1c5e Забыл удалить переменные из runner 2026-04-02 10:17:17 +05:00
Viner Abubakirov
97ca8b19f8 Добавил argument parser 2026-04-02 10:16:36 +05:00
Viner Abubakirov
4fc13db0e8 Переместил interpolator.py внутрь src | добавил пресеты | добавил новые модели 2026-04-02 10:06:06 +05:00
Viner Abubakirov
c984b38904 Обновил main.py, добавил fs.py и video.py 2026-04-01 23:41:00 +05:00
Viner Abubakirov
888cdb3151 Перенес networks внутрь src 2026-04-01 21:41:05 +05:00
28 changed files with 871 additions and 841 deletions

1
.gitignore vendored
View File

@@ -175,5 +175,6 @@ cython_debug/
.pypirc
.DS_Store
source/
output/

259
main.py
View File

@@ -1,239 +1,42 @@
import logging
import subprocess
from pathlib import Path
import cv2
from tqdm import tqdm
from time import perf_counter
from decimal import Decimal
from interpolator import get_device
from interpolator import ImageInterpolator
from interpolator import ModelRunner, Anchor
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
from pathlib import Path
def move_images(src_dir: str, interpolated_dir: str, output_dir: str):
    """Interleave source and interpolated frames into one numbered sequence.

    Frames are renamed (moved) into ``output_dir`` as ``img_XXXXXXXX.png``
    with a running index in playback order: src[0], interp[0], src[1],
    interp[1], ...  A source frame without a matching interpolated frame
    (normally the last one) is appended on its own.
    """
    src_frames = sorted(Path(src_dir).glob("img_*.png"))
    interp_frames = sorted(Path(interpolated_dir).glob("img_*.png"))
    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)
    index = 0
    for i, frame in enumerate(src_frames):
        # Source frame first, then the frame interpolated after it (if any).
        frame.rename(out / f"img_{index:08d}.png")
        index += 1
        if i < len(interp_frames):
            interp_frames[i].rename(out / f"img_{index:08d}.png")
            index += 1
def build_file_list(moved_dir: str, list_path: str):
    """Write an ffmpeg concat-demuxer file list for the frames in moved_dir.

    Each line has the form ``file '/abs/path/img_XXXXXXXX.png'``. An empty
    directory now produces an empty list file — the previous version crashed
    with IndexError on a stray debug ``print(frames[0])``.
    """
    import os  # local import, matching the original file's style

    frames = sorted(Path(moved_dir).glob("img_*.png"))
    with open(list_path, "w") as f:
        for frame in frames:
            f.write(f"file '{os.path.abspath(frame)}'\n")
def build_ffmpeg_file_list(frames_dir: str, interpolated_dir: str, list_path: str):
    """Write a concat-demuxer list interleaving original and interpolated frames.

    For N original frames there must be exactly N-1 interpolated frames;
    otherwise a ValueError is raised.
    """
    originals = sorted(Path(frames_dir).glob("img_*.png"))
    inserted = sorted(Path(interpolated_dir).glob("img_*.png"))
    if len(originals) - 1 != len(inserted):
        raise ValueError("Interpolated frames must be N-1")
    entries = []
    for pos, frame in enumerate(originals):
        entries.append(f"file '{frame.resolve().as_posix()}'\n")
        # The interpolated frame sits between this original and the next one.
        if pos < len(inserted):
            entries.append(f"file '{inserted[pos].resolve().as_posix()}'\n")
    with open(list_path, "w") as handle:
        handle.writelines(entries)
def merge_with_ffmpeg(
    original_video: str,
    file_list: str,
    output_video: str,
):
    """Concatenate the frames in ``file_list`` into a video at 2x source FPS.

    original_video: used only to read the source frame rate.
    file_list: ffmpeg concat-demuxer list (see build_ffmpeg_file_list).
    output_video: path of the encoded result (libx264rgb).

    Raises ValueError if the original video cannot be opened and
    subprocess.CalledProcessError if ffmpeg exits non-zero.
    """
    cap = cv2.VideoCapture(original_video)
    if not cap.isOpened():
        raise ValueError("Cannot open original video")
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    # Build the doubled rate from the decimal *string* form of fps:
    # Decimal(float) would bake binary-float noise into the value before
    # quantizing (e.g. 59.9400000000000012789...).
    new_fps = Decimal(str(fps)) * 2
    cmd = [
        "ffmpeg",
        "-y",
        "-r", str(new_fps.quantize(Decimal("1.0000000000"))),
        "-f", "concat",
        "-safe", "0",  # allow absolute paths in the list file
        "-i", file_list,
        "-c:v", "libx264rgb",
        output_video,
    ]
    # Use the module logger instead of print() for consistency with the
    # rest of the pipeline's logging.
    logging.info("Running ffmpeg command: %s", " ".join(cmd))
    subprocess.run(cmd, check=True)
def video_frames_to_disk_generator(
    video_path: str | Path,
    output_dir: str | Path,
    chunk_seconds: int = 10
):
    """Decode a video to PNG frames on disk, yielding their paths in chunks.

    Frames are written to ``output_dir`` as ``img_XXXXXXXX.png``; roughly
    ``fps * chunk_seconds`` frame paths are yielded per chunk as a tuple.

    Fixes over the previous version:
    - the trailing partial chunk is now yielded instead of silently dropped
      (its files were written to disk but never reported to the caller);
    - a non-positive reported fps (broken metadata) no longer causes an
      infinite loop of empty chunks.

    Raises ValueError if the video cannot be opened.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        raise ValueError(f"Cannot open video: {video_path}")
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Guard against fps <= 0: fall back to 1-frame chunks rather than
    # spinning forever on range(0).
    frames_per_chunk = max(1, int(fps * chunk_seconds))
    frame_index = 0
    while True:
        paths = []
        for _ in range(frames_per_chunk):
            ret, frame = cap.read()
            if not ret:
                cap.release()
                if paths:
                    # Don't lose the final partial chunk.
                    yield tuple(paths)
                return
            frame_path = output_dir / f"img_{frame_index:08d}.png"
            cv2.imwrite(str(frame_path), frame)
            paths.append(frame_path)
            frame_index += 1
        yield tuple(paths)
from src.runner import run
from src.config import presets
def main():
start = perf_counter()
logging.info("Starting video interpolation process")
config_path = Path("src/config/AMT-G.yaml")
ckpt_path = Path("src/pretrained/amt-g.pth")
video_path = Path("example/video.mp4")
output_dir = Path("output/frames")
output_interpolated_dir = Path("output/interpolated")
output_interpolated_dir.mkdir(parents=True, exist_ok=True)
import argparse
device = get_device()
model_runner = ModelRunner(config_path, ckpt_path, device)
if device.type in ("cpu", "mps"):
if device.type == "mps":
logging.warning(
"Running on Apple Silicon GPU (MPS) may have limited performance. Consider using a CUDA-enabled GPU for better performance."
)
else:
logging.warning(
"Running on CPU may be very slow. Consider using a GPU for better performance."
)
anchor = Anchor(resolution=8192 * 8192, memory=1, memory_bias=0)
elif device.type == "cuda":
anchor = Anchor(
resolution=1024 * 512, memory=1500 * 1024**2, memory_bias=2500 * 1024**2
)
else:
raise Exception(f"Unsupported device type: {device.type}")
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
interpolator = ImageInterpolator(device, anchor, model_runner)
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--base_path", help="Base path", default="output")
parser.add_argument(
"-v", "--video_path", help="Video path", default="example/video.mp4"
)
parser.add_argument(
"-o",
"--output",
help="Output video name (example: 'interpolated_video.mp4')",
default="interpolated_video.mp4",
)
parser.add_argument(
"-p",
"--preset",
help="Model preset",
choices=["small", "large", "global"],
default="global",
)
args = parser.parse_args()
run(
base_path=Path(args.base_path),
video_path=Path(args.video_path),
output_video=args.output,
preset=getattr(presets, args.preset.upper()),
)
loaded_time = perf_counter() - start
logging.info(f"Model loaded and initialized in {loaded_time:.2f} seconds")
prev_frame_path = None
frame_count = 0
for frame_paths in video_frames_to_disk_generator(video_path, output_dir):
logging.info(f"Processing frames: {len(frame_paths)}")
if prev_frame_path is not None:
img1 = prev_frame_path[-1]
img2 = frame_paths[0]
output_path = output_interpolated_dir / f"img_{frame_count:08d}.png"
interpolator.interpolate(img1, img2, output_path)
logging.debug(f"Interpolated image saved to: {output_path}")
frame_count += 1
for i in tqdm(range(len(frame_paths) - 1), desc="Interpolating frames"):
img1 = frame_paths[i]
img2 = frame_paths[i + 1]
output_path = output_interpolated_dir / f"img_{frame_count:08d}.png"
interpolator.interpolate(img1, img2, output_path)
logging.debug(f"Interpolated image saved to: {output_path}")
frame_count += 1
prev_frame_path = frame_paths
total_time = perf_counter() - start
logging.info(f"Video interpolation completed in {total_time:.2f} seconds")
def builder():
    """Assemble the final interpolated video with ffmpeg.

    Moves original + interpolated frames into one interleaved sequence
    (see move_images), then encodes it at 2x the original FPS, copying the
    audio track from the source video.

    Raises ValueError if the source video cannot be opened and
    subprocess.CalledProcessError if ffmpeg exits non-zero.
    """
    frames_dir = "output/frames"
    interpolated_dir = "output/interpolated"
    moved_dir = "output/moved"
    video_path = "example/video.mp4"
    output_video = "output/interpolated_video.mp4"
    move_images(frames_dir, interpolated_dir, moved_dir)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Cannot open original video")
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()  # fix: the capture handle was previously leaked
    cmd = [
        "ffmpeg",
        "-y",
        "-framerate", str(fps * 2),
        "-i", f"{moved_dir}/img_%08d.png",
        "-i", video_path,
        "-c:v", "libx264",
        "-c:a", "copy",
        "-shortest",  # stop at the shorter of video/audio streams
        output_video,
    ]
    logging.info("Running ffmpeg command to build final video: " + " ".join(cmd))
    subprocess.run(cmd, check=True)
def cleanup():
    """Delete all intermediate frame directories, ignoring missing ones.

    Behaviorally equivalent to the previous makedirs-then-rmtree dance, but
    without creating directories just to delete them.
    """
    import shutil

    for directory in ("output/frames", "output/interpolated", "output/moved"):
        shutil.rmtree(directory, ignore_errors=True)
if __name__ == "__main__":
    # Full pipeline: wipe stale intermediates, interpolate frames, encode
    # the final video, then wipe the intermediates again.
    cleanup()
    main()
    builder()
    cleanup()

View File

@@ -1,69 +0,0 @@
import torch
import torch.nn as nn
from src.utils.flow_utils import warp
from networks.blocks.ifrnet import (
convrelu, resize,
ResBlock,
)
def multi_flow_combine(comb_block, img0, img1, flow0, flow1,
                       mask=None, img_res=None, mean=None):
    '''
    A parallel implementation of multiple flow field warping.

    comb_block: An nn.Sequential object fusing all warped candidates into
        the final prediction.
    img shape: [b, c, h, w]
    flow shape: [b, 2*num_flows, h, w]
    mask (opt):
        If 'mask' is None, the function conduct a simple average.
    img_res (opt):
        If 'img_res' is None, the function adds zero instead.
    mean (opt):
        If 'mean' is None, the function adds zero instead.
    '''
    b, c, h, w = flow0.shape
    num_flows = c // 2
    # Fold the per-flow dimension into the batch dimension so one warp()
    # call processes every flow candidate at once.
    flow0 = flow0.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
    flow1 = flow1.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
    mask = mask.reshape(b, num_flows, 1, h, w
                        ).reshape(-1, 1, h, w) if mask is not None else None
    img_res = img_res.reshape(b, num_flows, 3, h, w
                              ).reshape(-1, 3, h, w) if img_res is not None else 0
    # Replicate both input images once per flow candidate.
    img0 = torch.stack([img0] * num_flows, 1).reshape(-1, 3, h, w)
    img1 = torch.stack([img1] * num_flows, 1).reshape(-1, 3, h, w)
    mean = torch.stack([mean] * num_flows, 1).reshape(-1, 1, 1, 1
                                                      ) if mean is not None else 0
    img0_warp = warp(img0, flow0)
    img1_warp = warp(img1, flow1)
    # NOTE(review): despite the docstring, mask=None is kept as None here and
    # `mask * img0_warp` would raise a TypeError — confirm mask is always
    # provided by callers.
    img_warps = mask * img0_warp + (1 - mask) * img1_warp + mean + img_res
    img_warps = img_warps.reshape(b, num_flows, 3, h, w)
    # Final prediction: mean of candidates plus a learned combination residual.
    imgt_pred = img_warps.mean(1) + comb_block(img_warps.view(b, -1, h, w))
    return imgt_pred
class MultiFlowDecoder(nn.Module):
    """Decoder head predicting multiple bidirectional flow candidates plus a
    fusion mask and an image residual at doubled spatial resolution.

    The transposed convolution emits 8*num_flows channels, split in
    forward() as 2n + 2n + n + 3n (flow0, flow1, mask, img_res).
    """

    def __init__(self, in_ch, skip_ch, num_flows=3):
        super(MultiFlowDecoder, self).__init__()
        self.num_flows = num_flows
        # Input is [ft_, f0_warp, f1_warp, flow0, flow1] => in_ch*3 + 4 channels.
        self.convblock = nn.Sequential(
            convrelu(in_ch*3+4, in_ch*3),
            ResBlock(in_ch*3, skip_ch),
            nn.ConvTranspose2d(in_ch*3, 8*num_flows, 4, 2, 1, bias=True)
        )

    def forward(self, ft_, f0, f1, flow0, flow1):
        n = self.num_flows
        # Align both feature maps to the intermediate frame before decoding.
        f0_warp = warp(f0, flow0)
        f1_warp = warp(f1, flow1)
        out = self.convblock(torch.cat([ft_, f0_warp, f1_warp, flow0, flow1], 1))
        delta_flow0, delta_flow1, mask, img_res = torch.split(out, [2*n, 2*n, n, 3*n], 1)
        mask = torch.sigmoid(mask)
        # Upsample the coarse flows 2x (values scaled by 2 accordingly),
        # replicate per flow candidate, and add the predicted residuals.
        flow0 = delta_flow0 + 2.0 * resize(flow0, scale_factor=2.0
                                           ).repeat(1, self.num_flows, 1, 1)
        flow1 = delta_flow1 + 2.0 * resize(flow1, scale_factor=2.0
                                           ).repeat(1, self.num_flows, 1, 1)
        return flow0, flow1, mask, img_res

View File

@@ -5,7 +5,6 @@ description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"imageio>=2.37.3",
"numpy>=2.4.4",
"omegaconf>=2.3.0",
"opencv-python>=4.13.0.92",

View File

@@ -10,7 +10,7 @@ save_dir: work_dir
eval_interval: 1
network:
name: networks.AMT-G.Model
name: AMT-G.Model
params:
corr_radius: 3
corr_lvls: 4

62
src/config/AMT-L.yaml Normal file
View File

@@ -0,0 +1,62 @@
exp_name: floloss1e-2_300epoch_bs24_lr2e-4
seed: 2023
epochs: 300
distributed: true
lr: 2e-4
lr_min: 2e-5
weight_decay: 0.0
resume_state: null
save_dir: work_dir
eval_interval: 1
network:
name: AMT-L.Model
params:
corr_radius: 3
corr_lvls: 4
num_flows: 5
data:
train:
name: datasets.vimeo_datasets.Vimeo90K_Train_Dataset
params:
dataset_dir: data/vimeo_triplet
val:
name: datasets.vimeo_datasets.Vimeo90K_Test_Dataset
params:
dataset_dir: data/vimeo_triplet
train_loader:
batch_size: 24
num_workers: 12
val_loader:
batch_size: 24
num_workers: 3
logger:
use_wandb: true
resume_id: null
losses:
- {
name: losses.loss.CharbonnierLoss,
nickname: l_rec,
params: {
loss_weight: 1.0,
keys: [imgt_pred, imgt]
}
}
- {
name: losses.loss.TernaryLoss,
nickname: l_ter,
params: {
loss_weight: 1.0,
keys: [imgt_pred, imgt]
}
}
- {
name: losses.loss.MultipleFlowLoss,
nickname: l_flo,
params: {
loss_weight: 0.002,
keys: [flow0_pred, flow1_pred, flow]
}
}

View File

@@ -10,7 +10,7 @@ save_dir: work_dir
eval_interval: 1
network:
name: networks.AMT-S.Model
name: AMT-S.Model
params:
corr_radius: 3
corr_lvls: 4

24
src/config/presets.py Normal file
View File

@@ -0,0 +1,24 @@
from pathlib import Path
from dataclasses import dataclass


@dataclass(frozen=True)
class Preset:
    """Immutable pairing of a model YAML config and its checkpoint file."""

    # OmegaConf YAML describing the network to build.
    config: Path
    # Pretrained .pth checkpoint matching that config.
    checkpoint: Path


# Preset registry: main() selects one via getattr(presets, name.upper()).
# Paths are relative to the repository root (run from the project directory).
SMALL = Preset(
    config=Path("src/config/AMT-S.yaml"),
    checkpoint=Path("src/pretrained/amt-s.pth"),
)
LARGE = Preset(
    config=Path("src/config/AMT-L.yaml"),
    checkpoint=Path("src/pretrained/amt-l.pth"),
)
GLOBAL = Preset(
    config=Path("src/config/AMT-G.yaml"),
    checkpoint=Path("src/pretrained/amt-g.pth"),
)

View File

@@ -5,10 +5,9 @@ import torch
import numpy as np
from omegaconf import OmegaConf, DictConfig
from src.utils import utils
from src.utils.torch import img2tensor, check_dim_and_resize, tensor2img
from src.utils.build import build_from_cfg
from src.utils.padder import InputPadder
from .utils.torch import img2tensor, check_dim_and_resize, tensor2img
from .utils.build import build_from_cfg
from .utils.padder import InputPadder
class Anchor:
@@ -30,6 +29,7 @@ class ModelRunner:
ckpt_path (Path): Path to model checkpoint in .pth format
device (torch.device): Device to load the model on
"""
torch.set_float32_matmul_precision("high")
omega_config = OmegaConf.load(config)
network_config: DictConfig = omega_config.network
logging.info(
@@ -40,7 +40,7 @@ class ModelRunner:
model.load_state_dict(checkpoint["state_dict"])
model = model.to(get_device())
model.eval()
self.model = model
self.model = torch.compile(model, mode="max-autotune")
def get_vram_available(device: torch.device) -> int:
@@ -77,59 +77,46 @@ class ImageInterpolator:
self.device = device
self.anchor = anchor
self.vram_available = get_vram_available(device)
self._scale = None
self._padder = None
self.embt = torch.tensor(1 / 2).float().view(1, 1, 1, 1).to(device)
self.model_runner = model_runner
logging.debug(
f"Initialized ImageInterpolator with device: {device}, anchor: {anchor}, available VRAM: {self.vram_available} bytes"
)
def interpolate(self, image1: Path, image2: Path, output_path: Path):
logging.debug(f"Reading images: {image1} and {image2}")
tensor1 = img2tensor(utils.read(image1)).to(self.device)
tensor2 = img2tensor(utils.read(image2)).to(self.device)
logging.debug(
f"Image shapes after conversion to tensors: {tensor1.shape}, {tensor2.shape}"
)
tensor1, tensor2 = check_dim_and_resize(tensor1, tensor2)
logging.debug(f"Image shapes after resizing: {tensor1.shape}, {tensor2.shape}")
h, w = tensor1.shape[2], tensor1.shape[3]
logging.debug(f"Interpolating images of size: {h}x{w}")
def interpolate(self, image1: torch.Tensor, image2: torch.Tensor) -> torch.Tensor:
interpolated = self.model_runner.model(
image1, image2, self.embt, scale_factor=self._scale, eval=True
)["imgt_pred"]
if not self._padder:
raise NotImplemented("Padder not implemented")
return self._padder.unpad(interpolated)[0]
def make_tensor(self, img: np.ndarray) -> torch.Tensor:
tensor = img2tensor(img).to(self.device)
h, w = tensor.shape[2], tensor.shape[3]
scale = self.scale(h, w)
logging.debug(f"Calculated scale factor: {scale:.2f}")
padding = int(16 / scale)
logging.debug(f"Calculated padding: {padding} pixels")
padder = InputPadder(tensor1.shape, divisor=padding)
tensor1_padded, tensor2_padded = padder.pad(tensor1, tensor2)
logging.debug(
f"Image shapes after padding: {tensor1_padded.shape}, {tensor2_padded.shape}"
)
tensor1_padded = tensor1_padded.to(self.device)
tensor2_padded = tensor2_padded.to(self.device)
logging.debug("Running model inference for interpolation")
with torch.no_grad():
interpolated = self.model_runner.model(
tensor1_padded, tensor2_padded, self.embt, scale_factor=scale, eval=True
)["imgt_pred"]
logging.debug(f"Interpolated image shape before unpadding: {interpolated.shape}")
(interpolated,) = padder.unpad(interpolated)
logging.debug(f"Interpolated image shape after unpadding: {interpolated.shape}")
utils.write(output_path, tensor2img(interpolated.cpu()))
logging.debug(f"Saved interpolated image to: {output_path}")
if self._padder is None:
self._padder = InputPadder(tensor.shape, padding)
return self._padder.pad(tensor)[0]
def scale(self, height: int, width: int) -> float:
scale = (
self.anchor.resolution
/ (height * width)
* np.sqrt(
(self.vram_available - self.anchor.memory_bias) / self.anchor.memory
if self._scale is None:
scale = (
self.anchor.resolution
/ (height * width)
* np.sqrt(
(self.vram_available - self.anchor.memory_bias) / self.anchor.memory
)
)
)
scale = 1 if scale > 1 else scale
scale = 1 / np.floor(1 / np.sqrt(scale) * 16) * 16
if scale < 1:
logging.info(
f"Due to the limited VRAM, the video will be scaled by {scale:.2f}"
)
return scale
scale = 1 if scale > 1 else scale
scale = 1 / np.floor(1 / np.sqrt(scale) * 16) * 16
if scale < 1:
logging.debug(
f"Due to the limited VRAM, the video will be scaled by {scale:.2f}"
)
self._scale = float(scale)
logging.info(f"Calculated scale factor: {self._scale:.2f}")
return self._scale

View File

@@ -1,9 +1,11 @@
from typing import Optional
import torch
import torch.nn as nn
from networks.blocks.raft import coords_grid, BasicUpdateBlock, BidirCorrBlock
from networks.blocks.feat_enc import LargeEncoder
from networks.blocks.ifrnet import resize, Encoder, InitDecoder, IntermediateDecoder
from networks.blocks.multi_flow import multi_flow_combine, MultiFlowDecoder
from .blocks.raft import coords_grid, BasicUpdateBlock, BidirCorrBlock
from .blocks.feat_enc import LargeEncoder
from .blocks.ifrnet import resize, Encoder, InitDecoder, IntermediateDecoder
from .blocks.multi_flow import multi_flow_combine, MultiFlowDecoder
class Model(nn.Module):
@@ -42,7 +44,7 @@ class Model(nn.Module):
nn.Conv2d(6 * self.num_flows, 3, 7, 1, 3),
)
def _get_updateblock(self, cdim, scale_factor=None):
def _get_updateblock(self, cdim: int, scale_factor: Optional[float] = None):
return BasicUpdateBlock(
cdim=cdim,
hidden_dim=192,
@@ -55,7 +57,15 @@ class Model(nn.Module):
radius=self.radius,
)
def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
def _corr_scale_lookup(
self,
corr_fn: BidirCorrBlock,
coord: torch.Tensor,
flow0: torch.Tensor,
flow1: torch.Tensor,
embt: torch.Tensor,
downsample: int = 1,
):
# convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
# based on linear assumption
t1_scale = 1.0 / embt
@@ -70,7 +80,15 @@ class Model(nn.Module):
flow = torch.cat([flow0, flow1], dim=1)
return corr, flow
def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
def forward(
self,
img0: torch.Tensor,
img1: torch.Tensor,
embt: torch.Tensor,
scale_factor: float = 1.0,
eval: bool = False,
**kwargs,
):
mean_ = (
torch.cat([img0, img1], 2)
.mean(1, keepdim=True)
@@ -159,14 +177,11 @@ class Model(nn.Module):
)
if scale_factor != 1.0:
up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0 / scale_factor)) * (
1.0 / scale_factor
)
up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0 / scale_factor)) * (
1.0 / scale_factor
)
mask = resize(mask, scale_factor=(1.0 / scale_factor))
img_res = resize(img_res, scale_factor=(1.0 / scale_factor))
factor = 1.0 / scale_factor
up_flow0_1 = resize(up_flow0_1, factor) * factor
up_flow1_1 = resize(up_flow1_1, factor) * factor
mask = resize(mask, factor)
img_res = resize(img_res, factor)
# Merge multiple predictions
imgt_pred = multi_flow_combine(

View File

@@ -1,38 +1,29 @@
import torch
import torch.nn as nn
from networks.blocks.raft import (
coords_grid,
BasicUpdateBlock, BidirCorrBlock
)
from networks.blocks.feat_enc import (
BasicEncoder
)
from networks.blocks.ifrnet import (
resize,
Encoder,
InitDecoder,
IntermediateDecoder
)
from networks.blocks.multi_flow import (
multi_flow_combine,
MultiFlowDecoder
)
from .blocks.raft import coords_grid, BasicUpdateBlock, BidirCorrBlock
from .blocks.feat_enc import BasicEncoder
from .blocks.ifrnet import resize, Encoder, InitDecoder, IntermediateDecoder
from .blocks.multi_flow import multi_flow_combine, MultiFlowDecoder
class Model(nn.Module):
def __init__(self,
corr_radius=3,
corr_lvls=4,
num_flows=5,
channels=[48, 64, 72, 128],
skip_channels=48
):
def __init__(
self,
corr_radius=3,
corr_lvls=4,
num_flows=5,
channels=[48, 64, 72, 128],
skip_channels=48,
):
super(Model, self).__init__()
self.radius = corr_radius
self.corr_levels = corr_lvls
self.num_flows = num_flows
self.feat_encoder = BasicEncoder(output_dim=128, norm_fn='instance', dropout=0.)
self.feat_encoder = BasicEncoder(
output_dim=128, norm_fn="instance", dropout=0.0
)
self.encoder = Encoder([48, 64, 72, 128], large=True)
self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels)
@@ -45,22 +36,29 @@ class Model(nn.Module):
self.update2 = self._get_updateblock(48, 4.0)
self.comb_block = nn.Sequential(
nn.Conv2d(3*self.num_flows, 6*self.num_flows, 7, 1, 3),
nn.PReLU(6*self.num_flows),
nn.Conv2d(6*self.num_flows, 3, 7, 1, 3),
nn.Conv2d(3 * self.num_flows, 6 * self.num_flows, 7, 1, 3),
nn.PReLU(6 * self.num_flows),
nn.Conv2d(6 * self.num_flows, 3, 7, 1, 3),
)
def _get_updateblock(self, cdim, scale_factor=None):
return BasicUpdateBlock(cdim=cdim, hidden_dim=128, flow_dim=48,
corr_dim=256, corr_dim2=160, fc_dim=124,
scale_factor=scale_factor, corr_levels=self.corr_levels,
radius=self.radius)
return BasicUpdateBlock(
cdim=cdim,
hidden_dim=128,
flow_dim=48,
corr_dim=256,
corr_dim2=160,
fc_dim=124,
scale_factor=scale_factor,
corr_levels=self.corr_levels,
radius=self.radius,
)
def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
# convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
# based on linear assumption
t1_scale = 1. / embt
t0_scale = 1. / (1. - embt)
t1_scale = 1.0 / embt
t0_scale = 1.0 / (1.0 - embt)
if downsample != 1:
inv = 1 / downsample
flow0 = inv * resize(flow0, scale_factor=inv)
@@ -72,7 +70,12 @@ class Model(nn.Module):
return corr, flow
def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
mean_ = (
torch.cat([img0, img1], 2)
.mean(1, keepdim=True)
.mean(2, keepdim=True)
.mean(3, keepdim=True)
)
img0 = img0 - mean_
img1 = img1 - mean_
img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0
@@ -80,8 +83,10 @@ class Model(nn.Module):
b, _, h, w = img0_.shape
coord = coords_grid(b, h // 8, w // 8, img0.device)
fmap0, fmap1 = self.feat_encoder([img0_, img1_]) # [1, 128, H//8, W//8]
corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels)
fmap0, fmap1 = self.feat_encoder([img0_, img1_]) # [1, 128, H//8, W//8]
corr_fn = BidirCorrBlock(
fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels
)
# f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4]
# f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16]
@@ -90,9 +95,9 @@ class Model(nn.Module):
######################################### the 4th decoder #########################################
up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt)
corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord,
up_flow0_4, up_flow1_4,
embt, downsample=1)
corr_4, flow_4 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_4, up_flow1_4, embt, downsample=1
)
# residue update with lookup corr
delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4)
@@ -102,10 +107,12 @@ class Model(nn.Module):
ft_3_ = ft_3_ + delta_ft_3_
######################################### the 3rd decoder #########################################
up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4)
corr_3, flow_3 = self._corr_scale_lookup(corr_fn,
coord, up_flow0_3, up_flow1_3,
embt, downsample=2)
up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(
ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4
)
corr_3, flow_3 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_3, up_flow1_3, embt, downsample=2
)
# residue update with lookup corr
delta_ft_2_, delta_flow_3 = self.update3(ft_2_, flow_3, corr_3)
@@ -115,10 +122,12 @@ class Model(nn.Module):
ft_2_ = ft_2_ + delta_ft_2_
######################################### the 2nd decoder #########################################
up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3)
corr_2, flow_2 = self._corr_scale_lookup(corr_fn,
coord, up_flow0_2, up_flow1_2,
embt, downsample=4)
up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(
ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3
)
corr_2, flow_2 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_2, up_flow1_2, embt, downsample=4
)
# residue update with lookup corr
delta_ft_1_, delta_flow_2 = self.update2(ft_1_, flow_2, corr_2)
@@ -128,28 +137,33 @@ class Model(nn.Module):
ft_1_ = ft_1_ + delta_ft_1_
######################################### the 1st decoder #########################################
up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2)
up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(
ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2
)
if scale_factor != 1.0:
up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
mask = resize(mask, scale_factor=(1.0/scale_factor))
img_res = resize(img_res, scale_factor=(1.0/scale_factor))
factor = 1.0 / scale_factor
up_flow0_1 = resize(up_flow0_1, factor) * factor
up_flow1_1 = resize(up_flow1_1, factor) * factor
mask = resize(mask, factor)
img_res = resize(img_res, factor)
# Merge multiple predictions
imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1,
mask, img_res, mean_)
imgt_pred = multi_flow_combine(
self.comb_block, img0, img1, up_flow0_1, up_flow1_1, mask, img_res, mean_
)
imgt_pred = torch.clamp(imgt_pred, 0, 1)
if eval:
return { 'imgt_pred': imgt_pred, }
return {
"imgt_pred": imgt_pred,
}
else:
up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w)
up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w)
return {
'imgt_pred': imgt_pred,
'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
'ft_pred': [ft_1_, ft_2_, ft_3_],
"imgt_pred": imgt_pred,
"flow0_pred": [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
"flow1_pred": [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
"ft_pred": [ft_1_, ft_2_, ft_3_],
}

View File

@@ -1,31 +1,20 @@
import torch
import torch.nn as nn
from networks.blocks.raft import (
coords_grid,
SmallUpdateBlock, BidirCorrBlock
)
from networks.blocks.feat_enc import (
SmallEncoder
)
from networks.blocks.ifrnet import (
resize,
Encoder,
InitDecoder,
IntermediateDecoder
)
from networks.blocks.multi_flow import (
multi_flow_combine,
MultiFlowDecoder
)
from .blocks.raft import coords_grid, SmallUpdateBlock, BidirCorrBlock
from .blocks.feat_enc import SmallEncoder
from .blocks.ifrnet import resize, Encoder, InitDecoder, IntermediateDecoder
from .blocks.multi_flow import multi_flow_combine, MultiFlowDecoder
class Model(nn.Module):
def __init__(self,
corr_radius=3,
corr_lvls=4,
num_flows=3,
channels=[20, 32, 44, 56],
skip_channels=20):
def __init__(
self,
corr_radius=3,
corr_lvls=4,
num_flows=3,
channels=[20, 32, 44, 56],
skip_channels=20,
):
super(Model, self).__init__()
self.radius = corr_radius
self.corr_levels = corr_lvls
@@ -33,7 +22,7 @@ class Model(nn.Module):
self.channels = channels
self.skip_channels = skip_channels
self.feat_encoder = SmallEncoder(output_dim=84, norm_fn='instance', dropout=0.)
self.feat_encoder = SmallEncoder(output_dim=84, norm_fn="instance", dropout=0.0)
self.encoder = Encoder(channels)
self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels)
@@ -46,21 +35,28 @@ class Model(nn.Module):
self.update2 = self._get_updateblock(20, 4)
self.comb_block = nn.Sequential(
nn.Conv2d(3*num_flows, 6*num_flows, 3, 1, 1),
nn.PReLU(6*num_flows),
nn.Conv2d(6*num_flows, 3, 3, 1, 1),
nn.Conv2d(3 * num_flows, 6 * num_flows, 3, 1, 1),
nn.PReLU(6 * num_flows),
nn.Conv2d(6 * num_flows, 3, 3, 1, 1),
)
def _get_updateblock(self, cdim, scale_factor=None):
return SmallUpdateBlock(cdim=cdim, hidden_dim=76, flow_dim=20, corr_dim=64,
fc_dim=68, scale_factor=scale_factor,
corr_levels=self.corr_levels, radius=self.radius)
return SmallUpdateBlock(
cdim=cdim,
hidden_dim=76,
flow_dim=20,
corr_dim=64,
fc_dim=68,
scale_factor=scale_factor,
corr_levels=self.corr_levels,
radius=self.radius,
)
def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
# convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
# based on linear assumption
t1_scale = 1. / embt
t0_scale = 1. / (1. - embt)
t1_scale = 1.0 / embt
t0_scale = 1.0 / (1.0 - embt)
if downsample != 1:
inv = 1 / downsample
flow0 = inv * resize(flow0, scale_factor=inv)
@@ -72,7 +68,12 @@ class Model(nn.Module):
return corr, flow
def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
mean_ = (
torch.cat([img0, img1], 2)
.mean(1, keepdim=True)
.mean(2, keepdim=True)
.mean(3, keepdim=True)
)
img0 = img0 - mean_
img1 = img1 - mean_
img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0
@@ -80,8 +81,10 @@ class Model(nn.Module):
b, _, h, w = img0_.shape
coord = coords_grid(b, h // 8, w // 8, img0.device)
fmap0, fmap1 = self.feat_encoder([img0_, img1_]) # [1, 128, H//8, W//8]
corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels)
fmap0, fmap1 = self.feat_encoder([img0_, img1_]) # [1, 128, H//8, W//8]
corr_fn = BidirCorrBlock(
fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels
)
# f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4]
# f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16]
@@ -90,9 +93,9 @@ class Model(nn.Module):
######################################### the 4th decoder #########################################
up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt)
corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord,
up_flow0_4, up_flow1_4,
embt, downsample=1)
corr_4, flow_4 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_4, up_flow1_4, embt, downsample=1
)
# residue update with lookup corr
delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4)
@@ -102,10 +105,12 @@ class Model(nn.Module):
ft_3_ = ft_3_ + delta_ft_3_
######################################### the 3rd decoder #########################################
up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4)
corr_3, flow_3 = self._corr_scale_lookup(corr_fn,
coord, up_flow0_3, up_flow1_3,
embt, downsample=2)
up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(
ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4
)
corr_3, flow_3 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_3, up_flow1_3, embt, downsample=2
)
# residue update with lookup corr
delta_ft_2_, delta_flow_3 = self.update3(ft_2_, flow_3, corr_3)
@@ -115,10 +120,12 @@ class Model(nn.Module):
ft_2_ = ft_2_ + delta_ft_2_
######################################### the 2nd decoder #########################################
up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3)
corr_2, flow_2 = self._corr_scale_lookup(corr_fn,
coord, up_flow0_2, up_flow1_2,
embt, downsample=4)
up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(
ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3
)
corr_2, flow_2 = self._corr_scale_lookup(
corr_fn, coord, up_flow0_2, up_flow1_2, embt, downsample=4
)
# residue update with lookup corr
delta_ft_1_, delta_flow_2 = self.update2(ft_1_, flow_2, corr_2)
@@ -128,27 +135,33 @@ class Model(nn.Module):
ft_1_ = ft_1_ + delta_ft_1_
######################################### the 1st decoder #########################################
up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2)
up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(
ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2
)
if scale_factor != 1.0:
up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
mask = resize(mask, scale_factor=(1.0/scale_factor))
img_res = resize(img_res, scale_factor=(1.0/scale_factor))
factor = 1.0 / scale_factor
up_flow0_1 = resize(up_flow0_1, factor) * factor
up_flow1_1 = resize(up_flow1_1, factor) * factor
mask = resize(mask, factor)
img_res = resize(img_res, factor)
# Merge multiple predictions
imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1,
mask, img_res, mean_)
imgt_pred = multi_flow_combine(
self.comb_block, img0, img1, up_flow0_1, up_flow1_1, mask, img_res, mean_
)
imgt_pred = torch.clamp(imgt_pred, 0, 1)
if eval:
return { 'imgt_pred': imgt_pred, }
return {
"imgt_pred": imgt_pred,
}
else:
up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w)
up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w)
return {
'imgt_pred': imgt_pred,
'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
'ft_pred': [ft_1_, ft_2_, ft_3_],
"imgt_pred": imgt_pred,
"flow0_pred": [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
"flow1_pred": [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
"ft_pred": [ft_1_, ft_2_, ft_3_],
}

View File

@@ -1,30 +1,23 @@
import torch
import torch.nn as nn
from src.utils.flow_utils import warp
from networks.blocks.ifrnet import (
convrelu, resize,
ResBlock,
)
from ..utils.flow_utils import warp
from .blocks.ifrnet import convrelu, resize, ResBlock
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.pyramid1 = nn.Sequential(
convrelu(3, 32, 3, 2, 1),
convrelu(32, 32, 3, 1, 1)
convrelu(3, 32, 3, 2, 1), convrelu(32, 32, 3, 1, 1)
)
self.pyramid2 = nn.Sequential(
convrelu(32, 48, 3, 2, 1),
convrelu(48, 48, 3, 1, 1)
convrelu(32, 48, 3, 2, 1), convrelu(48, 48, 3, 1, 1)
)
self.pyramid3 = nn.Sequential(
convrelu(48, 72, 3, 2, 1),
convrelu(72, 72, 3, 1, 1)
convrelu(48, 72, 3, 2, 1), convrelu(72, 72, 3, 1, 1)
)
self.pyramid4 = nn.Sequential(
convrelu(72, 96, 3, 2, 1),
convrelu(96, 96, 3, 1, 1)
convrelu(72, 96, 3, 2, 1), convrelu(96, 96, 3, 1, 1)
)
def forward(self, img):
@@ -39,9 +32,9 @@ class Decoder4(nn.Module):
def __init__(self):
super(Decoder4, self).__init__()
self.convblock = nn.Sequential(
convrelu(192+1, 192),
convrelu(192 + 1, 192),
ResBlock(192, 32),
nn.ConvTranspose2d(192, 76, 4, 2, 1, bias=True)
nn.ConvTranspose2d(192, 76, 4, 2, 1, bias=True),
)
def forward(self, f0, f1, embt):
@@ -58,7 +51,7 @@ class Decoder3(nn.Module):
self.convblock = nn.Sequential(
convrelu(220, 216),
ResBlock(216, 32),
nn.ConvTranspose2d(216, 52, 4, 2, 1, bias=True)
nn.ConvTranspose2d(216, 52, 4, 2, 1, bias=True),
)
def forward(self, ft_, f0, f1, up_flow0, up_flow1):
@@ -75,7 +68,7 @@ class Decoder2(nn.Module):
self.convblock = nn.Sequential(
convrelu(148, 144),
ResBlock(144, 32),
nn.ConvTranspose2d(144, 36, 4, 2, 1, bias=True)
nn.ConvTranspose2d(144, 36, 4, 2, 1, bias=True),
)
def forward(self, ft_, f0, f1, up_flow0, up_flow1):
@@ -92,7 +85,7 @@ class Decoder1(nn.Module):
self.convblock = nn.Sequential(
convrelu(100, 96),
ResBlock(96, 32),
nn.ConvTranspose2d(96, 8, 4, 2, 1, bias=True)
nn.ConvTranspose2d(96, 8, 4, 2, 1, bias=True),
)
def forward(self, ft_, f0, f1, up_flow0, up_flow1):
@@ -113,7 +106,12 @@ class Model(nn.Module):
self.decoder1 = Decoder1()
def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
mean_ = (
torch.cat([img0, img1], 2)
.mean(1, keepdim=True)
.mean(2, keepdim=True)
.mean(3, keepdim=True)
)
img0 = img0 - mean_
img1 = img1 - mean_
@@ -145,10 +143,14 @@ class Model(nn.Module):
up_res_1 = out1[:, 5:]
if scale_factor != 1.0:
up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
up_mask_1 = resize(up_mask_1, scale_factor=(1.0/scale_factor))
up_res_1 = resize(up_res_1, scale_factor=(1.0/scale_factor))
up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0 / scale_factor)) * (
1.0 / scale_factor
)
up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0 / scale_factor)) * (
1.0 / scale_factor
)
up_mask_1 = resize(up_mask_1, scale_factor=(1.0 / scale_factor))
up_res_1 = resize(up_res_1, scale_factor=(1.0 / scale_factor))
img0_warp = warp(img0, up_flow0_1)
img1_warp = warp(img1, up_flow1_1)
@@ -157,13 +159,15 @@ class Model(nn.Module):
imgt_pred = torch.clamp(imgt_pred, 0, 1)
if eval:
return { 'imgt_pred': imgt_pred, }
return {
"imgt_pred": imgt_pred,
}
else:
return {
'imgt_pred': imgt_pred,
'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
'ft_pred': [ft_1_, ft_2_, ft_3_],
'img0_warp': img0_warp,
'img1_warp': img1_warp
"imgt_pred": imgt_pred,
"flow0_pred": [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
"flow1_pred": [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
"ft_pred": [ft_1_, ft_2_, ft_3_],
"img0_warp": img0_warp,
"img1_warp": img1_warp,
}

View File

@@ -1,7 +1,7 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils.flow_utils import warp
from ...utils.flow_utils import warp
def resize(x, scale_factor):

View File

@@ -0,0 +1,80 @@
import torch
import torch.nn as nn
from ...utils.flow_utils import warp
from .ifrnet import convrelu, resize, ResBlock
def multi_flow_combine(
    comb_block, img0, img1, flow0, flow1, mask=None, img_res=None, mean=None
):
    """Warp both source images with multiple flow fields in parallel and fuse them.

    Args:
        comb_block: An ``nn.Sequential`` predicting a residual from the stacked
            per-flow warps.
        img0, img1: Source images, shape ``[b, 3, h, w]``.
        flow0, flow1: Flow fields, shape ``[b, 2 * num_flows, h, w]``.
        mask: Optional per-flow blending mask. If ``None``, the two warped
            images are simply averaged.
        img_res: Optional per-flow image residual; ``None`` means zero.
        mean: Optional per-sample mean to add back; ``None`` means zero.

    Returns:
        The fused intermediate-frame prediction, shape ``[b, 3, h, w]``.
    """
    b, c, h, w = flow0.shape
    num_flows = c // 2
    # Fold the flow dimension into the batch so all warps run in one call.
    flow0 = flow0.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
    flow1 = flow1.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
    if mask is not None:
        mask = mask.reshape(b, num_flows, 1, h, w).reshape(-1, 1, h, w)
    img_res = (
        img_res.reshape(b, num_flows, 3, h, w).reshape(-1, 3, h, w)
        if img_res is not None
        else 0
    )
    img0 = torch.stack([img0] * num_flows, 1).reshape(-1, 3, h, w)
    img1 = torch.stack([img1] * num_flows, 1).reshape(-1, 3, h, w)
    mean = (
        torch.stack([mean] * num_flows, 1).reshape(-1, 1, 1, 1)
        if mean is not None
        else 0
    )

    img0_warp = warp(img0, flow0)
    img1_warp = warp(img1, flow1)
    if mask is None:
        # Bug fix: the docstring promised a simple average for mask=None, but
        # the code multiplied by None and raised TypeError.
        img_warps = 0.5 * (img0_warp + img1_warp) + mean + img_res
    else:
        img_warps = mask * img0_warp + (1 - mask) * img1_warp + mean + img_res
    img_warps = img_warps.reshape(b, num_flows, 3, h, w)
    # Mean of the per-flow candidates plus a learned correction residual.
    imgt_pred = img_warps.mean(1) + comb_block(img_warps.view(b, -1, h, w))
    return imgt_pred
class MultiFlowDecoder(nn.Module):
    """Finest-level decoder predicting ``num_flows`` bidirectional flow fields,
    one blending mask and one 3-channel image residual per flow."""

    def __init__(self, in_ch, skip_ch, num_flows=3):
        super(MultiFlowDecoder, self).__init__()
        self.num_flows = num_flows
        # Input channels: ft_, f0_warp, f1_warp (in_ch each) + two 2-ch flows.
        # Output channels: 2n + 2n + n + 3n = 8n (see the split in forward()).
        self.convblock = nn.Sequential(
            convrelu(in_ch * 3 + 4, in_ch * 3),
            ResBlock(in_ch * 3, skip_ch),
            nn.ConvTranspose2d(in_ch * 3, 8 * num_flows, 4, 2, 1, bias=True),
        )

    def forward(self, ft_, f0, f1, flow0, flow1):
        """Refine the incoming flows at 2x resolution.

        Returns:
            (flow0, flow1, mask, img_res) — upsampled multi-flow fields,
            sigmoid blending mask and image residual.
        """
        n = self.num_flows
        # Align the pyramid features to the intermediate frame before decoding.
        f0_warp = warp(f0, flow0)
        f1_warp = warp(f1, flow1)
        out = self.convblock(torch.cat([ft_, f0_warp, f1_warp, flow0, flow1], 1))
        delta_flow0, delta_flow1, mask, img_res = torch.split(
            out, [2 * n, 2 * n, n, 3 * n], 1
        )
        mask = torch.sigmoid(mask)

        # Upsample the coarse flow 2x (values scaled by 2 accordingly), tile it
        # per flow hypothesis, and add the predicted deltas.
        flow0 = delta_flow0 + 2.0 * resize(flow0, scale_factor=2.0).repeat(
            1, self.num_flows, 1, 1
        )
        flow1 = delta_flow1 + 2.0 * resize(flow1, scale_factor=2.0).repeat(
            1, self.num_flows, 1, 1
        )

        return flow0, flow1, mask, img_res

BIN
src/pretrained/amt-l.pth Normal file

Binary file not shown.

BIN
src/pretrained/amt-s.pth Normal file

Binary file not shown.

179
src/runner.py Normal file
View File

@@ -0,0 +1,179 @@
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from cv2 import imwrite
import tqdm
import torch
from .config import presets
from .utils.fs import FileSystem
from .utils.video import VideoMaker
from .utils.torch import tensor2img
from .interpolator import (
ImageInterpolator,
Anchor,
get_device,
get_vram_available,
ModelRunner,
)
if TYPE_CHECKING:
import torch
import numpy as np
def performing_warning_message(device: "torch.device"):
    """Warn when the selected compute backend is expected to be slow.

    Raises:
        Exception: If the device type is neither cpu, mps, nor cuda.
    """
    kind = device.type
    if kind == "mps":
        logging.warning(
            "Running on Apple Silicon GPU (MPS) may have limited performance. Consider using a CUDA-enabled GPU for better performance."
        )
    elif kind == "cpu":
        logging.warning(
            "Running on CPU may be very slow. Consider using a GPU for better performance."
        )
    elif kind == "cuda":
        pass  # CUDA is the fast path: nothing to warn about.
    else:
        raise Exception(f"Unsupported device type: {device.type}")
def init_fs(base_path: Path) -> FileSystem:
    """Create the working FileSystem and wipe stale artifacts from prior runs."""
    fs = FileSystem(base_path)
    for workdir in (
        fs.frames_path,
        fs.interpolated_path,
        fs.moved_path,
        fs.video_part_path,
    ):
        fs.clear_directory(workdir)
    return fs
def init_video_maker() -> VideoMaker:
    """Construct the VideoMaker helper used for decoding/encoding video."""
    return VideoMaker()
def init_device() -> "torch.device":
    """Select the compute device, warn about slow backends, and log free VRAM."""
    device = get_device()
    performing_warning_message(device)
    free_bytes = get_vram_available(device)
    logging.info(f"Available VRAM: {free_bytes / (1024**3):.2f} GB")
    return device
def init_anchor(device: "torch.device") -> Anchor:
    """Build the resolution/memory Anchor used to plan interpolation workloads.

    Raises:
        Exception: If the device type is neither cpu, mps, nor cuda.
    """
    if device.type in ("cpu", "mps"):
        # System RAM is OS-managed, so no meaningful VRAM budget applies.
        return Anchor(resolution=8192 * 8192, memory=1, memory_bias=0)
    if device.type == "cuda":
        # Empirically tuned: ~720p anchor resolution against a 6.5 GiB working
        # set with a 7.5 GiB bias. NOTE(review): constants look GPU-specific —
        # confirm they suit the target hardware. (Removed the stale
        # commented-out lower-budget variant that used to live here.)
        return Anchor(
            resolution=1280 * 720, memory=6500 * 1024**2, memory_bias=7500 * 1024**2
        )
    raise Exception(f"Unsupported device type: {device.type}")
def init_model_runner(
    config: Path, checkpoint_path: Path, device: "torch.device"
) -> ModelRunner:
    """Build the ModelRunner for *config* / *checkpoint_path* on *device*."""
    return ModelRunner(config, checkpoint_path, device)
def init_interpolator(
    model_runner: ModelRunner, device: "torch.device"
) -> ImageInterpolator:
    """Wrap *model_runner* in an ImageInterpolator sized by the device's Anchor."""
    anchor = init_anchor(device)
    return ImageInterpolator(device, anchor, model_runner)
class InterpolationPipeline:
    """End-to-end 2x frame-interpolation pipeline.

    Reads a video in fixed-length chunks, interpolates one frame between every
    consecutive pair, encodes each chunk to an mp4 part, and finally
    concatenates the parts into the output video.
    """

    def __init__(
        self,
        config: Path,
        checkpoint_path: Path,
        base_path: Path,
    ):
        self.fs = init_fs(base_path)
        self.video_maker = init_video_maker()
        self.device = init_device()
        self.model_runner = init_model_runner(config, checkpoint_path, self.device)
        self.interpolator = init_interpolator(self.model_runner, self.device)

    def run(self, video_path: Path, output_video: str):
        """Interpolate *video_path* to double FPS and write *output_video*.

        Removed dead locals from the original (``prev_frames``, ``part``,
        ``last_part_seconds``, ``total_parts`` and a duplicate
        ``interpolated_frames`` initialisation) — none were ever read.
        """
        chunk_seconds = 10
        fps = self.video_maker.get_fps(video_path)
        logging.info(f"Video FPS: {fps}")
        fps *= 2  # Doubling FPS
        width, height = self.video_maker.get_size(video_path)
        # Inference only: fp16 autocast where the backend supports it, no grads.
        with torch.autocast(self.device.type, torch.float16):
            with torch.no_grad():
                prev_tensor = None  # carries the last frame across chunk borders
                for idx, frames in enumerate(
                    self.video_maker.video_to_frames_generator(
                        video_path, self.fs.frames_path, chunk_seconds
                    )
                ):
                    interpolated_frames: list["np.ndarray"] = []
                    for frame in tqdm.tqdm(frames):
                        tensor = self.interpolator.make_tensor(frame)
                        if prev_tensor is None:
                            # Very first frame of the video: nothing to pair with.
                            prev_tensor = tensor
                            continue
                        interpolated_frames.append(
                            tensor2img(
                                self.interpolator.interpolate(prev_tensor, tensor)
                            )
                        )
                        prev_tensor = tensor
                    generator = self._frame_generator(frames, interpolated_frames)
                    part_path = self.fs.video_part_path / f"video_{idx:08d}.mp4"
                    self.video_maker.images_to_video_pipeline(
                        generator, part_path, width, height, fps
                    )
        self._merge_video_parts(self.fs.output_path / output_video)

    def _merge_video_parts(self, output_video: Path):
        """Concatenate all encoded chunk files into the final video, then clean up."""
        self.video_maker.concatenate_videos(self.fs.video_part_path, output_video)
        self.fs.clear_directory(self.fs.video_part_path)

    def _frame_generator(
        self,
        source: tuple["np.ndarray", ...],
        interpolated: list["np.ndarray"],
    ):
        """Interleave source and interpolated frames in presentation order.

        When the counts are equal the chunk starts with an interpolated frame
        (it bridges the previous chunk's last frame); otherwise the chunk
        starts with its own first source frame.
        """
        if len(source) == len(interpolated):
            first, second = interpolated, source
        else:
            first, second = source, interpolated
        for i, frame in enumerate(first):
            yield frame
            if i < len(second):
                yield second[i]
def run(
    base_path: Path,
    video_path: Path,
    output_video: str,
    preset: presets.Preset = presets.LARGE,
):
    """Convenience entry point: build an InterpolationPipeline from *preset*
    and interpolate *video_path* into *output_video* under *base_path*."""
    pipeline = InterpolationPipeline(
        config=preset.config,
        checkpoint_path=preset.checkpoint,
        base_path=base_path,
    )
    pipeline.run(video_path, output_video)

View File

@@ -1,15 +1,19 @@
from typing import TYPE_CHECKING
import importlib
from ..networks import AMT_G, AMT_L, AMT_S
if TYPE_CHECKING:
from omegaconf import DictConfig
def base_build_fn(module: str, cls: str, params: dict):
return getattr(importlib.import_module(module, package=None), cls)(**params)
def build_from_cfg(config: "DictConfig"):
packages = {
"AMT-G": AMT_G,
"AMT-L": AMT_L,
"AMT-S": AMT_S
}
module, cls = config["name"].rsplit(".", 1)
params: dict = config.get("params", {})
return base_build_fn(module, cls, params)
return getattr(packages[module], cls)(**params)

53
src/utils/fs.py Normal file
View File

@@ -0,0 +1,53 @@
from pathlib import Path
class FileSystem:
    """Owns the on-disk working layout (source/output/frames/...) under one base path."""

    SOURCE_PATH = "source"
    OUTPUT_PATH = "output"
    FRAMES_PATH = "frames"
    INTERPOLATED_PATH = "interpolated"
    MOVED_PATH = "moved"
    VIDEO_PART_PATH = "video_parts"

    def __init__(self, base_path: Path):
        self.base_path = base_path
        self.base_path.mkdir(parents=True, exist_ok=True)

    def create_directory(self, dir_name: str) -> Path:
        """Ensure *dir_name* exists under the base path and return its Path."""
        target = self.base_path / dir_name
        target.mkdir(parents=True, exist_ok=True)
        return target

    def clear_directory(self, dir_path: Path):
        """Recursively delete the contents of *dir_path* (the directory itself stays)."""
        for entry in dir_path.iterdir():
            if entry.is_dir():
                self.clear_directory(entry)
                entry.rmdir()
            elif entry.is_file():
                entry.unlink()

    @property
    def source_path(self) -> Path:
        """Directory holding input material."""
        return self.create_directory(self.SOURCE_PATH)

    @property
    def output_path(self) -> Path:
        """Directory receiving the final rendered video."""
        return self.create_directory(self.OUTPUT_PATH)

    @property
    def frames_path(self) -> Path:
        """Scratch directory for extracted frames."""
        return self.create_directory(self.FRAMES_PATH)

    @property
    def interpolated_path(self) -> Path:
        """Scratch directory for interpolated frames."""
        return self.create_directory(self.INTERPOLATED_PATH)

    @property
    def moved_path(self) -> Path:
        """Scratch directory for relocated frames."""
        return self.create_directory(self.MOVED_PATH)

    @property
    def video_part_path(self) -> Path:
        """Scratch directory for per-chunk mp4 parts."""
        return self.create_directory(self.VIDEO_PART_PATH)

View File

@@ -21,10 +21,7 @@ class InputPadder:
]
def pad(self, *inputs: "torch.Tensor"):
if len(inputs) == 1:
return F.pad(inputs[0], self._pad, mode="replicate")
else:
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
def unpad(self, *inputs: "torch.Tensor"):
return [self._unpad(x) for x in inputs]

View File

@@ -1,199 +0,0 @@
import re
import sys
from pathlib import Path
import numpy as np
from imageio import imread, imwrite
def read(file: Path) -> np.ndarray:
    """Dispatch on *file*'s suffix and read it as an ndarray (image, flow, or float blob)."""
    suffix = file.suffix.lower()
    if suffix == ".float3":
        return readFloat(file)
    if suffix == ".flo":
        return readFlow(file)
    if suffix in (".ppm", ".pgm", ".png", ".jpg"):
        return readImage(file)
    if suffix == ".pfm":
        return readPFM(file)[0]
    raise Exception("don't know how to read %s" % file)
def write(file: Path, data: np.ndarray) -> None:
    """Dispatch on *file*'s suffix and write *data* in the matching format."""
    suffix = file.suffix.lower()
    if suffix == ".float3":
        return writeFloat(file, data)
    if suffix == ".flo":
        return writeFlow(file, data)
    if suffix in (".ppm", ".pgm", ".png", ".jpg"):
        return writeImage(file, data)
    if suffix == ".pfm":
        return writePFM(file, data)
    raise Exception("don't know how to write %s" % file)
def readPFM(file: Path):
    """Read a PFM (Portable FloatMap) image.

    Fix: the file handle was opened without ever being closed; it is now
    managed with a ``with`` block.

    Returns:
        (data, scale): float32 ndarray of shape (H, W, 3) for color or (H, W)
        for grayscale, and the absolute scale factor from the header.

    Raises:
        Exception: On a missing or malformed PFM header.
    """
    with open(file, "rb") as data:
        header = data.readline().rstrip()
        if header.decode("ascii") == "PF":
            color = True
        elif header.decode("ascii") == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file.")

        dim_match = re.match(r"^(\d+)\s(\d+)\s$", data.readline().decode("ascii"))
        if dim_match:
            width, height = list(map(int, dim_match.groups()))
        else:
            raise Exception("Malformed PFM header.")

        scale = float(data.readline().decode("ascii").rstrip())
        if scale < 0:
            endian = "<"  # negative scale marks little-endian sample data
            scale = -scale
        else:
            endian = ">"

        result = np.fromfile(data, endian + "f")

    shape = (height, width, 3) if color else (height, width)
    result = np.reshape(result, shape)
    result = np.flipud(result)  # PFM stores rows bottom-up
    return result, scale
def writePFM(file: Path, image: np.ndarray, scale=1):
    """Write *image* (float32, H x W or H x W x {1,3}) as a PFM file.

    Fixes:
        * Operator-precedence bug: ``"PF\\n" if color else "Pf\\n".encode()``
          only encoded the grayscale header, so writing a *color* image raised
          ``TypeError`` (str written to a binary file).
        * The file handle is now closed via ``with``.

    Raises:
        Exception: If dtype is not float32 or the shape is unsupported.
    """
    if image.dtype.name != "float32":
        raise Exception("Image dtype must be float32.")

    image = np.flipud(image)  # PFM stores rows bottom-up

    if len(image.shape) == 3 and image.shape[2] == 3:
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:
        color = False
    else:
        raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

    with open(file, "wb") as data:
        data.write(("PF\n" if color else "Pf\n").encode())
        data.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        endian = image.dtype.byteorder
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale  # negative scale marks little-endian sample data
        data.write("%f\n".encode() % scale)

        image.tofile(data)
def readFlow(file: Path):
    """Read an optical-flow field (Middlebury ``.flo``, or ``.pfm`` via readPFM).

    Fix: the ``.flo`` file handle was never closed; it is now managed with
    ``with``.

    Returns:
        float32 ndarray of shape (height, width, 2).

    Raises:
        Exception: If the .flo magic header "PIEH" is missing.
    """
    if file.suffix.lower() == ".pfm":
        # PFM-encoded flow: keep only the first two channels.
        return readPFM(file)[0][:, :, 0:2]

    with open(file, "rb") as f:
        header = f.read(4)
        if header.decode("utf-8") != "PIEH":
            raise Exception("Flow file header does not contain PIEH")

        width = np.fromfile(f, np.int32, 1).squeeze()
        height = np.fromfile(f, np.int32, 1).squeeze()
        flow = np.fromfile(f, np.float32, width * height * 2).reshape(
            (height, width, 2)
        )
    return flow.astype(np.float32)
def readImage(file: Path):
    """Read an image: ``.pfm`` via readPFM (first 3 channels), others via imageio."""
    if file.suffix.lower() != ".pfm":
        return imread(file)
    data = readPFM(file)[0]
    if len(data.shape) == 3:
        return data[:, :, 0:3]
    return data
def writeImage(file: Path, data: np.ndarray):
    """Write an image: ``.pfm`` via writePFM (scale 1), others via imageio."""
    if file.suffix.lower() == ".pfm":
        return writePFM(file, data, 1)
    return imwrite(file, data)
def writeFlow(file: Path, flow: np.ndarray):
    """Write *flow* (H x W x 2) in Middlebury ``.flo`` format.

    Fix: the file handle was opened without ever being closed; it is now
    managed with ``with``.
    """
    with open(file, "wb") as f:
        f.write("PIEH".encode("utf-8"))
        # Header stores width then height as int32.
        np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
        flow = flow.astype(np.float32)
        flow.tofile(f)
def readFloat(file: Path):
    """Read a ``.float3``-style blob: b"float\\n", a dim count, one dimension
    per line, then raw float32 samples.

    Fix: the file handle was never closed; it is now managed with ``with``.

    Raises:
        Exception: If the "float" keyword line is missing.
    """
    with open(file, "rb") as f:
        if (f.readline().decode("utf-8")) != "float\n":
            raise Exception("float file %s did not contain <float> keyword" % file)

        dim = int(f.readline())
        dims = []
        count = 1
        for _ in range(0, dim):
            d = int(f.readline())
            dims.append(d)
            count *= d

        # Dimensions are stored innermost-first; reverse for numpy's ordering.
        dims = list(reversed(dims))
        data = np.fromfile(f, np.float32, count).reshape(dims)

    if dim > 2:
        # Reorder axes exactly as the original double transpose did.
        data = np.transpose(data, (2, 1, 0))
        data = np.transpose(data, (1, 0, 2))
    return data
def writeFloat(file: Path, data: np.ndarray):
    """Write *data* (ndim <= 3) in the ``.float3`` blob format readFloat expects.

    Fixes:
        * The file handle is now closed via ``with``.
        * 1-D data previously fell into ``np.transpose(data, (2, 0, 1))`` and
          crashed even though dim == 1 is explicitly supported by the header
          code; it is now written directly.

    Raises:
        Exception: If *data* has more than 3 dimensions.
    """
    dim = len(data.shape)
    if dim > 3:
        raise Exception("bad float file dimension: %d" % dim)

    with open(file, "wb") as f:
        f.write(("float\n").encode("ascii"))
        f.write(("%d\n" % dim).encode("ascii"))

        if dim == 1:
            f.write(("%d\n" % data.shape[0]).encode("ascii"))
        else:
            # Width before height, then any remaining dimensions.
            f.write(("%d\n" % data.shape[1]).encode("ascii"))
            f.write(("%d\n" % data.shape[0]).encode("ascii"))
            for i in range(2, dim):
                f.write(("%d\n" % data.shape[i]).encode("ascii"))

        data = data.astype(np.float32)
        if dim <= 2:
            data.tofile(f)
        else:
            np.transpose(data, (2, 0, 1)).tofile(f)

147
src/utils/video.py Normal file
View File

@@ -0,0 +1,147 @@
import os
import logging
import subprocess
from pathlib import Path
from typing import Generator, Iterable
import cv2
import numpy as np
class VideoMaker:
    """Thin wrapper around ffmpeg (via subprocess) and OpenCV for video I/O."""

    def images_to_video(
        self,
        images_path: Path,
        output_path: Path,
        fps: float,
        image_numerator: str = "img_%08d.png",
    ):
        """Converts a sequence of images to a video using ffmpeg.

        NOTE(review): the command is a shell string, so paths containing
        spaces or shell metacharacters will break — only use with trusted,
        space-free paths.
        """
        cmd = f"ffmpeg -framerate {fps} -i {images_path / image_numerator} -c:v libx264 -pix_fmt yuv420p {output_path}"
        logging.info(f"Running command: {cmd}")
        result = self.run_command(cmd)
        if result != 0:
            logging.error(f"Failed to create video. Command returned {result}")

    def concatenate_videos(
        self,
        videos_path: Path,
        output_path: Path,
        video_numerator: str = "video_%08d.mp4",
    ):
        """Concatenates a sequence of videos using ffmpeg's concat demuxer.

        ``video_numerator`` is currently unused (parts are discovered with a
        sorted ``*.mp4`` glob); it is kept for interface compatibility.
        """
        videos = sorted(videos_path.glob("*.mp4"))
        file = "file.txt"
        with open(file, "w") as f:
            for video in videos:
                f.write(f"file '{video}'\n")
        cmd = f"ffmpeg -y -f concat -safe 0 -i {file} -c copy {output_path}"
        logging.info(f"Running command: {cmd}")
        result = self.run_command(cmd)
        if result != 0:
            logging.error(f"Failed to concatenate videos. Command returned {result}")
        os.remove(file)

    def get_fps(self, video_path: Path) -> float:
        """Gets the frames per second (FPS) of a video.

        Raises:
            ValueError: If the video cannot be opened.
        """
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            raise ValueError(f"Cannot open video: {video_path}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        logging.debug(f"FPS of video {video_path}: {fps}")
        return fps

    def get_video_duration(self, video_path: Path) -> float:
        """Gets the duration of a video in seconds.

        Raises:
            ValueError: If the video cannot be opened, or reports a
                non-positive FPS (fix: previously raised ZeroDivisionError).
        """
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            raise ValueError(f"Cannot open video: {video_path}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        cap.release()
        if fps <= 0:
            raise ValueError(f"Video reports invalid FPS ({fps}): {video_path}")
        duration = frame_count / fps
        logging.debug(f"Duration of video {video_path}: {duration:.2f} seconds")
        return duration

    def run_command(self, cmd: str) -> int:
        """Run *cmd* through the shell with all output silenced.

        Returns:
            0 on success, otherwise the command's non-zero return code.
        """
        try:
            subprocess.run(
                cmd,
                shell=True,
                check=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            return 0
        except subprocess.CalledProcessError as e:
            logging.error(f"Command failed with error: {e}")
            return e.returncode

    def video_to_frames_generator(
        self, video_path: Path, output_dir: Path, chunk_seconds: int = 10
    ) -> Generator[tuple[np.ndarray, ...], None, None]:
        """Yield decoded frames in chunks of roughly *chunk_seconds* seconds.

        ``output_dir`` is currently unused (frames are yielded in memory, not
        written to disk — the old docstring claimed otherwise); it is kept for
        interface compatibility.

        Fixes:
            * The final, partial chunk is now yielded instead of being
              silently dropped when the video ends mid-chunk.
            * A video reporting FPS <= 0 now raises instead of looping
              forever yielding empty tuples.

        Raises:
            ValueError: If the video cannot be opened or reports no FPS.
        """
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            raise ValueError(f"Cannot open video: {video_path}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        frames_per_chunk = int(fps * chunk_seconds)
        if frames_per_chunk <= 0:
            cap.release()
            raise ValueError(f"Video reports invalid FPS ({fps}): {video_path}")
        while True:
            frames = []
            for _ in range(frames_per_chunk):
                ret, frame = cap.read()
                if not ret:
                    cap.release()
                    if frames:
                        yield tuple(frames)
                    return
                frames.append(frame)
            yield tuple(frames)

    def images_to_video_pipeline(
        self,
        frames: Iterable[np.ndarray],
        output_path: Path,
        width: int,
        height: int,
        fps: float,
    ):
        """Stream raw BGR frames straight into an ffmpeg encoder process.

        Raises:
            Exception: If ffmpeg's stdin pipe is unavailable.
        """
        pipeline = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-f", "rawvideo",
                "-vcodec", "rawvideo",
                "-pix_fmt", "bgr24",
                "-s", f"{width}x{height}",
                "-r", str(fps),
                "-i", "-",
                "-an",
                "-vcodec", "libx264",
                "-pix_fmt", "yuv420p",
                str(output_path),
            ],
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        if pipeline.stdin is None:
            raise Exception("STDIN closed")
        for frame in frames:
            pipeline.stdin.write(frame.tobytes())
        pipeline.stdin.close()
        pipeline.wait()

    def get_size(self, video_path: Path) -> tuple[int, int]:
        """Return the (width, height) of a video.

        Raises:
            ValueError: If the video cannot be opened.
        """
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            raise ValueError(f"Cannot open video: {video_path}")
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cap.release()
        return width, height

86
uv.lock generated
View File

@@ -1,5 +1,5 @@
version = 1
revision = 2
revision = 3
requires-python = ">=3.12"
[[package]]
@@ -7,7 +7,6 @@ name = "amt-apple"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "imageio" },
{ name = "numpy" },
{ name = "omegaconf" },
{ name = "opencv-python" },
@@ -17,7 +16,6 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "imageio", specifier = ">=2.37.3" },
{ name = "numpy", specifier = ">=2.4.4" },
{ name = "omegaconf", specifier = ">=2.3.0" },
{ name = "opencv-python", specifier = ">=4.13.0.92" },
@@ -127,19 +125,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d5/1f/5f4a3cd9e4440e9d9bc78ad0a91a1c8d46b4d429d5239ebe6793c9fe5c41/fsspec-2026.3.0-py3-none-any.whl", hash = "sha256:d2ceafaad1b3457968ed14efa28798162f1638dbb5d2a6868a2db002a5ee39a4", size = 202595, upload-time = "2026-03-27T19:11:13.595Z" },
]
[[package]]
name = "imageio"
version = "2.37.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "pillow" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/84/93bcd1300216ea50811cee96873b84a1bebf8d0489ffaf7f2a3756bab866/imageio-2.37.3.tar.gz", hash = "sha256:bbb37efbfc4c400fcd534b367b91fcd66d5da639aaa138034431a1c5e0a41451", size = 389673, upload-time = "2026-03-09T11:31:12.573Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/49/fa/391e437a34e55095173dca5f24070d89cbc233ff85bf1c29c93248c6588d/imageio-2.37.3-py3-none-any.whl", hash = "sha256:46f5bb8522cd421c0f5ae104d8268f569d856b29eb1a13b92829d1970f32c9f0", size = 317646, upload-time = "2026-03-09T11:31:10.771Z" },
]
[[package]]
name = "jinja2"
version = "3.1.6"
@@ -474,75 +459,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e9/a5/1be1516390333ff9be3a9cb648c9f33df79d5096e5884b5df71a588af463/opencv_python-4.13.0.92-cp37-abi3-win_amd64.whl", hash = "sha256:423d934c9fafb91aad38edf26efb46da91ffbc05f3f59c4b0c72e699720706f5", size = 40212062, upload-time = "2026-02-05T07:02:12.724Z" },
]
[[package]]
name = "pillow"
version = "12.1.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" },
{ url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" },
{ url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" },
{ url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" },
{ url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" },
{ url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" },
{ url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" },
{ url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" },
{ url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" },
{ url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" },
{ url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" },
{ url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" },
{ url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" },
{ url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" },
{ url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" },
{ url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" },
{ url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" },
{ url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" },
{ url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" },
{ url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" },
{ url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" },
{ url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" },
{ url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" },
{ url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" },
{ url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" },
{ url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" },
{ url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" },
{ url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" },
{ url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" },
{ url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" },
{ url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" },
{ url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" },
{ url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" },
{ url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" },
{ url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" },
{ url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" },
{ url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" },
{ url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" },
{ url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" },
{ url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" },
{ url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" },
{ url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" },
{ url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" },
{ url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" },
{ url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" },
{ url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" },
{ url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" },
{ url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" },
{ url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" },
{ url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" },
{ url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" },
{ url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" },
{ url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" },
{ url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" },
{ url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" },
{ url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" },
{ url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" },
{ url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" },
{ url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" },
{ url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" },
{ url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" },
]
[[package]]
name = "pyyaml"
version = "6.0.3"