aggregate all scripts into one function

- .gitignore +1 -0
- app.py +18 -35
- dynamic_predictor/dust3r/training.py +1 -0
- dynamic_predictor/launch.py +30 -0
- gaussian_renderer/__init__.py +2 -2
- render.py +36 -4
- scene/__init__.py +2 -2
- scene/cameras.py +1 -1
- scene/dataset_readers.py +4 -4
- scene/gaussian_model.py +8 -8
- train_gui.py +58 -6
- {utils → utils_das3r}/camera_utils.py +2 -2
- {utils → utils_das3r}/dust3r_utils.py +0 -0
- {utils → utils_das3r}/general_utils.py +0 -0
- {utils → utils_das3r}/graphics_utils.py +0 -0
- {utils → utils_das3r}/gui_utils.py +0 -0
- {utils → utils_das3r}/image_utils.py +0 -0
- {utils → utils_das3r}/loss_utils.py +0 -0
- {utils → utils_das3r}/pose_utils.py +1 -1
- {utils → utils_das3r}/push_to_hf.py +0 -0
- {utils → utils_das3r}/rearrange.py +6 -5
- {utils → utils_das3r}/rearrange_davis.py +0 -0
- {utils → utils_das3r}/rearrange_sintel.py +0 -0
- {utils → utils_das3r}/sh_utils.py +0 -0
- {utils → utils_das3r}/stepfun.py +0 -0
- {utils → utils_das3r}/system_utils.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/align_trajectory.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/align_utils.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/compute_trajectory_errors.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/results_writer.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/trajectory_utils.py +0 -0
- {utils → utils_das3r}/utils_poses/ATE/transformations.py +0 -0
- {utils → utils_das3r}/utils_poses/align_traj.py +0 -0
- {utils → utils_das3r}/utils_poses/comp_ate.py +0 -0
- {utils → utils_das3r}/utils_poses/lie_group_helper.py +0 -0
- {utils → utils_das3r}/utils_poses/relative_pose.py +0 -0
- {utils → utils_das3r}/utils_poses/vis_cam_traj.py +0 -0
- {utils → utils_das3r}/utils_poses/vis_pose_utils.py +0 -0
- {utils → utils_das3r}/vo_eval.py +0 -0
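
Taken together, the commit swaps app.py's shell-out helpers (cmd, cmd_gpu_s1/s2/s3) for direct calls into new main() entry points in dynamic_predictor/launch.py, utils_das3r/rearrange.py, train_gui.py and render.py. A minimal sketch of the resulting in-process flow, mirroring the calls added to process(); the input/output paths below are placeholders for illustration, not paths fixed by the repository:

    from dynamic_predictor.launch import main as dynamic_predictor_main
    from utils_das3r.rearrange import main as rearrange_main
    from train_gui import main as train_main
    from render import main as render_main

    imgs_path = './results/demo/images'            # placeholder frame folder
    output_path = './results/demo/output'          # predictor output (placeholder)
    rearranged_path = f'{output_path}_rearranged'

    # 1) pose / point prediction, 2) rearrange into the COLMAP-style layout,
    # 3) train the Gaussian model, 4) render the video and point cloud
    dynamic_predictor_main(pretrained='Kai422kx/das3r', dir_path=imgs_path,
                           output_dir=output_path, use_pred_mask=True, n_iter=150)
    rearrange_main(output_dir=output_path, rearranged_path=rearranged_path)
    train_main(s=rearranged_path, m=rearranged_path, iter=2000)
    render_main(s=rearranged_path, m=rearranged_path, iter=2000, get_video=True)
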
.gitignore
CHANGED

@@ -23,3 +23,4 @@ build/
 /dev/
 gradio_cache_folder/
 results/
+.gradio

app.py
CHANGED

@@ -10,12 +10,18 @@ import re
 import torch
 import spaces
 
-subprocess.run(shlex.split("pip install wheel/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl"))
-subprocess.run(shlex.split("pip install wheel/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl"))
-subprocess.run(shlex.split("pip install wheel/curope-0.0.0-cp310-cp310-linux_x86_64.whl"))
-
+subprocess.run(shlex.split("pip install wheel/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl"))
+subprocess.run(shlex.split("pip install wheel/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl"))
+subprocess.run(shlex.split("pip install wheel/curope-0.0.0-cp310-cp310-linux_x86_64.whl"))
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+os.sys.path.append(os.path.abspath(os.path.join(BASE_DIR, "dynamic_predictor")))
+os.sys.path.append(os.path.abspath(os.path.join(BASE_DIR)))
 GRADIO_CACHE_FOLDER = './gradio_cache_folder'
 
+from dynamic_predictor.launch import main as dynamic_predictor_main
+from utils_das3r.rearrange import main as rearrange_main
+from train_gui import main as train_main
+from render import main as render_main
 
 def get_dust3r_args_parser():
     parser = argparse.ArgumentParser()

@@ -37,25 +43,7 @@ def natural_sort(l):
     alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key.split('/')[-1])]
     return sorted(l, key=alphanum_key)
 
-def cmd(command):
-    print(command)
-    subprocess.run(shlex.split(command))
-
-@spaces.GPU(duration=70)
-def cmd_gpu_s1(command):
-    print('gpu:', command)
-    subprocess.run(shlex.split(command))
-
-@spaces.GPU(duration=40)
-def cmd_gpu_s2(command):
-    print('gpu:', command)
-    subprocess.run(shlex.split(command))
-
-@spaces.GPU(duration=20)
-def cmd_gpu_s3(command):
-    print('gpu:', command)
-    subprocess.run(shlex.split(command))
-
+@spaces.GPU(duration=150)
 def process(inputfiles, input_path='demo'):
     if inputfiles:
         frames = natural_sort(inputfiles)

@@ -75,22 +63,17 @@ def process(inputfiles, input_path='demo')
 
     imgs_path = temp_dir
     output_path = f'./results/{input_path}/output'
-        --pretrained=Kai422kx/das3r \
-        --dir_path={imgs_path} \
-        --output_dir={output_path} \
-        --use_pred_mask --n_iter 150")
-
-    cmd(f"python utils/rearrange.py --output_dir={output_path}")
-    output_path = f'{output_path}_rearranged'
+    rearranged_path = f'{output_path}_rearranged'
 
-    cmd_gpu_s2(f"python train_gui.py -s {output_path} -m {output_path} --iter 2000")
-    cmd_gpu_s3(f"python render.py -s {output_path} -m {output_path} --iter 2000 --get_video")
+    dynamic_predictor_main(pretrained='Kai422kx/das3r', dir_path=imgs_path, output_dir=output_path, use_pred_mask=True, n_iter=150)
+    rearrange_main(output_dir=output_path, rearranged_path = rearranged_path)
+    train_main(s = rearranged_path, m = rearranged_path, iter = 2000)
+    render_main(s = rearranged_path, m = rearranged_path, iter = 2000, get_video = True)
 
 
-    output_video_path = f"{
-    output_ply_path = f"{
+    output_video_path = f"{rearranged_path}/rendered.mp4"
+    output_ply_path = f"{rearranged_path}/point_cloud/iteration_2000/point_cloud.ply"
     return output_video_path, output_ply_path, output_ply_path

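With the whole pipeline folded into process() under a single @spaces.GPU(duration=150) decorator, the function can be handed straight to Gradio. The wiring below is only a hypothetical sketch (the Space's actual interface is defined elsewhere in app.py and is not part of this diff); the component choices and labels are assumptions:

    import gradio as gr
    from app import process   # process(inputfiles, input_path='demo') -> (video, ply, ply)

    # Illustrative interface only: three outputs to match process()'s return values.
    demo = gr.Interface(
        fn=process,
        inputs=gr.File(file_count="multiple", label="Input frames"),
        outputs=[gr.Video(label="Rendered video"),
                 gr.Model3D(label="Point cloud"),
                 gr.File(label="point_cloud.ply")],
    )

    if __name__ == "__main__":
        demo.launch()
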
dynamic_predictor/dust3r/training.py
CHANGED

@@ -217,6 +217,7 @@ def train(args):
     data_loader_test = {}
     for dataset in args.test_dataset.split('+'):
         testset = build_dataset(dataset, args.test_batch_size, args.num_workers, test=True)
+        print(args.test_dataset)
         name_testset = dataset.split('(')[0]
         if getattr(testset.dataset.dataset, 'strides', None) is not None:
             name_testset += f'_stride{testset.dataset.dataset.strides}'

dynamic_predictor/launch.py
CHANGED

@@ -10,6 +10,35 @@ import torch.backends.cudnn as cudnn
 import numpy as np
 import os
 
+def main(pretrained, dir_path, output_dir, use_pred_mask, n_iter):
+
+    args = get_args_parser()
+    args = args.parse_args()
+    args.pretrained = pretrained
+    args.dir_path = dir_path
+    args.output_dir = output_dir
+    args.use_pred_mask = use_pred_mask
+    args.n_iter = n_iter
+
+    misc.init_distributed_mode(args)
+    global_rank = misc.get_rank()
+    world_size = misc.get_world_size()
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    device = torch.device(device)
+
+    # fix the seed
+    seed = args.seed + misc.get_rank()
+    torch.manual_seed(seed)
+    np.random.seed(seed)
+    cudnn.benchmark = args.cudnn_benchmark
+    model, _ = load_model(args, device)
+    os.makedirs(args.output_dir, exist_ok=True)
+
+
+    pose_estimation_custom(args, model, device, save_dir=args.output_dir)
+
+
+
 if __name__ == '__main__':
     args = get_args_parser()
     args = args.parse_args()

@@ -39,3 +68,4 @@ if __name__ == '__main__':
 
     exit(0)
     train(args)
+

gaussian_renderer/__init__.py
CHANGED

@@ -16,8 +16,8 @@ from diff_gaussian_rasterization import (
     GaussianRasterizer,
 )
 from scene.gaussian_model import GaussianModel
-from utils.sh_utils import eval_sh
-from utils.pose_utils import get_camera_from_tensor, quadmultiply
+from utils_das3r.sh_utils import eval_sh
+from utils_das3r.pose_utils import get_camera_from_tensor, quadmultiply
 
 
 def render(

render.py
CHANGED

@@ -10,19 +10,20 @@
 #
 
 import torch
+import sys
 from scene import Scene
 import os
 from tqdm import tqdm
 from os import makedirs
 from gaussian_renderer import render_test as render
 import torchvision
-from utils.general_utils import safe_state
+from utils_das3r.general_utils import safe_state
 from argparse import ArgumentParser
 from arguments import ModelParams, PipelineParams, get_combined_args
 from gaussian_renderer import GaussianModel
-from utils.pose_utils import get_tensor_from_camera
-from utils.camera_utils import generate_interpolated_path
-from utils.camera_utils import visualizer
+from utils_das3r.pose_utils import get_tensor_from_camera
+from utils_das3r.camera_utils import generate_interpolated_path
+from utils_das3r.camera_utils import visualizer
 import cv2
 import numpy as np
 import imageio

@@ -122,6 +123,37 @@ def render_sets(
         output_video_file = os.path.join(dataset.model_path, f'rendered.mp4')
         images_to_video(image_folder, output_video_file, fps=15)
 
+def main(s, m, iter, get_video):
+    # Set up command line argument parser
+    parser = ArgumentParser(description="script parameters")
+    lp = ModelParams(parser)
+    pp = PipelineParams(parser)
+    parser.add_argument("--iteration", default=-1, type=int)
+    parser.add_argument("--skip_train", action="store_true")
+    parser.add_argument("--skip_test", action="store_true")
+    parser.add_argument("--quiet", action="store_true")
+
+    parser.add_argument("--get_video", action="store_true")
+    parser.add_argument("--n_views", default=None, type=int)
+    parser.add_argument("--scene", default=None, type=str)
+
+    args = parser.parse_args(sys.argv[1:])
+
+    args.source_path = s
+    args.model_path = m
+    args.iteration = iter
+    args.get_video = get_video
+    # Initialize system state (RNG)
+    # safe_state(args.quiet)
+    args.eval = False
+    render_sets(
+        lp.extract(args),
+        args.iteration,
+        pp.extract(args),
+        args.skip_train,
+        args.skip_test,
+        args,
+    )
 
 if __name__ == "__main__":
     # Set up command line argument parser

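The new render.main(s, m, iter, get_video) makes it possible to re-render an already-trained model in-process, without going back through prediction or training. A minimal sketch, assuming a trained model directory already exists (the path is a placeholder; iteration 2000 matches the value app.py uses):

    from render import main as render_main

    model_dir = './results/demo/output_rearranged'   # placeholder trained-model dir
    render_main(s=model_dir, m=model_dir, iter=2000, get_video=True)
    # render_sets() then writes {model_dir}/rendered.mp4 via images_to_video()
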
scene/__init__.py
CHANGED

@@ -12,11 +12,11 @@
 import os
 import random
 import json
-from utils.system_utils import searchForMaxIteration
+from utils_das3r.system_utils import searchForMaxIteration
 from scene.dataset_readers import sceneLoadTypeCallbacks
 from scene.gaussian_model import GaussianModel
 from arguments import ModelParams
-from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
+from utils_das3r.camera_utils import cameraList_from_camInfos, camera_to_JSON
 import open3d as o3d
 
 class Scene:

scene/cameras.py
CHANGED

@@ -12,7 +12,7 @@
 import torch
 from torch import nn
 import numpy as np
-from utils.graphics_utils import getWorld2View2, getProjectionMatrix
+from utils_das3r.graphics_utils import getWorld2View2, getProjectionMatrix
 
 class Camera(nn.Module):
     def __init__(self, colmap_id, intr, R, T, original_pose, FoVx, FoVy, image, gt_alpha_mask, dynamic_mask, enlarged_dynamic_mask,

scene/dataset_readers.py
CHANGED

@@ -15,16 +15,16 @@ from PIL import Image
 from typing import NamedTuple
 from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
     read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
-from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal, to_open3d_point_cloud
+from utils_das3r.graphics_utils import getWorld2View2, focal2fov, fov2focal, to_open3d_point_cloud
 import numpy as np
 import json
 from pathlib import Path
 from plyfile import PlyData, PlyElement
-from utils.sh_utils import SH2RGB
+from utils_das3r.sh_utils import SH2RGB
 from scene.gaussian_model import BasicPointCloud
-from utils.vo_eval import file_interface
+from utils_das3r.vo_eval import file_interface
 import torch
-from utils.pose_utils import quad2rotation
+from utils_das3r.pose_utils import quad2rotation
 
 class CameraInfo(NamedTuple):
     uid: int

scene/gaussian_model.py
CHANGED

@@ -12,19 +12,19 @@
 import torch
 # from lietorch import SO3, SE3, Sim3, LieGroupParameter
 import numpy as np
-from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
+from utils_das3r.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
 from torch import nn
 import os
-from utils.system_utils import mkdir_p
+from utils_das3r.system_utils import mkdir_p
 from plyfile import PlyData, PlyElement
-from utils.sh_utils import RGB2SH
+from utils_das3r.sh_utils import RGB2SH
 from simple_knn._C import distCUDA2
-from utils.graphics_utils import BasicPointCloud
-from utils.general_utils import strip_symmetric, build_scaling_rotation
+from utils_das3r.graphics_utils import BasicPointCloud
+from utils_das3r.general_utils import strip_symmetric, build_scaling_rotation
 from scipy.spatial.transform import Rotation as R
-from utils.pose_utils import rotation2quad, get_tensor_from_camera
-from utils.graphics_utils import getWorld2View2
-from utils.pose_utils import rotation2quad, get_tensor_from_camera, depth_to_pts3d
+from utils_das3r.pose_utils import rotation2quad, get_tensor_from_camera
+from utils_das3r.graphics_utils import getWorld2View2
+from utils_das3r.pose_utils import rotation2quad, get_tensor_from_camera, depth_to_pts3d
 
 class GaussianModel:

train_gui.py
CHANGED

@@ -14,19 +14,19 @@ import numpy as np
 import torch
 from PIL import Image
 from random import randint
-from utils.loss_utils import l1_loss, ssim
+from utils_das3r.loss_utils import l1_loss, ssim
 from gaussian_renderer import render, network_gui, render_confidence
 import sys
 from scene import Scene, GaussianModel
-from utils.general_utils import safe_state
+from utils_das3r.general_utils import safe_state
 import uuid
 from tqdm import tqdm
-from utils.image_utils import psnr
+from utils_das3r.image_utils import psnr
 from argparse import ArgumentParser, Namespace
 from arguments import ModelParams, PipelineParams, OptimizationParams
-from utils.pose_utils import get_camera_from_tensor
-from utils.vo_eval import load_traj, eval_metrics, plot_trajectory
-from utils.gui_utils import orbit_camera, OrbitCamera
+from utils_das3r.pose_utils import get_camera_from_tensor
+from utils_das3r.vo_eval import load_traj, eval_metrics, plot_trajectory
+from utils_das3r.gui_utils import orbit_camera, OrbitCamera
 import dearpygui.dearpygui as dpg
 from scipy.spatial.transform import Rotation

@@ -710,7 +710,59 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
                 tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
 
     return log
+
+def main(s, m, iter = 2000):
+    # Set up command line argument parser
+    parser = ArgumentParser(description="Training script parameters")
+    lp = ModelParams(parser)
+    op = OptimizationParams(parser)
+    pp = PipelineParams(parser)
+    parser.add_argument('--ip', type=str, default="127.0.0.1")
+    parser.add_argument('--port', type=int, default=6009)
+    parser.add_argument('--debug_from', type=int, default=-1)
+    parser.add_argument('--detect_anomaly', action='store_true', default=False)
+    parser.add_argument("--test_iterations", nargs="+", type=int, default=[1, 500, 800, 1000, 1500, 2000, 3000, 4000, 5000, 6000, 7_000, 30_000])
+    parser.add_argument("--save_iterations", nargs="+", type=int, default=[])
+    parser.add_argument("--quiet", action="store_true")
+    parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
+    parser.add_argument("--start_checkpoint", type=str, default = None)
+    parser.add_argument("--get_video", action="store_true")
+    parser.add_argument("--optim_pose", type=bool, default = True)
+    parser.add_argument("--gui", action="store_true")
+    parser.add_argument("--eval_pose", action="store_true")
+    parser.add_argument('--pose_eval_interval', type=int, default=100)
+    parser.add_argument('--psnr_threshold', type=float, default=26)
+    parser.add_argument('--gt_dynamic_mask', type=str, default='/home/remote/data/sintel/training/dynamic_label_perfect')
+    parser.add_argument('--dataset', type=str, default='sintel')
+
+    args = parser.parse_args(sys.argv[1:])
+    args.source_path = s
+    args.model_path = m
+    args.iterations = iter
+
+    args.save_iterations.append(args.iterations)
+
+    os.makedirs(args.model_path, exist_ok=True)
+
+    print("Optimizing " + args.model_path)
+
+    # Initialize system state (RNG)
+    # safe_state(args.quiet)
+
+    torch.autograd.set_detect_anomaly(args.detect_anomaly)
+
+    if args.gui:
+        w, h = Image.open(os.path.join(args.source_path, 'images', 'frame_0000.png')).size
+        gui = GUI(gui = args.gui, w=w, h=h)
+    else:
+        gui = None
+
+    training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from, args, gui)
+
+    # All done
+    print("\nTraining complete.")
+
 if __name__ == "__main__":
     # Set up command line argument parser
     parser = ArgumentParser(description="Training script parameters")

{utils → utils_das3r}/camera_utils.py
RENAMED

@@ -11,8 +11,8 @@
 
 from scene.cameras import Camera
 import numpy as np
-from utils.general_utils import PILtoTorch
-from utils.graphics_utils import fov2focal
+from utils_das3r.general_utils import PILtoTorch
+from utils_das3r.graphics_utils import fov2focal
 import torch
 import scipy
 import matplotlib.pyplot as plt

{utils → utils_das3r}/dust3r_utils.py
RENAMED
File without changes

{utils → utils_das3r}/general_utils.py
RENAMED
File without changes

{utils → utils_das3r}/graphics_utils.py
RENAMED
File without changes

{utils → utils_das3r}/gui_utils.py
RENAMED
File without changes

{utils → utils_das3r}/image_utils.py
RENAMED
File without changes

{utils → utils_das3r}/loss_utils.py
RENAMED
File without changes

{utils → utils_das3r}/pose_utils.py
RENAMED

@@ -3,7 +3,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F
 from typing import Tuple
-from utils.stepfun import sample_np, sample
+from utils_das3r.stepfun import sample_np, sample
 import scipy
 

{utils → utils_das3r}/push_to_hf.py
RENAMED
File without changes

{utils → utils_das3r}/rearrange.py
RENAMED

@@ -2,7 +2,7 @@ import os
 import cv2
 import PIL.Image as Image
 import numpy as np
-from vo_eval import file_interface
+from utils_das3r.vo_eval import file_interface
 from pathlib import Path
 from plyfile import PlyData, PlyElement
 import torch

@@ -38,7 +38,7 @@ def quaternion_to_matrix(quaternions):
     )
     return o.reshape(quaternions.shape[:-1] + (3, 3))
 
-def
+def rearrange(dataset_path, output_path):
     # load pred_traj
 
     output_colmap_path=os.path.join(output_path, 'sparse/0')

@@ -353,12 +353,13 @@ def R_to_quaternion(R):
 
     return np.array([w, x, y, z])
 
+def main(output_dir, rearranged_path):
+    rearrange(output_dir, rearranged_path)
+
 if __name__ == "__main__":
-    dataset_path = 'results/sintel'
-    output_path = dataset_path.replace('sintel', 'sintel_rearranged')
     parser = argparse.ArgumentParser(description='Rearrange dataset.')
     parser.add_argument('--output_dir', type=str, default='data/custom/output', help='Output directory')
     args = parser.parse_args()
     output_path = args.output_dir
     rearranged_path = output_path+'_rearranged'
+    rearrange(args.output_dir, rearranged_path)

{utils → utils_das3r}/rearrange_davis.py
RENAMED
File without changes

{utils → utils_das3r}/rearrange_sintel.py
RENAMED
File without changes

{utils → utils_das3r}/sh_utils.py
RENAMED
File without changes

{utils → utils_das3r}/stepfun.py
RENAMED
File without changes

{utils → utils_das3r}/system_utils.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/align_trajectory.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/align_utils.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/compute_trajectory_errors.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/results_writer.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/trajectory_utils.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/ATE/transformations.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/align_traj.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/comp_ate.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/lie_group_helper.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/relative_pose.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/vis_cam_traj.py
RENAMED
File without changes

{utils → utils_das3r}/utils_poses/vis_pose_utils.py
RENAMED
File without changes

{utils → utils_das3r}/vo_eval.py
RENAMED
File without changes