From 2c07c849b57463debbca505a439ab3b2aa2744a9 Mon Sep 17 00:00:00 2001 From: Francesco Christopher Date: Thu, 8 Jan 2026 18:40:28 +0100 Subject: [PATCH 1/4] test patches --- .gitignore | 25 + nerfstudio.code-workspace | 84 +++ nerfstudio/configs/360.gin | 15 + nerfstudio/configs/360_glo.gin | 15 + nerfstudio/configs/blender.gin | 15 + nerfstudio/configs/blender_refnerf.gin | 41 + nerfstudio/configs/llff_256.gin | 19 + nerfstudio/configs/llff_512.gin | 19 + nerfstudio/configs/llff_raw.gin | 73 ++ nerfstudio/configs/multi360.gin | 5 + .../process_data/colmap_utils - Copia.py | 714 ++++++++++++++++++ nerfstudio/scripts/exporter_frank.py | 676 +++++++++++++++++ ...tudio_stable_environment_post_zipnerf.yaml | 319 ++++++++ requirements_post_zipnerf - Copia.txt | 253 +++++++ requirements_post_zipnerf.txt | 251 ++++++ 15 files changed, 2524 insertions(+) create mode 100644 nerfstudio.code-workspace create mode 100644 nerfstudio/configs/360.gin create mode 100644 nerfstudio/configs/360_glo.gin create mode 100644 nerfstudio/configs/blender.gin create mode 100644 nerfstudio/configs/blender_refnerf.gin create mode 100644 nerfstudio/configs/llff_256.gin create mode 100644 nerfstudio/configs/llff_512.gin create mode 100644 nerfstudio/configs/llff_raw.gin create mode 100644 nerfstudio/configs/multi360.gin create mode 100644 nerfstudio/process_data/colmap_utils - Copia.py create mode 100644 nerfstudio/scripts/exporter_frank.py create mode 100644 nerfstudio_stable_environment_post_zipnerf.yaml create mode 100644 requirements_post_zipnerf - Copia.txt create mode 100644 requirements_post_zipnerf.txt diff --git a/.gitignore b/.gitignore index 28afce358a..f2f95b5401 100644 --- a/.gitignore +++ b/.gitignore @@ -188,6 +188,31 @@ camera_paths/ */**/.DS_Store */**/._.DS_Store +#Requirements and test stuff: +# Root-level exclusions +/*.txt +/*.pdf +/*.yml +/*.yaml + +# āœ… Exceptions: Include specific files in root dir +!requirements_post_zipnerf.txt +!requirements_post_zipnerf - Copia.txt 
+!nerfstudio_stable_environment_post_zipnerf.yaml +# External Submodules: +NeRFtoGSandBack/ +glomap/ +instruct-gs2gs/ +opennerf/ +pytorch_scatter/ +splatfacto-w/ +src/igs2gs/ +src/nerfgs/ +src/splatfacto-w/ +src/zipnerf/ +tetra-nerf/ +zipnerf-pytorch/ + # pixi environments .pixi /third_party \ No newline at end of file diff --git a/nerfstudio.code-workspace b/nerfstudio.code-workspace new file mode 100644 index 0000000000..5e645c9817 --- /dev/null +++ b/nerfstudio.code-workspace @@ -0,0 +1,84 @@ +{ + "folders": [ + { + "path": "." + }, + { + "path": "../nerfstudio1.0.3" + }, + { + "path": "../neuralangelo" + }, + { + "path": "../sdfstudio" + } + ], + "settings": { + "javascript.validate.enable": false, + "files.associations": { + "array": "cpp", + "bitset": "cpp", + "string_view": "cpp", + "initializer_list": "cpp", + "utility": "cpp", + "__hash_table": "cpp", + "__split_buffer": "cpp", + "deque": "cpp", + "iterator": "cpp", + "queue": "cpp", + "stack": "cpp", + "string": "cpp", + "unordered_map": "cpp", + "vector": "cpp", + "atomic": "cpp", + "*.tcc": "cpp", + "cctype": "cpp", + "chrono": "cpp", + "clocale": "cpp", + "cmath": "cpp", + "codecvt": "cpp", + "condition_variable": "cpp", + "cstdarg": "cpp", + "cstddef": "cpp", + "cstdint": "cpp", + "cstdio": "cpp", + "cstdlib": "cpp", + "cstring": "cpp", + "ctime": "cpp", + "cwchar": "cpp", + "cwctype": "cpp", + "exception": "cpp", + "algorithm": "cpp", + "filesystem": "cpp", + "functional": "cpp", + "memory": "cpp", + "memory_resource": "cpp", + "optional": "cpp", + "ratio": "cpp", + "system_error": "cpp", + "tuple": "cpp", + "type_traits": "cpp", + "fstream": "cpp", + "iomanip": "cpp", + "iosfwd": "cpp", + "iostream": "cpp", + "istream": "cpp", + "limits": "cpp", + "mutex": "cpp", + "new": "cpp", + "ostream": "cpp", + "sstream": "cpp", + "stdexcept": "cpp", + "streambuf": "cpp", + "thread": "cpp", + "typeinfo": "cpp", + "__nullptr": "cpp", + "__config": "cpp", + "__locale": "cpp", + "__bit_reference": "cpp", + "ios": 
"cpp", + "__atomic": "cpp", + "__node_handle": "cpp" + } + } +} \ No newline at end of file diff --git a/nerfstudio/configs/360.gin b/nerfstudio/configs/360.gin new file mode 100644 index 0000000000..d39fe8bab2 --- /dev/null +++ b/nerfstudio/configs/360.gin @@ -0,0 +1,15 @@ +Config.exp_name = 'test' +Config.dataset_loader = 'llff' +Config.near = 0.2 +Config.far = 1e6 +Config.factor = 4 + +Model.raydist_fn = 'power_transformation' +Model.opaque_background = True + +PropMLP.disable_density_normals = True +PropMLP.disable_rgb = True +PropMLP.grid_level_dim = 1 + +NerfMLP.disable_density_normals = True + diff --git a/nerfstudio/configs/360_glo.gin b/nerfstudio/configs/360_glo.gin new file mode 100644 index 0000000000..42a2436218 --- /dev/null +++ b/nerfstudio/configs/360_glo.gin @@ -0,0 +1,15 @@ +Config.dataset_loader = 'llff' +Config.near = 0.2 +Config.far = 1e6 +Config.factor = 4 + +Model.raydist_fn = 'power_transformation' +Model.num_glo_features = 128 +Model.opaque_background = True + +PropMLP.disable_density_normals = True +PropMLP.disable_rgb = True +PropMLP.grid_level_dim = 1 + + +NerfMLP.disable_density_normals = True diff --git a/nerfstudio/configs/blender.gin b/nerfstudio/configs/blender.gin new file mode 100644 index 0000000000..20f74c9d27 --- /dev/null +++ b/nerfstudio/configs/blender.gin @@ -0,0 +1,15 @@ +Config.exp_name = 'test' +Config.dataset_loader = 'blender' +Config.near = 2 +Config.far = 6 +Config.factor = 0 +Config.hash_decay_mults = 10 + +Model.raydist_fn = None + +PropMLP.disable_density_normals = True +PropMLP.disable_rgb = True +PropMLP.grid_level_dim = 1 + +NerfMLP.disable_density_normals = True + diff --git a/nerfstudio/configs/blender_refnerf.gin b/nerfstudio/configs/blender_refnerf.gin new file mode 100644 index 0000000000..42da4df7dc --- /dev/null +++ b/nerfstudio/configs/blender_refnerf.gin @@ -0,0 +1,41 @@ +Config.dataset_loader = 'blender' +Config.batching = 'single_image' +Config.near = 2 +Config.far = 6 + +Config.eval_render_interval 
= 5 +Config.compute_normal_metrics = True +Config.data_loss_type = 'mse' +Config.distortion_loss_mult = 0.0 +Config.orientation_loss_mult = 0.1 +Config.orientation_loss_target = 'normals_pred' +Config.predicted_normal_loss_mult = 3e-4 +Config.orientation_coarse_loss_mult = 0.01 +Config.predicted_normal_coarse_loss_mult = 3e-5 +Config.interlevel_loss_mult = 0.0 +Config.data_coarse_loss_mult = 0.1 +Config.adam_eps = 1e-8 + +Model.num_levels = 2 +Model.single_mlp = True +Model.num_prop_samples = 128 # This needs to be set despite single_mlp = True. +Model.num_nerf_samples = 128 +Model.anneal_slope = 0. +Model.dilation_multiplier = 0. +Model.dilation_bias = 0. +Model.single_jitter = False +Model.resample_padding = 0.01 +Model.distinct_prop = False + +NerfMLP.disable_density_normals = False +NerfMLP.enable_pred_normals = True +NerfMLP.use_directional_enc = True +NerfMLP.use_reflections = True +NerfMLP.deg_view = 5 +NerfMLP.enable_pred_roughness = True +NerfMLP.use_diffuse_color = True +NerfMLP.use_specular_tint = True +NerfMLP.use_n_dot_v = True +NerfMLP.bottleneck_width = 128 +NerfMLP.density_bias = 0.5 +NerfMLP.max_deg_point = 16 diff --git a/nerfstudio/configs/llff_256.gin b/nerfstudio/configs/llff_256.gin new file mode 100644 index 0000000000..eeeba4fc80 --- /dev/null +++ b/nerfstudio/configs/llff_256.gin @@ -0,0 +1,19 @@ +Config.dataset_loader = 'llff' +Config.near = 0. +Config.far = 1. 
+Config.factor = 4 +Config.forward_facing = True +Config.adam_eps = 1e-8 + +Model.opaque_background = True +Model.num_levels = 2 +Model.num_prop_samples = 128 +Model.num_nerf_samples = 32 + +PropMLP.disable_density_normals = True +PropMLP.disable_rgb = True + +NerfMLP.disable_density_normals = True + +NerfMLP.max_deg_point = 16 +PropMLP.max_deg_point = 16 diff --git a/nerfstudio/configs/llff_512.gin b/nerfstudio/configs/llff_512.gin new file mode 100644 index 0000000000..eeeba4fc80 --- /dev/null +++ b/nerfstudio/configs/llff_512.gin @@ -0,0 +1,19 @@ +Config.dataset_loader = 'llff' +Config.near = 0. +Config.far = 1. +Config.factor = 4 +Config.forward_facing = True +Config.adam_eps = 1e-8 + +Model.opaque_background = True +Model.num_levels = 2 +Model.num_prop_samples = 128 +Model.num_nerf_samples = 32 + +PropMLP.disable_density_normals = True +PropMLP.disable_rgb = True + +NerfMLP.disable_density_normals = True + +NerfMLP.max_deg_point = 16 +PropMLP.max_deg_point = 16 diff --git a/nerfstudio/configs/llff_raw.gin b/nerfstudio/configs/llff_raw.gin new file mode 100644 index 0000000000..343f226b90 --- /dev/null +++ b/nerfstudio/configs/llff_raw.gin @@ -0,0 +1,73 @@ +# General LLFF settings + +Config.dataset_loader = 'llff' +Config.near = 0. +Config.far = 1. +Config.factor = 4 +Config.forward_facing = True + +PropMLP.disable_density_normals = True # Turn this off if using orientation loss. +PropMLP.disable_rgb = True + +NerfMLP.disable_density_normals = True # Turn this off if using orientation loss. + +NerfMLP.max_deg_point = 16 +PropMLP.max_deg_point = 16 + +Config.train_render_every = 5000 + + +########################## RawNeRF specific settings ########################## + +Config.rawnerf_mode = True +Config.data_loss_type = 'rawnerf' +Config.apply_bayer_mask = True +Model.learned_exposure_scaling = True + +Model.num_levels = 2 +Model.num_prop_samples = 128 # Using extra samples for now because of noise instability. 
+Model.num_nerf_samples = 128 +Model.opaque_background = True +Model.distinct_prop = False + +# RGB activation we use for linear color outputs is exp(x - 5). +NerfMLP.rgb_padding = 0. +NerfMLP.rgb_activation = @math.safe_exp +NerfMLP.rgb_bias = -5. +PropMLP.rgb_padding = 0. +PropMLP.rgb_activation = @math.safe_exp +PropMLP.rgb_bias = -5. + +## Experimenting with the various regularizers and losses: +Config.interlevel_loss_mult = .0 # Turning off interlevel for now (default = 1.). +Config.distortion_loss_mult = .01 # Distortion loss helps with floaters (default = .01). +Config.orientation_loss_mult = 0. # Orientation loss also not great (try .01). +Config.data_coarse_loss_mult = 0.1 # Setting this to match old MipNeRF. + +## Density noise used in original NeRF: +NerfMLP.density_noise = 1. +PropMLP.density_noise = 1. + +## Use a single MLP for all rounds of sampling: +Model.single_mlp = True + +## Some algorithmic settings to match the paper: +Model.anneal_slope = 0. +Model.dilation_multiplier = 0. +Model.dilation_bias = 0. 
+Model.single_jitter = False +NerfMLP.weight_init = 'glorot_uniform' +PropMLP.weight_init = 'glorot_uniform' + +## Training hyperparameters used in the paper: +Config.batch_size = 16384 +Config.render_chunk_size = 16384 +Config.lr_init = 1e-3 +Config.lr_final = 1e-5 +Config.max_steps = 500000 +Config.checkpoint_every = 25000 +Config.lr_delay_steps = 2500 +Config.lr_delay_mult = 0.01 +Config.grad_max_norm = 0.1 +Config.grad_max_val = 0.1 +Config.adam_eps = 1e-8 diff --git a/nerfstudio/configs/multi360.gin b/nerfstudio/configs/multi360.gin new file mode 100644 index 0000000000..e9bef1a30c --- /dev/null +++ b/nerfstudio/configs/multi360.gin @@ -0,0 +1,5 @@ +include 'configs/360.gin' +Config.multiscale = True +Config.multiscale_levels = 4 + + diff --git a/nerfstudio/process_data/colmap_utils - Copia.py b/nerfstudio/process_data/colmap_utils - Copia.py new file mode 100644 index 0000000000..1d9405c81a --- /dev/null +++ b/nerfstudio/process_data/colmap_utils - Copia.py @@ -0,0 +1,714 @@ +# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Tools supporting the execution of COLMAP and preparation of COLMAP-based datasets for nerfstudio training. 
+""" + +import json +from pathlib import Path +from typing import Any, Dict, Literal, Optional, Union + +import appdirs +import cv2 +import numpy as np +import requests +import torch +from packaging.version import Version +from rich.progress import track + +# TODO(1480) use pycolmap instead of colmap_parsing_utils +# import pycolmap +from nerfstudio.data.utils.colmap_parsing_utils import ( + qvec2rotmat, + read_cameras_binary, + read_images_binary, + read_points3D_binary, + read_points3D_text, +) +from nerfstudio.process_data.process_data_utils import CameraModel +from nerfstudio.utils import colormaps +from nerfstudio.utils.rich_utils import CONSOLE, status +from nerfstudio.utils.scripts import run_command + + +def get_colmap_version(colmap_cmd: str, default_version: str = "3.8") -> Version: + """Returns the version of COLMAP. + This code assumes that colmap returns a version string of the form + "COLMAP 3.8 ..." which may not be true for all versions of COLMAP. + + Args: + default_version: Default version to return if COLMAP version can't be determined. + Returns: + The version of COLMAP. + """ + output = run_command(f"{colmap_cmd} -h", verbose=False) + assert output is not None + for line in output.split("\n"): + if line.startswith("COLMAP"): + version = line.split(" ")[1] + version = Version(version) + return version + CONSOLE.print(f"[bold red]Could not find COLMAP version. Using default {default_version}") + return Version(default_version) + + +def get_vocab_tree() -> Path: + """Return path to vocab tree. Downloads vocab tree if it doesn't exist. + + Returns: + The path to the vocab tree. 
+ """ + vocab_tree_filename = Path(appdirs.user_data_dir("nerfstudio")) / "vocab_tree.fbow" + + if not vocab_tree_filename.exists(): + r = requests.get("https://demuc.de/colmap/vocab_tree_flickr100K_words32K.bin", stream=True) + vocab_tree_filename.parent.mkdir(parents=True, exist_ok=True) + with open(vocab_tree_filename, "wb") as f: + total_length = r.headers.get("content-length") + assert total_length is not None + for chunk in track( + r.iter_content(chunk_size=1024), + total=int(total_length) / 1024 + 1, + description="Downloading vocab tree...", + ): + if chunk: + f.write(chunk) + f.flush() + return vocab_tree_filename + + +def run_colmap( + image_dir: Path, + colmap_dir: Path, + camera_model: CameraModel, + camera_mask_path: Optional[Path] = None, + gpu: bool = True, + verbose: bool = False, + matching_method: Literal["vocab_tree", "exhaustive", "sequential"] = "vocab_tree", + refine_intrinsics: bool = True, + colmap_cmd: str = "colmap", +) -> None: + """Runs COLMAP on the images. + + Args: + image_dir: Path to the directory containing the images. + colmap_dir: Path to the output directory. + camera_model: Camera model to use. + camera_mask_path: Path to the camera mask. + gpu: If True, use GPU. + verbose: If True, logs the output of the command. + matching_method: Matching method to use. + refine_intrinsics: If True, refine intrinsics. + colmap_cmd: Path to the COLMAP executable. 
+ """ + + colmap_version = get_colmap_version(colmap_cmd) + + colmap_database_path = colmap_dir / "database.db" + colmap_database_path.unlink(missing_ok=True) + + # Feature extraction + feature_extractor_cmd = [ + f"{colmap_cmd} feature_extractor", + f"--database_path {colmap_dir / 'database.db'}", + f"--image_path {image_dir}", + "--ImageReader.single_camera 1", + f"--ImageReader.camera_model {camera_model.value}", + f"--SiftExtraction.use_gpu {int(gpu)}", + ] + if camera_mask_path is not None: + feature_extractor_cmd.append(f"--ImageReader.camera_mask_path {camera_mask_path}") + feature_extractor_cmd = " ".join(feature_extractor_cmd) + with status(msg="[bold yellow]Running COLMAP feature extractor...", spinner="moon", verbose=verbose): + run_command(feature_extractor_cmd, verbose=verbose) + + CONSOLE.log("[bold green]:tada: Done extracting COLMAP features.") + + # Feature matching + feature_matcher_cmd = [ + f"{colmap_cmd} {matching_method}_matcher", + f"--database_path {colmap_dir / 'database.db'}", + f"--SiftMatching.use_gpu {int(gpu)}", + ] + if matching_method == "vocab_tree": + vocab_tree_filename = get_vocab_tree() + feature_matcher_cmd.append(f'--VocabTreeMatching.vocab_tree_path "{vocab_tree_filename}"') + feature_matcher_cmd = " ".join(feature_matcher_cmd) + with status(msg="[bold yellow]Running COLMAP feature matcher...", spinner="runner", verbose=verbose): + run_command(feature_matcher_cmd, verbose=verbose) + CONSOLE.log("[bold green]:tada: Done matching COLMAP features.") + + # Bundle adjustment + sparse_dir = colmap_dir / "sparse" + sparse_dir.mkdir(parents=True, exist_ok=True) + mapper_cmd = [ + f"{colmap_cmd} mapper", + f"--database_path {colmap_dir / 'database.db'}", + f"--image_path {image_dir}", + f"--output_path {sparse_dir}", + ] + if colmap_version >= Version("3.7"): + mapper_cmd.append("--Mapper.ba_global_function_tolerance=1e-6") + + mapper_cmd = " ".join(mapper_cmd) + + with status( + msg="[bold yellow]Running COLMAP bundle adjustment... 
(This may take a while)", + spinner="circle", + verbose=verbose, + ): + run_command(mapper_cmd, verbose=verbose) + CONSOLE.log("[bold green]:tada: Done COLMAP bundle adjustment.") + + if refine_intrinsics: + with status(msg="[bold yellow]Refine intrinsics...", spinner="dqpb", verbose=verbose): + bundle_adjuster_cmd = [ + f"{colmap_cmd} bundle_adjuster", + f"--input_path {sparse_dir}/0", + f"--output_path {sparse_dir}/0", + "--BundleAdjustment.refine_principal_point 1", + ] + run_command(" ".join(bundle_adjuster_cmd), verbose=verbose) + CONSOLE.log("[bold green]:tada: Done refining intrinsics.") + + +def parse_colmap_camera_params(camera) -> Dict[str, Any]: + """ + Parses all currently supported COLMAP cameras into the transforms.json metadata + + Args: + camera: COLMAP camera + Returns: + transforms.json metadata containing camera's intrinsics and distortion parameters + + """ + out: Dict[str, Any] = { + "w": camera.width, + "h": camera.height, + } + + # Parameters match https://github.com/colmap/colmap/blob/dev/src/base/camera_models.h + camera_params = camera.params + if camera.model == "SIMPLE_PINHOLE": + # du = 0 + # dv = 0 + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[0]) + out["cx"] = float(camera_params[1]) + out["cy"] = float(camera_params[2]) + out["k1"] = 0.0 + out["k2"] = 0.0 + out["p1"] = 0.0 + out["p2"] = 0.0 + camera_model = CameraModel.OPENCV + elif camera.model == "PINHOLE": + # f, cx, cy, k + + # du = 0 + # dv = 0 + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[1]) + out["cx"] = float(camera_params[2]) + out["cy"] = float(camera_params[3]) + out["k1"] = 0.0 + out["k2"] = 0.0 + out["p1"] = 0.0 + out["p2"] = 0.0 + camera_model = CameraModel.OPENCV + elif camera.model == "SIMPLE_RADIAL": + # f, cx, cy, k + + # r2 = u**2 + v**2; + # radial = k * r2 + # du = u * radial + # dv = u * radial + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[0]) + out["cx"] = 
float(camera_params[1]) + out["cy"] = float(camera_params[2]) + out["k1"] = float(camera_params[3]) + out["k2"] = 0.0 + out["p1"] = 0.0 + out["p2"] = 0.0 + camera_model = CameraModel.OPENCV + elif camera.model == "RADIAL": + # f, cx, cy, k1, k2 + + # r2 = u**2 + v**2; + # radial = k1 * r2 + k2 * r2 ** 2 + # du = u * radial + # dv = v * radial + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[0]) + out["cx"] = float(camera_params[1]) + out["cy"] = float(camera_params[2]) + out["k1"] = float(camera_params[3]) + out["k2"] = float(camera_params[4]) + out["p1"] = 0.0 + out["p2"] = 0.0 + camera_model = CameraModel.OPENCV + elif camera.model == "OPENCV": + # fx, fy, cx, cy, k1, k2, p1, p2 + + # uv = u * v; + # r2 = u**2 + v**2 + # radial = k1 * r2 + k2 * r2 ** 2 + # du = u * radial + 2 * p1 * u*v + p2 * (r2 + 2 * u**2) + # dv = v * radial + 2 * p2 * u*v + p1 * (r2 + 2 * v**2) + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[1]) + out["cx"] = float(camera_params[2]) + out["cy"] = float(camera_params[3]) + out["k1"] = float(camera_params[4]) + out["k2"] = float(camera_params[5]) + out["p1"] = float(camera_params[6]) + out["p2"] = float(camera_params[7]) + camera_model = CameraModel.OPENCV + elif camera.model == "OPENCV_FISHEYE": + # fx, fy, cx, cy, k1, k2, k3, k4 + + # r = sqrt(u**2 + v**2) + + # if r > eps: + # theta = atan(r) + # theta2 = theta ** 2 + # theta4 = theta2 ** 2 + # theta6 = theta4 * theta2 + # theta8 = theta4 ** 2 + # thetad = theta * (1 + k1 * theta2 + k2 * theta4 + k3 * theta6 + k4 * theta8) + # du = u * thetad / r - u; + # dv = v * thetad / r - v; + # else: + # du = dv = 0 + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[1]) + out["cx"] = float(camera_params[2]) + out["cy"] = float(camera_params[3]) + out["k1"] = float(camera_params[4]) + out["k2"] = float(camera_params[5]) + out["k3"] = float(camera_params[6]) + out["k4"] = float(camera_params[7]) + camera_model = 
CameraModel.OPENCV_FISHEYE + elif camera.model == "FULL_OPENCV": + # fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6 + + # u2 = u ** 2 + # uv = u * v + # v2 = v ** 2 + # r2 = u2 + v2 + # r4 = r2 * r2 + # r6 = r4 * r2 + # radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / + # (1 + k4 * r2 + k5 * r4 + k6 * r6) + # du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u + # dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[1]) + out["cx"] = float(camera_params[2]) + out["cy"] = float(camera_params[3]) + out["k1"] = float(camera_params[4]) + out["k2"] = float(camera_params[5]) + out["p1"] = float(camera_params[6]) + out["p2"] = float(camera_params[7]) + out["k3"] = float(camera_params[8]) + out["k4"] = float(camera_params[9]) + out["k5"] = float(camera_params[10]) + out["k6"] = float(camera_params[11]) + raise NotImplementedError(f"{camera.model} camera model is not supported yet!") + elif camera.model == "FOV": + # fx, fy, cx, cy, omega + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[1]) + out["cx"] = float(camera_params[2]) + out["cy"] = float(camera_params[3]) + out["omega"] = float(camera_params[4]) + raise NotImplementedError(f"{camera.model} camera model is not supported yet!") + elif camera.model == "SIMPLE_RADIAL_FISHEYE": + # f, cx, cy, k + + # r = sqrt(u ** 2 + v ** 2) + # if r > eps: + # theta = atan(r) + # theta2 = theta ** 2 + # thetad = theta * (1 + k * theta2) + # du = u * thetad / r - u; + # dv = v * thetad / r - v; + # else: + # du = dv = 0 + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[0]) + out["cx"] = float(camera_params[1]) + out["cy"] = float(camera_params[2]) + out["k1"] = float(camera_params[3]) + out["k2"] = 0.0 + out["k3"] = 0.0 + out["k4"] = 0.0 + camera_model = CameraModel.OPENCV_FISHEYE + elif camera.model == "RADIAL_FISHEYE": + # f, cx, cy, k1, k2 + + # r = sqrt(u ** 2 + v ** 2) + # if r > eps: + # theta = 
atan(r) + # theta2 = theta ** 2 + # theta4 = theta2 ** 2 + # thetad = theta * (1 + k * theta2) + # thetad = theta * (1 + k1 * theta2 + k2 * theta4) + # du = u * thetad / r - u; + # dv = v * thetad / r - v; + # else: + # du = dv = 0 + out["fl_x"] = float(camera_params[0]) + out["fl_y"] = float(camera_params[0]) + out["cx"] = float(camera_params[1]) + out["cy"] = float(camera_params[2]) + out["k1"] = float(camera_params[3]) + out["k2"] = float(camera_params[4]) + out["k3"] = 0 + out["k4"] = 0 + camera_model = CameraModel.OPENCV_FISHEYE + else: + # THIN_PRISM_FISHEYE not supported! + raise NotImplementedError(f"{camera.model} camera model is not supported yet!") + + out["camera_model"] = camera_model.value + return out + + +def colmap_to_json( + recon_dir: Path, + output_dir: Path, + camera_mask_path: Optional[Path] = None, + image_id_to_depth_path: Optional[Dict[int, Path]] = None, + image_rename_map: Optional[Dict[str, str]] = None, + ply_filename="sparse_pc.ply", + keep_original_world_coordinate: bool = False, + use_single_camera_mode: bool = True, +) -> int: + """Converts COLMAP's cameras.bin and images.bin to a JSON file. + + Args: + recon_dir: Path to the reconstruction directory, e.g. "sparse/0" + output_dir: Path to the output directory. + camera_model: Camera model used. + camera_mask_path: Path to the camera mask. + image_id_to_depth_path: When including sfm-based depth, embed these depth file paths in the exported json + image_rename_map: Use these image names instead of the names embedded in the COLMAP db + keep_original_world_coordinate: If True, no extra transform will be applied to world coordinate. + Colmap optimized world often have y direction of the first camera pointing towards down direction, + while nerfstudio world set z direction to be up direction for viewer. + Returns: + The number of registered images. 
+ """ + + # TODO(1480) use pycolmap + # recon = pycolmap.Reconstruction(recon_dir) + # cam_id_to_camera = recon.cameras + # im_id_to_image = recon.images + cam_id_to_camera = read_cameras_binary(recon_dir / "cameras.bin") + im_id_to_image = read_images_binary(recon_dir / "images.bin") + if set(cam_id_to_camera.keys()) != {1}: + CONSOLE.print(f"[bold yellow]Warning: More than one camera is found in {recon_dir}") + print(cam_id_to_camera) + use_single_camera_mode = False # update bool: one camera per frame + out = {} # out = {"camera_model": parse_colmap_camera_params(cam_id_to_camera[1])["camera_model"]} + else: # one camera for all frames + out = parse_colmap_camera_params(cam_id_to_camera[1]) + + frames = [] + for im_id, im_data in im_id_to_image.items(): + # NB: COLMAP uses Eigen / scalar-first quaternions + # * https://colmap.github.io/format.html + # * https://github.com/colmap/colmap/blob/bf3e19140f491c3042bfd85b7192ef7d249808ec/src/base/pose.cc#L75 + # the `rotation_matrix()` handles that format for us. 
+ + # TODO(1480) BEGIN use pycolmap API + # rotation = im_data.rotation_matrix() + rotation = qvec2rotmat(im_data.qvec) + + translation = im_data.tvec.reshape(3, 1) + w2c = np.concatenate([rotation, translation], 1) + w2c = np.concatenate([w2c, np.array([[0, 0, 0, 1]])], 0) + c2w = np.linalg.inv(w2c) + # Convert from COLMAP's camera coordinate system (OpenCV) to ours (OpenGL) + c2w[0:3, 1:3] *= -1 + if not keep_original_world_coordinate: + c2w = c2w[np.array([0, 2, 1, 3]), :] + c2w[2, :] *= -1 + + name = im_data.name + if image_rename_map is not None: + name = image_rename_map[name] + name = Path(f"./images/{name}") + + frame = { + "file_path": name.as_posix(), + "transform_matrix": c2w.tolist(), + "colmap_im_id": im_id, + } + if camera_mask_path is not None: + frame["mask_path"] = camera_mask_path.relative_to(camera_mask_path.parent.parent).as_posix() + if image_id_to_depth_path is not None: + depth_path = image_id_to_depth_path[im_id] + frame["depth_file_path"] = str(depth_path.relative_to(depth_path.parent.parent)) + + if not use_single_camera_mode: # add the camera parameters for this frame + frame.update(parse_colmap_camera_params(cam_id_to_camera[im_data.camera_id])) + + frames.append(frame) + + out["frames"] = frames + + applied_transform = None + if not keep_original_world_coordinate: + applied_transform = np.eye(4)[:3, :] + applied_transform = applied_transform[np.array([0, 2, 1]), :] + applied_transform[2, :] *= -1 + out["applied_transform"] = applied_transform.tolist() + + # create ply from colmap + assert ply_filename.endswith(".ply"), f"ply_filename: {ply_filename} does not end with '.ply'" + create_ply_from_colmap( + ply_filename, + recon_dir, + output_dir, + torch.from_numpy(applied_transform).float() if applied_transform is not None else None, + ) + out["ply_file_path"] = ply_filename + + with open(output_dir / "transforms.json", "w", encoding="utf-8") as f: + json.dump(out, f, indent=4) + + return len(frames) + + +def create_sfm_depth( + recon_dir: 
Path, + output_dir: Path, + verbose: bool = True, + depth_scale_to_integer_factor: float = 1000.0, + min_depth: float = 0.001, + max_depth: float = 10000, + max_repoj_err: float = 2.5, + min_n_visible: int = 2, + include_depth_debug: bool = False, + input_images_dir: Optional[Path] = None, +) -> Dict[int, Path]: + """Converts COLMAP's points3d.bin to sparse depth map images encoded as + 16-bit "millimeter depth" PNGs. + + Notes: + * This facility does NOT use COLMAP dense reconstruction; it creates depth + maps from sparse SfM points here. + * COLMAP does *not* reconstruct metric depth unless you give it calibrated + (metric) intrinsics as input. Therefore, "depth" in this function has + potentially ambiguous units. + + Args: + recon_dir: Path to the reconstruction directory, e.g. "sparse/0" + output_dir: Path to the output directory. + verbose: If True, logs progress of depth image creation. + depth_scale_to_integer_factor: Use this parameter to tune the conversion of + raw depth measurements to integer depth values. This value should + be equal to 1. / `depth_unit_scale_factor`, where + `depth_unit_scale_factor` is the value you provide at training time. + E.g. for millimeter depth, leave `depth_unit_scale_factor` at 1e-3 + and depth_scale_to_integer_factor at 1000. + min_depth: Discard points closer than this to the camera. + max_depth: Discard points farther than this from the camera. + max_repoj_err: Discard points with reprojection error greater than this + amount (in pixels). + min_n_visible: Discard 3D points that have been triangulated with fewer + than this many frames. + include_depth_debug: Also include debug images showing depth overlaid + upon RGB. 
+ Returns: + Depth file paths indexed by COLMAP image id + """ + + # TODO(1480) use pycolmap + # recon = pycolmap.Reconstruction(recon_dir) + # ptid_to_info = recon.points3D + # cam_id_to_camera = recon.cameras + # im_id_to_image = recon.images + ptid_to_info = read_points3D_binary(recon_dir / "points3D.bin") + cam_id_to_camera = read_cameras_binary(recon_dir / "cameras.bin") + im_id_to_image = read_images_binary(recon_dir / "images.bin") + + # Only support first camera + CAMERA_ID = 1 + W = cam_id_to_camera[CAMERA_ID].width + H = cam_id_to_camera[CAMERA_ID].height + + if verbose: + iter_images = track( + im_id_to_image.items(), total=len(im_id_to_image.items()), description="Creating depth maps ..." + ) + else: + iter_images = iter(im_id_to_image.items()) + + image_id_to_depth_path = {} + for im_id, im_data in iter_images: + # TODO(1480) BEGIN delete when abandoning colmap_parsing_utils + pids = [pid for pid in im_data.point3D_ids if pid != -1] + xyz_world = np.array([ptid_to_info[pid].xyz for pid in pids]) + rotation = qvec2rotmat(im_data.qvec) + z = (rotation @ xyz_world.T)[-1] + im_data.tvec[-1] + errors = np.array([ptid_to_info[pid].error for pid in pids]) + n_visible = np.array([len(ptid_to_info[pid].image_ids) for pid in pids]) + uv = np.array([im_data.xys[i] for i in range(len(im_data.xys)) if im_data.point3D_ids[i] != -1]) + # TODO(1480) END delete when abandoning colmap_parsing_utils + + # TODO(1480) BEGIN use pycolmap API + + # # Get only keypoints that have corresponding triangulated 3D points + # p2ds = im_data.get_valid_points2D() + + # xyz_world = np.array([ptid_to_info[p2d.point3D_id].xyz for p2d in p2ds]) + + # # COLMAP OpenCV convention: z is always positive + # z = (im_data.rotation_matrix() @ xyz_world.T)[-1] + im_data.tvec[-1] + + # # Mean reprojection error in image space + # errors = np.array([ptid_to_info[p2d.point3D_id].error for p2d in p2ds]) + + # # Number of frames in which each frame is visible + # n_visible = 
np.array([ptid_to_info[p2d.point3D_id].track.length() for p2d in p2ds]) + + # Note: these are *unrectified* pixel coordinates that should match the original input + # no matter the camera model + # uv = np.array([p2d.xy for p2d in p2ds]) + + # TODO(1480) END use pycolmap API + + idx = np.where( + (z >= min_depth) + & (z <= max_depth) + & (errors <= max_repoj_err) + & (n_visible >= min_n_visible) + & (uv[:, 0] >= 0) + & (uv[:, 0] < W) + & (uv[:, 1] >= 0) + & (uv[:, 1] < H) + ) + z = z[idx] + uv = uv[idx] + + uu, vv = uv[:, 0].astype(int), uv[:, 1].astype(int) + depth = np.zeros((H, W), dtype=np.float32) + depth[vv, uu] = z + + # E.g. if `depth` is metric and in units of meters, and `depth_scale_to_integer_factor` + # is 1000, then `depth_img` will be integer millimeters. + depth_img = (depth_scale_to_integer_factor * depth).astype(np.uint16) + + out_name = str(im_data.name) + depth_path = output_dir / out_name + if depth_path.suffix == ".jpg": + depth_path = depth_path.with_suffix(".png") + cv2.imwrite(str(depth_path), depth_img) # type: ignore + + image_id_to_depth_path[im_id] = depth_path + + if include_depth_debug: + assert input_images_dir is not None, "Need explicit input_images_dir for debug images" + assert input_images_dir.exists(), input_images_dir + + depth_flat = depth.flatten()[:, None] + overlay = 255.0 * colormaps.apply_depth_colormap(torch.from_numpy(depth_flat)).numpy() + overlay = overlay.reshape([H, W, 3]) + input_image_path = input_images_dir / im_data.name + input_image = cv2.imread(str(input_image_path)) # type: ignore + debug = 0.3 * input_image + 0.7 + overlay + + out_name = out_name + ".debug.jpg" + output_path = output_dir / "debug_depth" / out_name + output_path.parent.mkdir(parents=True, exist_ok=True) + cv2.imwrite(str(output_path), debug.astype(np.uint8)) # type: ignore + + return image_id_to_depth_path + + +def get_matching_summary(num_initial_frames: int, num_matched_frames: int) -> str: + """Returns a summary of the matching results. 
+ + Args: + num_initial_frames: The number of initial frames. + num_matched_frames: The number of matched frames. + + Returns: + A summary of the matching results. + """ + match_ratio = num_matched_frames / num_initial_frames + if match_ratio == 1: + return "[bold green]COLMAP found poses for all images, CONGRATS!" + if match_ratio < 0.4: + result = f"[bold red]COLMAP only found poses for {num_matched_frames / num_initial_frames * 100:.2f}%" + result += " of the images. This is low.\nThis can be caused by a variety of reasons," + result += " such poor scene coverage, blurry images, or large exposure changes." + return result + if match_ratio < 0.8: + result = f"[bold yellow]COLMAP only found poses for {num_matched_frames / num_initial_frames * 100:.2f}%" + result += " of the images.\nThis isn't great, but may be ok." + result += "\nMissing poses can be caused by a variety of reasons, such poor scene coverage, blurry images," + result += " or large exposure changes." + return result + return f"[bold green]COLMAP found poses for {num_matched_frames / num_initial_frames * 100:.2f}% of the images." + + +def create_ply_from_colmap( + filename: str, recon_dir: Path, output_dir: Path, applied_transform: Union[torch.Tensor, None] +) -> None: + """Writes a ply file from colmap. 
+ + Args: + filename: file name for .ply + recon_dir: Directory to grab colmap points + output_dir: Directory to output .ply + """ + if (recon_dir / "points3D.bin").exists(): + colmap_points = read_points3D_binary(recon_dir / "points3D.bin") + elif (recon_dir / "points3D.txt").exists(): + colmap_points = read_points3D_text(recon_dir / "points3D.txt") + else: + raise ValueError(f"Could not find points3D.txt or points3D.bin in {recon_dir}") + + # Load point Positions + points3D = torch.from_numpy(np.array([p.xyz for p in colmap_points.values()], dtype=np.float32)) + if applied_transform is not None: + assert applied_transform.shape == (3, 4) + points3D = torch.einsum("ij,bj->bi", applied_transform[:3, :3], points3D) + applied_transform[:3, 3] + + # Load point colours + points3D_rgb = torch.from_numpy(np.array([p.rgb for p in colmap_points.values()], dtype=np.uint8)) + + # write ply + with open(output_dir / filename, "w") as f: + # Header + f.write("ply\n") + f.write("format ascii 1.0\n") + f.write(f"element vertex {len(points3D)}\n") + f.write("property float x\n") + f.write("property float y\n") + f.write("property float z\n") + f.write("property uint8 red\n") + f.write("property uint8 green\n") + f.write("property uint8 blue\n") + f.write("end_header\n") + + for coord, color in zip(points3D, points3D_rgb): + x, y, z = coord + r, g, b = color + f.write(f"{x:8f} {y:8f} {z:8f} {r} {g} {b}\n") diff --git a/nerfstudio/scripts/exporter_frank.py b/nerfstudio/scripts/exporter_frank.py new file mode 100644 index 0000000000..7d7d54be40 --- /dev/null +++ b/nerfstudio/scripts/exporter_frank.py @@ -0,0 +1,676 @@ +# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Script for exporting NeRF into other formats. +""" + +from __future__ import annotations + +import json +import os +import sys +import typing +from collections import OrderedDict +from dataclasses import dataclass, field +from importlib.metadata import version +from pathlib import Path +from typing import List, Optional, Tuple, Union, cast + +import numpy as np +import open3d as o3d +import torch +import tyro +from typing_extensions import Annotated, Literal + +from nerfstudio.cameras.rays import RayBundle +from nerfstudio.data.datamanagers.full_images_datamanager import FullImageDatamanager +from nerfstudio.data.datamanagers.random_cameras_datamanager import RandomCamerasDataManager +from nerfstudio.data.datamanagers.base_datamanager import VanillaDataManager +from nerfstudio.data.datamanagers.parallel_datamanager import ParallelDataManager +from nerfstudio.data.scene_box import OrientedBox +from nerfstudio.exporter import texture_utils, tsdf_utils +from nerfstudio.exporter.exporter_utils import collect_camera_poses, generate_point_cloud, get_mesh_from_filename +from nerfstudio.exporter.marching_cubes import generate_mesh_with_multires_marching_cubes +from nerfstudio.fields.sdf_field import SDFField # noqa +from nerfstudio.models.splatfacto import SplatfactoModel +from nerfstudio.pipelines.base_pipeline import Pipeline, VanillaPipeline +from nerfstudio.utils.eval_utils import eval_setup +from nerfstudio.utils.rich_utils import CONSOLE + + +@dataclass +class Exporter: + """Export the mesh from a YML config to a folder.""" + + load_config: Path + 
"""Path to the config YAML file.""" + output_dir: Path + """Path to the output directory.""" + + +def validate_pipeline(normal_method: str, normal_output_name: str, pipeline: Pipeline) -> None: + """Check that the pipeline is valid for this exporter. + + Args: + normal_method: Method to estimate normals with. Either "open3d" or "model_output". + normal_output_name: Name of the normal output. + pipeline: Pipeline to evaluate with. + """ + if normal_method == "model_output": + CONSOLE.print("Checking that the pipeline has a normal output.") + origins = torch.zeros((1, 3), device=pipeline.device) + directions = torch.ones_like(origins) + pixel_area = torch.ones_like(origins[..., :1]) + camera_indices = torch.zeros_like(origins[..., :1]) + metadata = {"directions_norm": torch.linalg.vector_norm(directions, dim=-1, keepdim=True)} + ray_bundle = RayBundle( + origins=origins, + directions=directions, + pixel_area=pixel_area, + camera_indices=camera_indices, + metadata=metadata, + ) + outputs = pipeline.model(ray_bundle) + if normal_output_name not in outputs: + CONSOLE.print(f"[bold yellow]Warning: Normal output '{normal_output_name}' not found in pipeline outputs.") + CONSOLE.print(f"Available outputs: {list(outputs.keys())}") + CONSOLE.print( + "[bold yellow]Warning: Please train a model with normals " + "(e.g., nerfacto with predicted normals turned on)." + ) + CONSOLE.print("[bold yellow]Warning: Or change --normal-method") + CONSOLE.print("[bold yellow]Exiting early.") + sys.exit(1) + + +@dataclass +class ExportPointCloud(Exporter): + """Export NeRF as a point cloud.""" + + num_points: int = 1000000 + """Number of points to generate. 
May result in less if outlier removal is used.""" + remove_outliers: bool = True + """Remove outliers from the point cloud.""" + reorient_normals: bool = True + """Reorient point cloud normals based on view direction.""" + normal_method: Literal["open3d", "model_output"] = "model_output" + """Method to estimate normals with.""" + normal_output_name: str = "normals" + """Name of the normal output.""" + depth_output_name: str = "depth" + """Name of the depth output.""" + rgb_output_name: str = "rgb" + """Name of the RGB output.""" + + obb_center: Optional[Tuple[float, float, float]] = None + """Center of the oriented bounding box.""" + obb_rotation: Optional[Tuple[float, float, float]] = None + """Rotation of the oriented bounding box. Expressed as RPY Euler angles in radians""" + obb_scale: Optional[Tuple[float, float, float]] = None + """Scale of the oriented bounding box along each axis.""" + num_rays_per_batch: int = 32768 + """Number of rays to evaluate per batch. Decrease if you run out of memory.""" + std_ratio: float = 10.0 + """Threshold based on STD of the average distances across the point cloud to remove outliers.""" + save_world_frame: bool = False + """If set, saves the point cloud in the same frame as the original dataset. Otherwise, uses the + scaled and reoriented coordinate space expected by the NeRF models.""" + + def main(self) -> None: + """Export point cloud.""" + + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config) + + validate_pipeline(self.normal_method, self.normal_output_name, pipeline) + + # Increase the batchsize to speed up the evaluation. 
+ assert isinstance( + pipeline.datamanager, + (VanillaDataManager, ParallelDataManager,FullImageDatamanager, RandomCamerasDataManager)) + if isinstance(pipeline.datamanager, VanillaDataManager): + assert pipeline.datamanager.train_pixel_sampler is not None + pipeline.datamanager.train_pixel_sampler.num_rays_per_batch = self.num_rays_per_batch + + # Whether the normals should be estimated based on the point cloud. + estimate_normals = self.normal_method == "open3d" + crop_obb = None + if self.obb_center is not None and self.obb_rotation is not None and self.obb_scale is not None: + crop_obb = OrientedBox.from_params(self.obb_center, self.obb_rotation, self.obb_scale) + pcd = generate_point_cloud( + pipeline=pipeline, + num_points=self.num_points, + remove_outliers=self.remove_outliers, + reorient_normals=self.reorient_normals, + estimate_normals=estimate_normals, + rgb_output_name=self.rgb_output_name, + depth_output_name=self.depth_output_name, + normal_output_name=self.normal_output_name if self.normal_method == "model_output" else None, + crop_obb=crop_obb, + std_ratio=self.std_ratio, + ) + if self.save_world_frame: + # apply the inverse dataparser transform to the point cloud + points = np.asarray(pcd.points) + poses = np.eye(4, dtype=np.float32)[None, ...].repeat(points.shape[0], axis=0)[:, :3, :] + poses[:, :3, 3] = points + poses = pipeline.datamanager.train_dataparser_outputs.transform_poses_to_original_space( + torch.from_numpy(poses) + ) + points = poses[:, :3, 3].numpy() + pcd.points = o3d.utility.Vector3dVector(points) + + torch.cuda.empty_cache() + + CONSOLE.print(f"[bold green]:white_check_mark: Generated {pcd}") + CONSOLE.print("Saving Point Cloud...") + tpcd = o3d.t.geometry.PointCloud.from_legacy(pcd) + # The legacy PLY writer converts colors to UInt8, + # let us do the same to save space. 
+ tpcd.point.colors = (tpcd.point.colors * 255).to(o3d.core.Dtype.UInt8) # type: ignore + o3d.t.io.write_point_cloud(str(self.output_dir / "point_cloud.ply"), tpcd) + print("\033[A\033[A") + CONSOLE.print("[bold green]:white_check_mark: Saving Point Cloud") + + +@dataclass +class ExportTSDFMesh(Exporter): + """ + Export a mesh using TSDF processing. + """ + + downscale_factor: int = 2 + """Downscale the images starting from the resolution used for training.""" + depth_output_name: str = "depth" + """Name of the depth output.""" + rgb_output_name: str = "rgb" + """Name of the RGB output.""" + resolution: Union[int, List[int]] = field(default_factory=lambda: [128, 128, 128]) + """Resolution of the TSDF volume or [x, y, z] resolutions individually.""" + batch_size: int = 10 + """How many depth images to integrate per batch.""" + use_bounding_box: bool = True + """Whether to use a bounding box for the TSDF volume.""" + bounding_box_min: Tuple[float, float, float] = (-1, -1, -1) + """Minimum of the bounding box, used if use_bounding_box is True.""" + bounding_box_max: Tuple[float, float, float] = (1, 1, 1) + """Minimum of the bounding box, used if use_bounding_box is True.""" + texture_method: Literal["tsdf", "nerf"] = "nerf" + """Method to texture the mesh with. Either 'tsdf' or 'nerf'.""" + px_per_uv_triangle: int = 4 + """Number of pixels per UV triangle.""" + unwrap_method: Literal["xatlas", "custom"] = "xatlas" + """The method to use for unwrapping the mesh.""" + num_pixels_per_side: int = 2048 + """If using xatlas for unwrapping, the pixels per side of the texture image.""" + target_num_faces: Optional[int] = 50000 + """Target number of faces for the mesh to texture.""" + refine_mesh_using_initial_aabb_estimate: bool = False + """Refine the mesh using the initial AABB estimate.""" + refinement_epsilon: float = 1e-2 + """Refinement epsilon for the mesh. 
This is the distance in meters that the refined AABB/OBB will be expanded by + in each direction.""" + + def main(self) -> None: + """Export mesh""" + + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config) + + tsdf_utils.export_tsdf_mesh( + pipeline, + self.output_dir, + self.downscale_factor, + self.depth_output_name, + self.rgb_output_name, + self.resolution, + self.batch_size, + use_bounding_box=self.use_bounding_box, + bounding_box_min=self.bounding_box_min, + bounding_box_max=self.bounding_box_max, + refine_mesh_using_initial_aabb_estimate=self.refine_mesh_using_initial_aabb_estimate, + refinement_epsilon=self.refinement_epsilon, + ) + + # possibly + # texture the mesh with NeRF and export to a mesh.obj file + # and a material and texture file + if self.texture_method == "nerf": + # load the mesh from the tsdf export + mesh = get_mesh_from_filename( + str(self.output_dir / "tsdf_mesh.ply"), target_num_faces=self.target_num_faces + ) + CONSOLE.print("Texturing mesh with NeRF") + texture_utils.export_textured_mesh( + mesh, + pipeline, + self.output_dir, + px_per_uv_triangle=self.px_per_uv_triangle if self.unwrap_method == "custom" else None, + unwrap_method=self.unwrap_method, + num_pixels_per_side=self.num_pixels_per_side, + ) + + +@dataclass +class ExportPoissonMesh(Exporter): + """ + Export a mesh using poisson surface reconstruction. + """ + + num_points: int = 1000000 + """Number of points to generate. 
May result in less if outlier removal is used.""" + remove_outliers: bool = True + """Remove outliers from the point cloud.""" + reorient_normals: bool = True + """Reorient point cloud normals based on view direction.""" + depth_output_name: str = "depth" + """Name of the depth output.""" + rgb_output_name: str = "rgb" + """Name of the RGB output.""" + normal_method: Literal["open3d", "model_output"] = "model_output" + """Method to estimate normals with.""" + normal_output_name: str = "normals" + """Name of the normal output.""" + save_point_cloud: bool = False + """Whether to save the point cloud.""" + obb_center: Optional[Tuple[float, float, float]] = None + """Center of the oriented bounding box.""" + obb_rotation: Optional[Tuple[float, float, float]] = None + """Rotation of the oriented bounding box. Expressed as RPY Euler angles in radians""" + obb_scale: Optional[Tuple[float, float, float]] = None + """Scale of the oriented bounding box along each axis.""" + num_rays_per_batch: int = 32768 + """Number of rays to evaluate per batch. Decrease if you run out of memory.""" + texture_method: Literal["point_cloud", "nerf"] = "nerf" + """Method to texture the mesh with. 
Either 'point_cloud' or 'nerf'.""" + px_per_uv_triangle: int = 4 + """Number of pixels per UV triangle.""" + unwrap_method: Literal["xatlas", "custom"] = "xatlas" + """The method to use for unwrapping the mesh.""" + num_pixels_per_side: int = 2048 + """If using xatlas for unwrapping, the pixels per side of the texture image.""" + target_num_faces: Optional[int] = 50000 + """Target number of faces for the mesh to texture.""" + std_ratio: float = 10.0 + """Threshold based on STD of the average distances across the point cloud to remove outliers.""" + + def main(self) -> None: + """Export mesh""" + + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config) + + validate_pipeline(self.normal_method, self.normal_output_name, pipeline) + + # Increase the batchsize to speed up the evaluation. + assert isinstance( + pipeline.datamanager, + (VanillaDataManager, ParallelDataManager,FullImageDatamanager,RandomCamerasDataManager)) + if isinstance(pipeline.datamanager, VanillaDataManager): + assert pipeline.datamanager.train_pixel_sampler is not None + pipeline.datamanager.train_pixel_sampler.num_rays_per_batch = self.num_rays_per_batch + + # Whether the normals should be estimated based on the point cloud. 
+ estimate_normals = self.normal_method == "open3d" + if self.obb_center is not None and self.obb_rotation is not None and self.obb_scale is not None: + crop_obb = OrientedBox.from_params(self.obb_center, self.obb_rotation, self.obb_scale) + else: + crop_obb = None + + pcd = generate_point_cloud( + pipeline=pipeline, + num_points=self.num_points, + remove_outliers=self.remove_outliers, + reorient_normals=self.reorient_normals, + estimate_normals=estimate_normals, + rgb_output_name=self.rgb_output_name, + depth_output_name=self.depth_output_name, + normal_output_name=self.normal_output_name if self.normal_method == "model_output" else None, + crop_obb=crop_obb, + std_ratio=self.std_ratio, + ) + torch.cuda.empty_cache() + CONSOLE.print(f"[bold green]:white_check_mark: Generated {pcd}") + + if self.save_point_cloud: + CONSOLE.print("Saving Point Cloud...") + o3d.io.write_point_cloud(str(self.output_dir / "point_cloud.ply"), pcd) + print("\033[A\033[A") + CONSOLE.print("[bold green]:white_check_mark: Saving Point Cloud") + + CONSOLE.print("Computing Mesh... 
this may take a while.") + mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9) + vertices_to_remove = densities < np.quantile(densities, 0.1) + mesh.remove_vertices_by_mask(vertices_to_remove) + print("\033[A\033[A") + CONSOLE.print("[bold green]:white_check_mark: Computing Mesh") + + CONSOLE.print("Saving Mesh...") + o3d.io.write_triangle_mesh(str(self.output_dir / "poisson_mesh.ply"), mesh) + print("\033[A\033[A") + CONSOLE.print("[bold green]:white_check_mark: Saving Mesh") + + # This will texture the mesh with NeRF and export to a mesh.obj file + # and a material and texture file + if self.texture_method == "nerf": + # load the mesh from the poisson reconstruction + mesh = get_mesh_from_filename( + str(self.output_dir / "poisson_mesh.ply"), target_num_faces=self.target_num_faces + ) + CONSOLE.print("Texturing mesh with NeRF") + texture_utils.export_textured_mesh( + mesh, + pipeline, + self.output_dir, + px_per_uv_triangle=self.px_per_uv_triangle if self.unwrap_method == "custom" else None, + unwrap_method=self.unwrap_method, + num_pixels_per_side=self.num_pixels_per_side, + ) + + +@dataclass +class ExportMarchingCubesMesh(Exporter): + """Export a mesh using marching cubes.""" + + isosurface_threshold: float = 0.0 + """The isosurface threshold for extraction. 
For SDF based methods the surface is the zero level set.""" + resolution: int = 1024 + """Marching cube resolution.""" + simplify_mesh: bool = False + """Whether to simplify the mesh.""" + bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0) + """Minimum of the bounding box.""" + bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0) + """Maximum of the bounding box.""" + px_per_uv_triangle: int = 4 + """Number of pixels per UV triangle.""" + unwrap_method: Literal["xatlas", "custom"] = "xatlas" + """The method to use for unwrapping the mesh.""" + num_pixels_per_side: int = 2048 + """If using xatlas for unwrapping, the pixels per side of the texture image.""" + target_num_faces: Optional[int] = 50000 + """Target number of faces for the mesh to texture.""" + + def main(self) -> None: + """Main function.""" + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config) + + # TODO: Make this work with Density Field + assert hasattr(pipeline.model.config, "sdf_field"), "Model must have an SDF field." + + CONSOLE.print("Extracting mesh with marching cubes... which may take a while") + + assert self.resolution % 512 == 0, f"""resolution must be divisible by 512, got {self.resolution}. + This is important because the algorithm uses a multi-resolution approach + to evaluate the SDF where the minimum resolution is 512.""" + + # Extract mesh using marching cubes for sdf at a multi-scale resolution. 
+ multi_res_mesh = generate_mesh_with_multires_marching_cubes( + geometry_callable_field=lambda x: cast(SDFField, pipeline.model.field) + .forward_geonetwork(x)[:, 0] + .contiguous(), + resolution=self.resolution, + bounding_box_min=self.bounding_box_min, + bounding_box_max=self.bounding_box_max, + isosurface_threshold=self.isosurface_threshold, + coarse_mask=None, + ) + filename = self.output_dir / "sdf_marching_cubes_mesh.ply" + multi_res_mesh.export(filename) + + # load the mesh from the marching cubes export + mesh = get_mesh_from_filename(str(filename), target_num_faces=self.target_num_faces) + CONSOLE.print("Texturing mesh with NeRF...") + texture_utils.export_textured_mesh( + mesh, + pipeline, + self.output_dir, + px_per_uv_triangle=self.px_per_uv_triangle if self.unwrap_method == "custom" else None, + unwrap_method=self.unwrap_method, + num_pixels_per_side=self.num_pixels_per_side, + ) + + +@dataclass +class ExportCameraPoses(Exporter): + """ + Export camera poses to a .json file. + """ + + def main(self) -> None: + """Export camera poses""" + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config) + assert isinstance(pipeline, VanillaPipeline) + train_frames, eval_frames = collect_camera_poses(pipeline) + + for file_name, frames in [("transforms_train.json", train_frames), ("transforms_eval.json", eval_frames)]: + if len(frames) == 0: + CONSOLE.print(f"[bold yellow]No frames found for {file_name}. 
Skipping.") + continue + + output_file_path = os.path.join(self.output_dir, file_name) + + with open(output_file_path, "w", encoding="UTF-8") as f: + json.dump(frames, f, indent=4) + + CONSOLE.print(f"[bold green]:white_check_mark: Saved poses to {output_file_path}") + + +@dataclass +class ExportGaussianSplat(Exporter): + """ + Export 3D Gaussian Splatting model to a .ply + """ + + output_filename: str = "splat.ply" + """Name of the output file.""" + obb_center: Optional[Tuple[float, float, float]] = None + """Center of the oriented bounding box.""" + obb_rotation: Optional[Tuple[float, float, float]] = None + """Rotation of the oriented bounding box. Expressed as RPY Euler angles in radians""" + obb_scale: Optional[Tuple[float, float, float]] = None + """Scale of the oriented bounding box along each axis.""" + ply_color_mode: Literal["sh_coeffs", "rgb"] = "sh_coeffs" + """If "rgb", export colors as red/green/blue fields. Otherwise, export colors as + spherical harmonics coefficients.""" + + @staticmethod + def write_ply( + filename: str, + count: int, + map_to_tensors: typing.OrderedDict[str, np.ndarray], + ): + """ + Writes a PLY file with given vertex properties and a tensor of float or uint8 values in the order specified by the OrderedDict. + Note: All float values will be converted to float32 for writing. + + Parameters: + filename (str): The name of the file to write. + count (int): The number of vertices to write. + map_to_tensors (OrderedDict[str, np.ndarray]): An ordered dictionary mapping property names to numpy arrays of float or uint8 values. + Each array should be 1-dimensional and of equal length matching 'count'. Arrays should not be empty. 
+ """ + + # Ensure count matches the length of all tensors + if not all(tensor.size == count for tensor in map_to_tensors.values()): + raise ValueError("Count does not match the length of all tensors") + + # Type check for numpy arrays of type float or uint8 and non-empty + if not all( + isinstance(tensor, np.ndarray) + and (tensor.dtype.kind == "f" or tensor.dtype == np.uint8) + and tensor.size > 0 + for tensor in map_to_tensors.values() + ): + raise ValueError("All tensors must be numpy arrays of float or uint8 type and not empty") + + with open(filename, "wb") as ply_file: + nerfstudio_version = version("nerfstudio") + # Write PLY header + ply_file.write(b"ply\n") + ply_file.write(b"format binary_little_endian 1.0\n") + ply_file.write(f"comment Generated by Nerstudio {nerfstudio_version}\n".encode()) + ply_file.write(b"comment Vertical Axis: z\n") + ply_file.write(f"element vertex {count}\n".encode()) + + # Write properties, in order due to OrderedDict + for key, tensor in map_to_tensors.items(): + data_type = "float" if tensor.dtype.kind == "f" else "uchar" + ply_file.write(f"property {data_type} {key}\n".encode()) + + ply_file.write(b"end_header\n") + + # Write binary data + # Note: If this is a performance bottleneck consider using numpy.hstack for efficiency improvement + for i in range(count): + for tensor in map_to_tensors.values(): + value = tensor[i] + if tensor.dtype.kind == "f": + ply_file.write(np.float32(value).tobytes()) + elif tensor.dtype == np.uint8: + ply_file.write(value.tobytes()) + + def main(self) -> None: + if not self.output_dir.exists(): + self.output_dir.mkdir(parents=True) + + _, pipeline, _, _ = eval_setup(self.load_config, test_mode="inference") + + assert isinstance(pipeline.model, SplatfactoModel) + + model: SplatfactoModel = pipeline.model + + filename = self.output_dir / self.output_filename + + map_to_tensors = OrderedDict() + + with torch.no_grad(): + positions = model.means.cpu().numpy() + count = positions.shape[0] + n = count 
+ map_to_tensors["x"] = positions[:, 0] + map_to_tensors["y"] = positions[:, 1] + map_to_tensors["z"] = positions[:, 2] + map_to_tensors["nx"] = np.zeros(n, dtype=np.float32) + map_to_tensors["ny"] = np.zeros(n, dtype=np.float32) + map_to_tensors["nz"] = np.zeros(n, dtype=np.float32) + + if self.ply_color_mode == "rgb": + colors = torch.clamp(model.colors.clone(), 0.0, 1.0).data.cpu().numpy() + colors = (colors * 255).astype(np.uint8) + map_to_tensors["red"] = colors[:, 0] + map_to_tensors["green"] = colors[:, 1] + map_to_tensors["blue"] = colors[:, 2] + elif self.ply_color_mode == "sh_coeffs": + shs_0 = model.shs_0.contiguous().cpu().numpy() + for i in range(shs_0.shape[1]): + map_to_tensors[f"f_dc_{i}"] = shs_0[:, i, None] + + if model.config.sh_degree > 0: + if self.ply_color_mode == "rgb": + CONSOLE.print( + "Warning: model has higher level of spherical harmonics, ignoring them and only export rgb." + ) + elif self.ply_color_mode == "sh_coeffs": + # transpose(1, 2) was needed to match the sh order in Inria version + shs_rest = model.shs_rest.transpose(1, 2).contiguous().cpu().numpy() + shs_rest = shs_rest.reshape((n, -1)) + for i in range(shs_rest.shape[-1]): + map_to_tensors[f"f_rest_{i}"] = shs_rest[:, i, None] + + map_to_tensors["opacity"] = model.opacities.data.cpu().numpy() + + scales = model.scales.data.cpu().numpy() + for i in range(3): + map_to_tensors[f"scale_{i}"] = scales[:, i, None] + + quats = model.quats.data.cpu().numpy() + for i in range(4): + map_to_tensors[f"rot_{i}"] = quats[:, i, None] + + if self.obb_center is not None and self.obb_rotation is not None and self.obb_scale is not None: + crop_obb = OrientedBox.from_params(self.obb_center, self.obb_rotation, self.obb_scale) + assert crop_obb is not None + mask = crop_obb.within(torch.from_numpy(positions)).numpy() + for k, t in map_to_tensors.items(): + map_to_tensors[k] = map_to_tensors[k][mask] + + n = map_to_tensors["x"].shape[0] + count = n + + # post optimization, it is possible have 
NaN/Inf values in some attributes + # to ensure the exported ply file has finite values, we enforce finite filters. + select = np.ones(n, dtype=bool) + for k, t in map_to_tensors.items(): + n_before = np.sum(select) + select = np.logical_and(select, np.isfinite(t).all(axis=-1)) + n_after = np.sum(select) + if n_after < n_before: + CONSOLE.print(f"{n_before - n_after} NaN/Inf elements in {k}") + nan_count = np.sum(select) - n + + # filter gaussians that have opacities < 1/255, because they are skipped in cuda rasterization + low_opacity_gaussians = (map_to_tensors["opacity"]).squeeze(axis=-1) < -5.5373 # logit(1/255) + lowopa_count = np.sum(low_opacity_gaussians) + select[low_opacity_gaussians] = 0 + + if np.sum(select) < n: + CONSOLE.print( + f"{nan_count} Gaussians have NaN/Inf and {lowopa_count} have low opacity, only export {np.sum(select)}/{n}" + ) + for k, t in map_to_tensors.items(): + map_to_tensors[k] = map_to_tensors[k][select] + count = np.sum(select) + + ExportGaussianSplat.write_ply(str(filename), count, map_to_tensors) + + +Commands = tyro.conf.FlagConversionOff[ + Union[ + Annotated[ExportPointCloud, tyro.conf.subcommand(name="pointcloud")], + Annotated[ExportTSDFMesh, tyro.conf.subcommand(name="tsdf")], + Annotated[ExportPoissonMesh, tyro.conf.subcommand(name="poisson")], + Annotated[ExportMarchingCubesMesh, tyro.conf.subcommand(name="marching-cubes")], + Annotated[ExportCameraPoses, tyro.conf.subcommand(name="cameras")], + Annotated[ExportGaussianSplat, tyro.conf.subcommand(name="gaussian-splat")], + ] +] + + +def entrypoint(): + """Entrypoint for use with pyproject scripts.""" + tyro.extras.set_accent_color("bright_yellow") + tyro.cli(Commands).main() + + +if __name__ == "__main__": + entrypoint() + + +def get_parser_fn(): + """Get the parser function for the sphinx docs.""" + return tyro.extras.get_parser(Commands) # noqa diff --git a/nerfstudio_stable_environment_post_zipnerf.yaml b/nerfstudio_stable_environment_post_zipnerf.yaml new file mode 
100644 index 0000000000..91fb5ad9d7 --- /dev/null +++ b/nerfstudio_stable_environment_post_zipnerf.yaml @@ -0,0 +1,319 @@ +name: nerfstudio_test +channels: + - nvidia/label/cuda-11.8.0 + - defaults + - https://repo.anaconda.com/pkgs/main + - https://repo.anaconda.com/pkgs/r + - https://repo.anaconda.com/pkgs/msys2 +dependencies: + - bzip2=1.0.8=h2bbff1b_6 + - ca-certificates=2025.2.25=haa95532_0 + - cuda-cccl=11.8.89=0 + - cuda-command-line-tools=11.8.0=0 + - cuda-compiler=11.8.0=0 + - cuda-cudart=11.8.89=0 + - cuda-cudart-dev=11.8.89=0 + - cuda-cuobjdump=11.8.86=0 + - cuda-cupti=11.8.87=0 + - cuda-cuxxfilt=11.8.86=0 + - cuda-documentation=11.8.86=0 + - cuda-libraries=11.8.0=0 + - cuda-libraries-dev=11.8.0=0 + - cuda-memcheck=11.8.86=0 + - cuda-nsight-compute=11.8.0=0 + - cuda-nvcc=11.8.89=0 + - cuda-nvdisasm=11.8.86=0 + - cuda-nvml-dev=11.8.86=0 + - cuda-nvprof=11.8.87=0 + - cuda-nvprune=11.8.86=0 + - cuda-nvrtc=11.8.89=0 + - cuda-nvrtc-dev=11.8.89=0 + - cuda-nvtx=11.8.86=0 + - cuda-nvvp=11.8.87=0 + - cuda-profiler-api=11.8.86=0 + - cuda-sanitizer-api=11.8.86=0 + - cuda-toolkit=11.8.0=0 + - cuda-tools=11.8.0=0 + - cuda-visual-tools=11.8.0=0 + - libcublas=11.11.3.6=0 + - libcublas-dev=11.11.3.6=0 + - libcufft=10.9.0.58=0 + - libcufft-dev=10.9.0.58=0 + - libcurand=10.3.0.86=0 + - libcurand-dev=10.3.0.86=0 + - libcusolver=11.4.1.48=0 + - libcusolver-dev=11.4.1.48=0 + - libcusparse=11.7.5.86=0 + - libcusparse-dev=11.7.5.86=0 + - libffi=3.4.4=hd77b12b_1 + - libnpp=11.8.0.86=0 + - libnpp-dev=11.8.0.86=0 + - libnvjpeg=11.9.0.86=0 + - libnvjpeg-dev=11.9.0.86=0 + - nsight-compute=2022.3.0.22=0 + - openssl=3.0.16=h3f729d1_0 + - python=3.10.16=h4607a30_1 + - sqlite=3.45.3=h2bbff1b_0 + - tk=8.6.14=h0416ee5_0 + - vc=14.42=haa95532_4 + - vs2015_runtime=14.42.34433=he0abc0d_4 + - wheel=0.45.1=py310haa95532_0 + - xz=5.6.4=h4754444_1 + - zlib=1.2.13=h8cc25b3_1 + - pip: + - absl-py==2.2.2 + - accelerate==0.19.0 + - annotated-types==0.7.0 + - anyio==4.9.0 + - appdirs==1.4.4 + - 
argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==3.0.0 + - async-lru==2.0.5 + - attrs==25.3.0 + - av==14.3.0 + - babel==2.17.0 + - beautifulsoup4==4.13.3 + - bidict==0.23.1 + - bitsandbytes==0.43.0 + - bleach==6.2.0 + - blinker==1.9.0 + - cachetools==5.5.2 + - certifi==2025.1.31 + - cffi==1.17.1 + - charset-normalizer==3.4.1 + - click==8.1.8 + - colorama==0.4.6 + - colorlog==6.9.0 + - comet-ml==3.49.7 + - comm==0.2.2 + - configargparse==1.7 + - configobj==5.0.9 + - contourpy==1.3.1 + - cryptography==44.0.2 + - cuda-backend==0.0.0 + - cycler==0.12.1 + - dash==3.0.2 + - debugpy==1.8.14 + - decorator==5.2.1 + - defusedxml==0.7.1 + - descartes==1.1.0 + - diffusers==0.16.1 + - dill==0.3.9 + - docker-pycreds==0.4.0 + - docstring-parser==0.16 + - dulwich==0.22.8 + - everett==3.1.0 + - exceptiongroup==1.2.2 + - executing==2.2.0 + - fastjsonschema==2.21.1 + - filelock==3.18.0 + - fire==0.7.0 + - flask==3.0.3 + - fonttools==4.57.0 + - fpsample==0.3.3 + - fqdn==1.5.1 + - fsspec==2025.3.2 + - gdown==5.2.0 + - gin-config==0.5.0 + - gitdb==4.0.12 + - gitpython==3.1.44 + - grpcio==1.71.0 + - gsplat==1.5.0 + - h11==0.14.0 + - h5py==3.13.0 + - httpcore==1.0.8 + - httpx==0.28.1 + - huggingface-hub==0.25.2 + - idna==3.10 + - imageio==2.37.0 + - imageio-ffmpeg==0.6.0 + - importlib-metadata==8.6.1 + - ipykernel==6.29.5 + - ipython==8.35.0 + - ipywidgets==8.1.6 + - isoduration==20.11.0 + - itsdangerous==2.2.0 + - jaxtyping==0.3.1 + - jedi==0.19.2 + - jinja2==3.1.6 + - joblib==1.4.2 + - json5==0.12.0 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2024.10.1 + - jupyter==1.1.1 + - jupyter-client==8.6.3 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - jupyter-events==0.12.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.15.0 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.4.0 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - jupyterlab-widgets==3.0.14 + - kiwisolver==1.4.8 + - lazy-loader==0.4 + - 
lightning-utilities==0.14.3 + - lxml==5.3.2 + - manifold3d==3.0.1 + - mapbox-earcut==1.0.3 + - markdown==3.8 + - markdown-it-py==3.0.0 + - markupsafe==3.0.2 + - matplotlib==3.10.1 + - matplotlib-inline==0.1.7 + - mdurl==0.1.2 + - mediapy==1.2.2 + - mistune==3.1.3 + - mpmath==1.3.0 + - msgpack==1.1.0 + - msgpack-numpy==0.4.8 + - msgspec==0.19.0 + - msvc-runtime==14.42.34433 + - multiprocess==0.70.17 + - narwhals==1.34.1 + - nbclient==0.10.2 + - nbconvert==7.16.6 + - nbformat==5.10.4 + - nerfacc==0.5.3 + - nerfstudio==1.1.5 + - nest-asyncio==1.6.0 + - networkx==3.4.2 + - ninja==1.11.1.4 + - nodeenv==1.9.1 + - notebook==7.4.0 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - nuscenes-devkit==1.1.9 + - open3d==0.19.0 + - opencv-contrib-python==4.11.0.86 + - opencv-python==4.11.0.86 + - opencv-python-headless==4.10.0.84 + - overrides==7.7.0 + - packaging==24.2 + - pandas==2.2.3 + - pandocfilters==1.5.1 + - parso==0.8.4 + - pathos==0.3.3 + - pillow==11.1.0 + - pip==25.0.1 + - platformdirs==4.3.7 + - plotly==6.0.1 + - plyfile==1.1 + - pox==0.3.5 + - ppft==1.7.6.9 + - prometheus-client==0.21.1 + - prompt-toolkit==3.0.50 + - protobuf==3.20.3 + - psutil==7.0.0 + - pure-eval==0.2.3 + - pycocotools==2.0.8 + - pycollada==0.9 + - pycparser==2.22 + - pydantic==2.11.3 + - pydantic-core==2.33.1 + - pygments==2.19.1 + - pymeshlab==2023.12.post1 + - pyngrok==7.2.3 + - pyparsing==3.2.3 + - pyquaternion==0.9.9 + - pysocks==1.7.1 + - python-box==6.1.0 + - python-dateutil==2.9.0.post0 + - python-engineio==4.11.2 + - python-json-logger==3.3.0 + - python-socketio==5.12.1 + - pytorch-msssim==1.0.0 + - pytz==2025.2 + - pywin32==310 + - pywinpty==2.0.15 + - pyyaml==6.0.2 + - pyzmq==26.4.0 + - rawpy==0.24.0 + - referencing==0.36.2 + - regex==2024.11.6 + - requests==2.32.3 + - requests-toolbelt==1.0.0 + - retrying==1.3.4 + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rich==14.0.0 + - rpds-py==0.24.0 + - rtree==1.4.0 + - scikit-image==0.25.2 + - scikit-learn==1.6.1 + - scipy==1.15.2 
+ - semantic-version==2.10.0 + - send2trash==1.8.3 + - sentencepiece==0.1.99 + - sentry-sdk==2.25.1 + - setproctitle==1.3.5 + - setuptools==78.1.0 + - shapely==2.1.0 + - shtab==1.7.1 + - simple-websocket==1.1.0 + - simplejson==3.20.1 + - six==1.17.0 + - smmap==5.0.2 + - sniffio==1.3.1 + - soupsieve==2.6 + - splatfacto-w==0.1.5 + - splines==0.3.0 + - stack-data==0.6.3 + - svg-path==6.3 + - sympy==1.13.3 + - tensorboard==2.19.0 + - tensorboard-data-server==0.7.2 + - tensorboardx==2.6.2.2 + - tensorly==0.9.0 + - termcolor==3.0.1 + - terminado==0.18.1 + - threadpoolctl==3.6.0 + - tifffile==2025.3.30 + - timm==0.6.7 + - tinycss2==1.4.0 + - tinycudann==1.7 + - tokenizers==0.13.3 + - tomli==2.2.1 + - torch==2.1.2+cu118 + - torch-fidelity==0.3.0 + - torch-scatter==2.1.2 + - torchmetrics==1.7.1 + - torchvision==0.16.2+cu118 + - tornado==6.4.2 + - tqdm==4.67.1 + - traitlets==5.14.3 + - transformers==4.29.2 + - trimesh==4.6.6 + - typeguard==4.4.2 + - types-python-dateutil==2.9.0.20241206 + - typing-extensions==4.13.2 + - typing-inspection==0.4.0 + - tyro==0.8.12 + - tzdata==2025.2 + - uri-template==1.3.0 + - urllib3==2.4.0 + - vhacdx==0.0.8.post2 + - viser==0.2.7 + - wadler-lindig==0.1.4 + - wandb==0.19.9 + - wcwidth==0.2.13 + - webcolors==24.11.1 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - websockets==15.0.1 + - werkzeug==3.0.6 + - widgetsnbextension==4.0.14 + - wrapt==1.17.2 + - wsproto==1.2.0 + - wurlitzer==3.1.1 + - xatlas==0.0.10 + - xxhash==3.5.0 + - yourdfpy==0.0.57 + - zipnerf==0.1.0 + - zipp==3.21.0 +prefix: C:\Users\crist\anaconda3\envs\nerfstudio_test diff --git a/requirements_post_zipnerf - Copia.txt b/requirements_post_zipnerf - Copia.txt new file mode 100644 index 0000000000..284b87b139 --- /dev/null +++ b/requirements_post_zipnerf - Copia.txt @@ -0,0 +1,253 @@ +absl-py==2.2.2 +accelerate==0.19.0 +annotated-types==0.7.0 +anyio==4.9.0 +appdirs==1.4.4 +argon2-cffi==23.1.0 +argon2-cffi-bindings==21.2.0 +arrow==1.3.0 +asttokens==3.0.0 +async-lru==2.0.5 
+attrs==25.3.0 +av==14.3.0 +babel==2.17.0 +beautifulsoup4==4.13.3 +bidict==0.23.1 +bitsandbytes==0.43.0 +bleach==6.2.0 +blinker==1.9.0 +cachetools==5.5.2 +certifi==2025.1.31 +cffi==1.17.1 +charset-normalizer==3.4.1 +click==8.1.8 +colorama==0.4.6 +colorlog==6.9.0 +comet_ml==3.49.7 +comm==0.2.2 +ConfigArgParse==1.7 +configobj==5.0.9 +contourpy==1.3.1 +cryptography==44.0.2 +cuda_backend==0.0.0 +cycler==0.12.1 +dash==3.0.2 +debugpy==1.8.14 +decorator==5.2.1 +defusedxml==0.7.1 +descartes==1.1.0 +diffusers==0.16.1 +dill==0.3.9 +docker-pycreds==0.4.0 +docstring_parser==0.16 +dulwich==0.22.8 +everett==3.1.0 +exceptiongroup==1.2.2 +executing==2.2.0 +fastjsonschema==2.21.1 +filelock==3.18.0 +fire==0.7.0 +Flask==3.0.3 +fonttools==4.57.0 +fpsample==0.3.3 +fqdn==1.5.1 +fsspec==2025.3.2 +gdown==5.2.0 +gin-config==0.5.0 +gitdb==4.0.12 +GitPython==3.1.44 +grpcio==1.71.0 +gsplat @ git+https://github.com/nerfstudio-project/gsplat.git@d23d7ca5dd26c3756967304b621ae88521672ed5 +h11==0.14.0 +h5py==3.13.0 +httpcore==1.0.8 +httpx==0.28.1 +huggingface-hub==0.25.2 +idna==3.10 +imageio==2.37.0 +imageio-ffmpeg==0.6.0 +importlib_metadata==8.6.1 +ipykernel==6.29.5 +ipython==8.35.0 +ipywidgets==8.1.6 +isoduration==20.11.0 +itsdangerous==2.2.0 +jaxtyping==0.3.1 +jedi==0.19.2 +Jinja2==3.1.6 +joblib==1.4.2 +json5==0.12.0 +jsonpointer==3.0.0 +jsonschema==4.23.0 +jsonschema-specifications==2024.10.1 +jupyter==1.1.1 +jupyter-console==6.6.3 +jupyter-events==0.12.0 +jupyter-lsp==2.2.5 +jupyter_client==8.6.3 +jupyter_core==5.7.2 +jupyter_server==2.15.0 +jupyter_server_terminals==0.5.3 +jupyterlab==4.4.0 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.27.3 +jupyterlab_widgets==3.0.14 +kiwisolver==1.4.8 +lazy_loader==0.4 +lightning-utilities==0.14.3 +lxml==5.3.2 +manifold3d==3.0.1 +mapbox_earcut==1.0.3 +Markdown==3.8 +markdown-it-py==3.0.0 +MarkupSafe==3.0.2 +matplotlib==3.10.1 +matplotlib-inline==0.1.7 +mdurl==0.1.2 +mediapy==1.2.2 +mistune==3.1.3 +mpmath==1.3.0 +msgpack==1.1.0 +msgpack-numpy==0.4.8 
+msgspec==0.19.0 +msvc_runtime==14.42.34433 +multiprocess==0.70.17 +narwhals==1.34.1 +nbclient==0.10.2 +nbconvert==7.16.6 +nbformat==5.10.4 +nerfacc @ git+https://github.com/nerfstudio-project/nerfacc.git@57ccfa14feb94975836ea6913149a86737220f2b +nerfstudio==1.1.5 +nest-asyncio==1.6.0 +networkx==3.4.2 +ninja==1.11.1.4 +nodeenv==1.9.1 +notebook==7.4.0 +notebook_shim==0.2.4 +numpy==1.26.4 +nuscenes-devkit==1.1.9 +open3d==0.19.0 +opencv-contrib-python==4.11.0.86 +opencv-python==4.11.0.86 +opencv-python-headless==4.10.0.84 +overrides==7.7.0 +packaging==24.2 +pandas==2.2.3 +pandocfilters==1.5.1 +parso==0.8.4 +pathos==0.3.3 +pillow==11.1.0 +platformdirs==4.3.7 +plotly==6.0.1 +plyfile==1.1 +pox==0.3.5 +ppft==1.7.6.9 +prometheus_client==0.21.1 +prompt_toolkit==3.0.50 +protobuf==3.20.3 +psutil==7.0.0 +pure_eval==0.2.3 +pycocotools==2.0.8 +pycollada==0.9 +pycparser==2.22 +pydantic==2.11.3 +pydantic_core==2.33.1 +Pygments==2.19.1 +pymeshlab==2023.12.post1 +pyngrok==7.2.3 +pyparsing==3.2.3 +pyquaternion==0.9.9 +PySocks==1.7.1 +python-box==6.1.0 +python-dateutil==2.9.0.post0 +python-engineio==4.11.2 +python-json-logger==3.3.0 +python-socketio==5.12.1 +pytorch-msssim==1.0.0 +pytz==2025.2 +pywin32==310 +pywinpty==2.0.15 +PyYAML==6.0.2 +pyzmq==26.4.0 +rawpy==0.24.0 +referencing==0.36.2 +regex==2024.11.6 +requests==2.32.3 +requests-toolbelt==1.0.0 +retrying==1.3.4 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==14.0.0 +rpds-py==0.24.0 +rtree==1.4.0 +scikit-image==0.25.2 +scikit-learn==1.6.1 +scipy==1.15.2 +semantic-version==2.10.0 +Send2Trash==1.8.3 +sentencepiece==0.1.99 +sentry-sdk==2.25.1 +setproctitle==1.3.5 +shapely==2.1.0 +shtab==1.7.1 +simple-websocket==1.1.0 +simplejson==3.20.1 +six==1.17.0 +smmap==5.0.2 +sniffio==1.3.1 +soupsieve==2.6 +splatfacto-w @ git+https://github.com/KevinXu02/splatfacto-w@119a3bfb3aa03669278e174ff11c4dfdcbcf97d7 +splines==0.3.0 +stack-data==0.6.3 +svg.path==6.3 +sympy==1.13.3 +tensorboard==2.19.0 +tensorboard-data-server==0.7.2 
+tensorboardX==2.6.2.2 +tensorly==0.9.0 +termcolor==3.0.1 +terminado==0.18.1 +threadpoolctl==3.6.0 +tifffile==2025.3.30 +timm==0.6.7 +tinycss2==1.4.0 +tinycudann @ git+https://github.com/NVlabs/tiny-cuda-nn/@075158a70b87dba8729188a9cadc9411cfa4b71d#subdirectory=bindings/torch +tokenizers==0.13.3 +tomli==2.2.1 +torch==2.1.2+cu118 +torch-fidelity==0.3.0 +torch_scatter==2.1.2 +torchmetrics==1.7.1 +torchvision==0.16.2+cu118 +tornado==6.4.2 +tqdm==4.67.1 +traitlets==5.14.3 +transformers==4.29.2 +trimesh==4.6.6 +typeguard==4.4.2 +types-python-dateutil==2.9.0.20241206 +typing-inspection==0.4.0 +typing_extensions==4.13.2 +tyro==0.8.12 +tzdata==2025.2 +uri-template==1.3.0 +urllib3==2.4.0 +vhacdx==0.0.8.post2 +viser==0.2.7 +wadler_lindig==0.1.4 +wandb==0.19.9 +wcwidth==0.2.13 +webcolors==24.11.1 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==15.0.1 +Werkzeug==3.0.6 +widgetsnbextension==4.0.14 +wrapt==1.17.2 +wsproto==1.2.0 +wurlitzer==3.1.1 +xatlas==0.0.10 +xxhash==3.5.0 +yourdfpy==0.0.57 +-e git+https://github.com/SuLvXiangXin/zipnerf-pytorch.git@e081caeb81473fac6f057ceb2e560207b4ba5112#egg=zipnerf +zipp==3.21.0 diff --git a/requirements_post_zipnerf.txt b/requirements_post_zipnerf.txt new file mode 100644 index 0000000000..096495458c --- /dev/null +++ b/requirements_post_zipnerf.txt @@ -0,0 +1,251 @@ +absl-py==2.2.2 +accelerate==0.19.0 +annotated-types==0.7.0 +anyio==4.9.0 +appdirs==1.4.4 +argon2-cffi==23.1.0 +argon2-cffi-bindings==21.2.0 +arrow==1.3.0 +asttokens==3.0.0 +async-lru==2.0.5 +attrs==25.3.0 +av==14.3.0 +babel==2.17.0 +beautifulsoup4==4.13.3 +bidict==0.23.1 +bitsandbytes==0.43.0 +bleach==6.2.0 +blinker==1.9.0 +cachetools==5.5.2 +certifi==2025.1.31 +cffi==1.17.1 +charset-normalizer==3.4.1 +click==8.1.8 +colorama==0.4.6 +colorlog==6.9.0 +comet_ml==3.49.7 +comm==0.2.2 +ConfigArgParse==1.7 +configobj==5.0.9 +contourpy==1.3.1 +cryptography==44.0.2 +cuda_backend==0.0.0 +cycler==0.12.1 +dash==3.0.2 +debugpy==1.8.14 +decorator==5.2.1 +defusedxml==0.7.1 
+descartes==1.1.0 +diffusers==0.16.1 +dill==0.3.9 +docker-pycreds==0.4.0 +docstring_parser==0.16 +dulwich==0.22.8 +everett==3.1.0 +exceptiongroup==1.2.2 +executing==2.2.0 +fastjsonschema==2.21.1 +filelock==3.18.0 +fire==0.7.0 +Flask==3.0.3 +fonttools==4.57.0 +fpsample==0.3.3 +fqdn==1.5.1 +fsspec==2025.3.2 +gdown==5.2.0 +gin-config==0.5.0 +gitdb==4.0.12 +GitPython==3.1.44 +grpcio==1.71.0 +h11==0.14.0 +h5py==3.13.0 +httpcore==1.0.8 +httpx==0.28.1 +huggingface-hub==0.25.2 +idna==3.10 +imageio==2.37.0 +imageio-ffmpeg==0.6.0 +importlib_metadata==8.6.1 +ipykernel==6.29.5 +ipython==8.35.0 +ipywidgets==8.1.6 +isoduration==20.11.0 +itsdangerous==2.2.0 +jaxtyping==0.3.1 +jedi==0.19.2 +Jinja2==3.1.6 +joblib==1.4.2 +json5==0.12.0 +jsonpointer==3.0.0 +jsonschema==4.23.0 +jsonschema-specifications==2024.10.1 +jupyter==1.1.1 +jupyter-console==6.6.3 +jupyter-events==0.12.0 +jupyter-lsp==2.2.5 +jupyter_client==8.6.3 +jupyter_core==5.7.2 +jupyter_server==2.15.0 +jupyter_server_terminals==0.5.3 +jupyterlab==4.4.0 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.27.3 +jupyterlab_widgets==3.0.14 +kiwisolver==1.4.8 +lazy_loader==0.4 +lightning-utilities==0.14.3 +lxml==5.3.2 +manifold3d==3.0.1 +mapbox_earcut==1.0.3 +Markdown==3.8 +markdown-it-py==3.0.0 +MarkupSafe==3.0.2 +matplotlib==3.10.1 +matplotlib-inline==0.1.7 +mdurl==0.1.2 +mediapy==1.2.2 +mistune==3.1.3 +mpmath==1.3.0 +msgpack==1.1.0 +msgpack-numpy==0.4.8 +msgspec==0.19.0 +msvc_runtime==14.42.34433 +multiprocess==0.70.17 +narwhals==1.34.1 +nbclient==0.10.2 +nbconvert==7.16.6 +nbformat==5.10.4 +nerfstudio==1.1.5 +nest-asyncio==1.6.0 +networkx==3.4.2 +ninja==1.11.1.4 +nodeenv==1.9.1 +notebook==7.4.0 +notebook_shim==0.2.4 +numpy==1.26.4 +nuscenes-devkit==1.1.9 +open3d==0.19.0 +opencv-contrib-python==4.11.0.86 +opencv-python==4.11.0.86 +opencv-python-headless==4.10.0.84 +overrides==7.7.0 +packaging==24.2 +pandas==2.2.3 +pandocfilters==1.5.1 +parso==0.8.4 +pathos==0.3.3 +pillow==11.1.0 +platformdirs==4.3.7 +plotly==6.0.1 
+plyfile==1.1 +pox==0.3.5 +ppft==1.7.6.9 +prometheus_client==0.21.1 +prompt_toolkit==3.0.50 +protobuf==3.20.3 +psutil==7.0.0 +pure_eval==0.2.3 +pycocotools==2.0.8 +pycollada==0.9 +pycparser==2.22 +pydantic==2.11.3 +pydantic_core==2.33.1 +Pygments==2.19.1 +pymeshlab==2023.12.post1 +pyngrok==7.2.3 +pyparsing==3.2.3 +pyquaternion==0.9.9 +PySocks==1.7.1 +python-box==6.1.0 +python-dateutil==2.9.0.post0 +python-engineio==4.11.2 +python-json-logger==3.3.0 +python-socketio==5.12.1 +pytorch-msssim==1.0.0 +pytz==2025.2 +pywin32==310 +pywinpty==2.0.15 +PyYAML==6.0.2 +pyzmq==26.4.0 +rawpy==0.24.0 +referencing==0.36.2 +regex==2024.11.6 +requests==2.32.3 +requests-toolbelt==1.0.0 +retrying==1.3.4 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==14.0.0 +rpds-py==0.24.0 +rtree==1.4.0 +scikit-image==0.25.2 +scikit-learn==1.6.1 +scipy==1.15.2 +semantic-version==2.10.0 +Send2Trash==1.8.3 +sentencepiece==0.1.99 +sentry-sdk==2.25.1 +setproctitle==1.3.5 +shapely==2.1.0 +shtab==1.7.1 +simple-websocket==1.1.0 +simplejson==3.20.1 +six==1.17.0 +smmap==5.0.2 +sniffio==1.3.1 +soupsieve==2.6 +splatfacto-w @ git+https://github.com/KevinXu02/splatfacto-w@119a3bfb3aa03669278e174ff11c4dfdcbcf97d7 +splines==0.3.0 +stack-data==0.6.3 +svg.path==6.3 +sympy==1.13.3 +tensorboard==2.19.0 +tensorboard-data-server==0.7.2 +tensorboardX==2.6.2.2 +tensorly==0.9.0 +termcolor==3.0.1 +terminado==0.18.1 +threadpoolctl==3.6.0 +tifffile==2025.3.30 +timm==0.6.7 +tinycss2==1.4.0 +tinycudann @ git+https://github.com/NVlabs/tiny-cuda-nn/@075158a70b87dba8729188a9cadc9411cfa4b71d#subdirectory=bindings/torch +tokenizers==0.13.3 +tomli==2.2.1 +torch==2.1.2+cu118 +torch-fidelity==0.3.0 +torch_scatter==2.1.2 +torchmetrics==1.7.1 +torchvision==0.16.2+cu118 +tornado==6.4.2 +tqdm==4.67.1 +traitlets==5.14.3 +transformers==4.29.2 +trimesh==4.6.6 +typeguard==4.4.2 +types-python-dateutil==2.9.0.20241206 +typing-inspection==0.4.0 +typing_extensions==4.13.2 +tyro==0.8.12 +tzdata==2025.2 +uri-template==1.3.0 +urllib3==2.4.0 
+vhacdx==0.0.8.post2 +viser==0.2.7 +wadler_lindig==0.1.4 +wandb==0.19.9 +wcwidth==0.2.13 +webcolors==24.11.1 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==15.0.1 +Werkzeug==3.0.6 +widgetsnbextension==4.0.14 +wrapt==1.17.2 +wsproto==1.2.0 +wurlitzer==3.1.1 +xatlas==0.0.10 +xxhash==3.5.0 +yourdfpy==0.0.57 +-e git+https://github.com/SuLvXiangXin/zipnerf-pytorch.git@e081caeb81473fac6f057ceb2e560207b4ba5112#egg=zipnerf +zipp==3.21.0 From 9688a3d62a0823d03c5ec08ade1c5f249bbe1a02 Mon Sep 17 00:00:00 2001 From: Francesco Christopher Date: Thu, 8 Jan 2026 21:58:45 +0100 Subject: [PATCH 2/4] cleanup --- .gitignore | 6 +- base.bat | 50 ++++++ extras.bat | 60 +++++++ requirements_conda.yaml | 319 ++++++++++++++++++++++++++++++++++ requirements_pip.txt | 253 +++++++++++++++++++++++++++ requirements_post_zipnerf.txt | 2 + test_cli.py | 86 +++++++++ 7 files changed, 773 insertions(+), 3 deletions(-) create mode 100644 base.bat create mode 100644 extras.bat create mode 100644 requirements_conda.yaml create mode 100644 requirements_pip.txt create mode 100644 test_cli.py diff --git a/.gitignore b/.gitignore index f2f95b5401..a85e0412e7 100644 --- a/.gitignore +++ b/.gitignore @@ -129,6 +129,7 @@ venv/ ENV/ env.bak/ venv.bak/ +.nerfstudio.code-workspace # Spyder project settings .spyderproject @@ -196,9 +197,8 @@ camera_paths/ /*.yaml # āœ… Exceptions: Include specific files in root dir -!requirements_post_zipnerf.txt -!requirements_post_zipnerf - Copia.txt -!nerfstudio_stable_environment_post_zipnerf.yaml +!requirements_pip.txt +!requirements_conda.yaml # External Submodules: NeRFtoGSandBack/ glomap/ diff --git a/base.bat b/base.bat new file mode 100644 index 0000000000..a0a3a2a6ad --- /dev/null +++ b/base.bat @@ -0,0 +1,50 @@ +@echo off +setlocal enabledelayedexpansion + +REM ==== CONFIG ==== +set ENV_NAME=nerfstudio +set YAML_FILE=nerfstudio_stable_environment_post_zipnerf.yaml +set REQUIREMENTS=requirements_post_zipnerf.txt +set PYTHON_EXE=python + +echo. 
+echo === Nerfstudio Base Installer === +echo. + +REM === Choose install mode === +choice /C CVP /M "Choose environment type: (C)onda, (V)env, or (P)reinstalled python?" +if errorlevel 3 set INSTALL_MODE=PYTHON +if errorlevel 2 set INSTALL_MODE=VENV +if errorlevel 1 set INSTALL_MODE=CONDA + +REM === Setup environment === +if "%INSTALL_MODE%"=="CONDA" ( + choice /C YN /M "Use YAML file? (Y=yes, N=use requirements.txt)" + if errorlevel 2 ( + echo Creating conda env from pip reqs... + conda create -y -n %ENV_NAME% python=3.10 + call conda activate %ENV_NAME% + pip install -r %REQUIREMENTS% + ) else ( + echo Creating conda env from YAML... + conda env create -f %YAML_FILE% + call conda activate %ENV_NAME% + ) +) else if "%INSTALL_MODE%"=="VENV" ( + echo Creating venv... + %PYTHON_EXE% -m venv %ENV_NAME% + call %ENV_NAME%\Scripts\activate + pip install -r %REQUIREMENTS% +) else ( + echo Using system-installed Python... + pip install -r %REQUIREMENTS% +) + +REM === Nerfstudio CLI === +pip install nerfstudio +call ns-install-cli + +echo. +echo āœ… Base Nerfstudio environment ready. +echo Run extras.bat to install optional modules. +pause diff --git a/extras.bat b/extras.bat new file mode 100644 index 0000000000..2c2277cf5f --- /dev/null +++ b/extras.bat @@ -0,0 +1,60 @@ +@echo off +setlocal enabledelayedexpansion + +REM ==== Optional modules config ==== +set MODULES=zipnerf-pytorch sdfstudio splatfacto-w NeRFtoGSandBack opennerf instruct-gs2gs +set FLAG_REFRESH_CLI=0 + +echo. +echo === Nerfstudio Optional Module Installer === +echo. + +REM === CUDA / TORCH INFO === +echo Detecting CUDA + Torch info... +python -c "import torch; print(f'Torch: {torch.__version__}, CUDA: {torch.version.cuda}, Available: {torch.cuda.is_available()}')" +echo. + +REM === Iterate modules === +for %%M in (%MODULES%) do ( + choice /C YN /M "Install %%M? 
(Y/N)" + if errorlevel 1 ( + if not exist %%M ( + if "%%M"=="zipnerf-pytorch" ( + git clone https://github.com/SuLvXiangXin/zipnerf-pytorch.git + cd zipnerf-pytorch + pip install -r requirements.txt + pip install ./extensions/cuda + if not exist nvdiffrast ( + git clone https://github.com/NVlabs/nvdiffrast + pip install ./nvdiffrast + ) + set CUDA=cu118 + pip install torch-scatter -f https://data.pyg.org/whl/torch-2.1.0+%CUDA%.html + cd .. + ) else if "%%M"=="splatfacto-w" ( + git clone https://github.com/KevinXu02/splatfacto-w + pip install -e splatfacto-w + ) else if "%%M"=="sdfstudio" ( + git clone https://github.com/autonomousvision/sdfstudio.git + pip install -e sdfstudio + ) else if "%%M"=="NeRFtoGSandBack" ( + git clone https://github.com/grasp-lyrl/NeRFtoGSandBack + ) else if "%%M"=="opennerf" ( + git clone https://github.com/opennerf/opennerf + ) else if "%%M"=="instruct-gs2gs" ( + git clone https://github.com/cvachha/instruct-gs2gs + pip install git+https://github.com/cvachha/instruct-gs2gs + ) + ) + set FLAG_REFRESH_CLI=1 + ) +) + +if %FLAG_REFRESH_CLI%==1 ( + echo Refreshing Nerfstudio CLI... + call ns-install-cli +) + +echo. +echo āœ… Optional module setup complete. 
+pause diff --git a/requirements_conda.yaml b/requirements_conda.yaml new file mode 100644 index 0000000000..91fb5ad9d7 --- /dev/null +++ b/requirements_conda.yaml @@ -0,0 +1,319 @@ +name: nerfstudio_test +channels: + - nvidia/label/cuda-11.8.0 + - defaults + - https://repo.anaconda.com/pkgs/main + - https://repo.anaconda.com/pkgs/r + - https://repo.anaconda.com/pkgs/msys2 +dependencies: + - bzip2=1.0.8=h2bbff1b_6 + - ca-certificates=2025.2.25=haa95532_0 + - cuda-cccl=11.8.89=0 + - cuda-command-line-tools=11.8.0=0 + - cuda-compiler=11.8.0=0 + - cuda-cudart=11.8.89=0 + - cuda-cudart-dev=11.8.89=0 + - cuda-cuobjdump=11.8.86=0 + - cuda-cupti=11.8.87=0 + - cuda-cuxxfilt=11.8.86=0 + - cuda-documentation=11.8.86=0 + - cuda-libraries=11.8.0=0 + - cuda-libraries-dev=11.8.0=0 + - cuda-memcheck=11.8.86=0 + - cuda-nsight-compute=11.8.0=0 + - cuda-nvcc=11.8.89=0 + - cuda-nvdisasm=11.8.86=0 + - cuda-nvml-dev=11.8.86=0 + - cuda-nvprof=11.8.87=0 + - cuda-nvprune=11.8.86=0 + - cuda-nvrtc=11.8.89=0 + - cuda-nvrtc-dev=11.8.89=0 + - cuda-nvtx=11.8.86=0 + - cuda-nvvp=11.8.87=0 + - cuda-profiler-api=11.8.86=0 + - cuda-sanitizer-api=11.8.86=0 + - cuda-toolkit=11.8.0=0 + - cuda-tools=11.8.0=0 + - cuda-visual-tools=11.8.0=0 + - libcublas=11.11.3.6=0 + - libcublas-dev=11.11.3.6=0 + - libcufft=10.9.0.58=0 + - libcufft-dev=10.9.0.58=0 + - libcurand=10.3.0.86=0 + - libcurand-dev=10.3.0.86=0 + - libcusolver=11.4.1.48=0 + - libcusolver-dev=11.4.1.48=0 + - libcusparse=11.7.5.86=0 + - libcusparse-dev=11.7.5.86=0 + - libffi=3.4.4=hd77b12b_1 + - libnpp=11.8.0.86=0 + - libnpp-dev=11.8.0.86=0 + - libnvjpeg=11.9.0.86=0 + - libnvjpeg-dev=11.9.0.86=0 + - nsight-compute=2022.3.0.22=0 + - openssl=3.0.16=h3f729d1_0 + - python=3.10.16=h4607a30_1 + - sqlite=3.45.3=h2bbff1b_0 + - tk=8.6.14=h0416ee5_0 + - vc=14.42=haa95532_4 + - vs2015_runtime=14.42.34433=he0abc0d_4 + - wheel=0.45.1=py310haa95532_0 + - xz=5.6.4=h4754444_1 + - zlib=1.2.13=h8cc25b3_1 + - pip: + - absl-py==2.2.2 + - accelerate==0.19.0 + - 
annotated-types==0.7.0 + - anyio==4.9.0 + - appdirs==1.4.4 + - argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==3.0.0 + - async-lru==2.0.5 + - attrs==25.3.0 + - av==14.3.0 + - babel==2.17.0 + - beautifulsoup4==4.13.3 + - bidict==0.23.1 + - bitsandbytes==0.43.0 + - bleach==6.2.0 + - blinker==1.9.0 + - cachetools==5.5.2 + - certifi==2025.1.31 + - cffi==1.17.1 + - charset-normalizer==3.4.1 + - click==8.1.8 + - colorama==0.4.6 + - colorlog==6.9.0 + - comet-ml==3.49.7 + - comm==0.2.2 + - configargparse==1.7 + - configobj==5.0.9 + - contourpy==1.3.1 + - cryptography==44.0.2 + - cuda-backend==0.0.0 + - cycler==0.12.1 + - dash==3.0.2 + - debugpy==1.8.14 + - decorator==5.2.1 + - defusedxml==0.7.1 + - descartes==1.1.0 + - diffusers==0.16.1 + - dill==0.3.9 + - docker-pycreds==0.4.0 + - docstring-parser==0.16 + - dulwich==0.22.8 + - everett==3.1.0 + - exceptiongroup==1.2.2 + - executing==2.2.0 + - fastjsonschema==2.21.1 + - filelock==3.18.0 + - fire==0.7.0 + - flask==3.0.3 + - fonttools==4.57.0 + - fpsample==0.3.3 + - fqdn==1.5.1 + - fsspec==2025.3.2 + - gdown==5.2.0 + - gin-config==0.5.0 + - gitdb==4.0.12 + - gitpython==3.1.44 + - grpcio==1.71.0 + - gsplat==1.5.0 + - h11==0.14.0 + - h5py==3.13.0 + - httpcore==1.0.8 + - httpx==0.28.1 + - huggingface-hub==0.25.2 + - idna==3.10 + - imageio==2.37.0 + - imageio-ffmpeg==0.6.0 + - importlib-metadata==8.6.1 + - ipykernel==6.29.5 + - ipython==8.35.0 + - ipywidgets==8.1.6 + - isoduration==20.11.0 + - itsdangerous==2.2.0 + - jaxtyping==0.3.1 + - jedi==0.19.2 + - jinja2==3.1.6 + - joblib==1.4.2 + - json5==0.12.0 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2024.10.1 + - jupyter==1.1.1 + - jupyter-client==8.6.3 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - jupyter-events==0.12.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.15.0 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.4.0 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - 
jupyterlab-widgets==3.0.14 + - kiwisolver==1.4.8 + - lazy-loader==0.4 + - lightning-utilities==0.14.3 + - lxml==5.3.2 + - manifold3d==3.0.1 + - mapbox-earcut==1.0.3 + - markdown==3.8 + - markdown-it-py==3.0.0 + - markupsafe==3.0.2 + - matplotlib==3.10.1 + - matplotlib-inline==0.1.7 + - mdurl==0.1.2 + - mediapy==1.2.2 + - mistune==3.1.3 + - mpmath==1.3.0 + - msgpack==1.1.0 + - msgpack-numpy==0.4.8 + - msgspec==0.19.0 + - msvc-runtime==14.42.34433 + - multiprocess==0.70.17 + - narwhals==1.34.1 + - nbclient==0.10.2 + - nbconvert==7.16.6 + - nbformat==5.10.4 + - nerfacc==0.5.3 + - nerfstudio==1.1.5 + - nest-asyncio==1.6.0 + - networkx==3.4.2 + - ninja==1.11.1.4 + - nodeenv==1.9.1 + - notebook==7.4.0 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - nuscenes-devkit==1.1.9 + - open3d==0.19.0 + - opencv-contrib-python==4.11.0.86 + - opencv-python==4.11.0.86 + - opencv-python-headless==4.10.0.84 + - overrides==7.7.0 + - packaging==24.2 + - pandas==2.2.3 + - pandocfilters==1.5.1 + - parso==0.8.4 + - pathos==0.3.3 + - pillow==11.1.0 + - pip==25.0.1 + - platformdirs==4.3.7 + - plotly==6.0.1 + - plyfile==1.1 + - pox==0.3.5 + - ppft==1.7.6.9 + - prometheus-client==0.21.1 + - prompt-toolkit==3.0.50 + - protobuf==3.20.3 + - psutil==7.0.0 + - pure-eval==0.2.3 + - pycocotools==2.0.8 + - pycollada==0.9 + - pycparser==2.22 + - pydantic==2.11.3 + - pydantic-core==2.33.1 + - pygments==2.19.1 + - pymeshlab==2023.12.post1 + - pyngrok==7.2.3 + - pyparsing==3.2.3 + - pyquaternion==0.9.9 + - pysocks==1.7.1 + - python-box==6.1.0 + - python-dateutil==2.9.0.post0 + - python-engineio==4.11.2 + - python-json-logger==3.3.0 + - python-socketio==5.12.1 + - pytorch-msssim==1.0.0 + - pytz==2025.2 + - pywin32==310 + - pywinpty==2.0.15 + - pyyaml==6.0.2 + - pyzmq==26.4.0 + - rawpy==0.24.0 + - referencing==0.36.2 + - regex==2024.11.6 + - requests==2.32.3 + - requests-toolbelt==1.0.0 + - retrying==1.3.4 + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rich==14.0.0 + - rpds-py==0.24.0 + - 
rtree==1.4.0 + - scikit-image==0.25.2 + - scikit-learn==1.6.1 + - scipy==1.15.2 + - semantic-version==2.10.0 + - send2trash==1.8.3 + - sentencepiece==0.1.99 + - sentry-sdk==2.25.1 + - setproctitle==1.3.5 + - setuptools==78.1.0 + - shapely==2.1.0 + - shtab==1.7.1 + - simple-websocket==1.1.0 + - simplejson==3.20.1 + - six==1.17.0 + - smmap==5.0.2 + - sniffio==1.3.1 + - soupsieve==2.6 + - splatfacto-w==0.1.5 + - splines==0.3.0 + - stack-data==0.6.3 + - svg-path==6.3 + - sympy==1.13.3 + - tensorboard==2.19.0 + - tensorboard-data-server==0.7.2 + - tensorboardx==2.6.2.2 + - tensorly==0.9.0 + - termcolor==3.0.1 + - terminado==0.18.1 + - threadpoolctl==3.6.0 + - tifffile==2025.3.30 + - timm==0.6.7 + - tinycss2==1.4.0 + - tinycudann==1.7 + - tokenizers==0.13.3 + - tomli==2.2.1 + - torch==2.1.2+cu118 + - torch-fidelity==0.3.0 + - torch-scatter==2.1.2 + - torchmetrics==1.7.1 + - torchvision==0.16.2+cu118 + - tornado==6.4.2 + - tqdm==4.67.1 + - traitlets==5.14.3 + - transformers==4.29.2 + - trimesh==4.6.6 + - typeguard==4.4.2 + - types-python-dateutil==2.9.0.20241206 + - typing-extensions==4.13.2 + - typing-inspection==0.4.0 + - tyro==0.8.12 + - tzdata==2025.2 + - uri-template==1.3.0 + - urllib3==2.4.0 + - vhacdx==0.0.8.post2 + - viser==0.2.7 + - wadler-lindig==0.1.4 + - wandb==0.19.9 + - wcwidth==0.2.13 + - webcolors==24.11.1 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - websockets==15.0.1 + - werkzeug==3.0.6 + - widgetsnbextension==4.0.14 + - wrapt==1.17.2 + - wsproto==1.2.0 + - wurlitzer==3.1.1 + - xatlas==0.0.10 + - xxhash==3.5.0 + - yourdfpy==0.0.57 + - zipnerf==0.1.0 + - zipp==3.21.0 +prefix: C:\Users\crist\anaconda3\envs\nerfstudio_test diff --git a/requirements_pip.txt b/requirements_pip.txt new file mode 100644 index 0000000000..05ae5e0508 --- /dev/null +++ b/requirements_pip.txt @@ -0,0 +1,253 @@ +absl-py==2.2.2 +accelerate==0.19.0 +annotated-types==0.7.0 +anyio==4.9.0 +appdirs==1.4.4 +argon2-cffi==23.1.0 +argon2-cffi-bindings==21.2.0 +arrow==1.3.0 
+asttokens==3.0.0 +async-lru==2.0.5 +attrs==25.3.0 +av==14.3.0 +babel==2.17.0 +beautifulsoup4==4.13.3 +bidict==0.23.1 +bitsandbytes==0.43.0 +bleach==6.2.0 +blinker==1.9.0 +cachetools==5.5.2 +certifi==2025.1.31 +cffi==1.17.1 +charset-normalizer==3.4.1 +click==8.1.8 +colorama==0.4.6 +colorlog==6.9.0 +comet_ml==3.49.7 +comm==0.2.2 +ConfigArgParse==1.7 +configobj==5.0.9 +contourpy==1.3.1 +cryptography==44.0.2 +cuda_backend==0.0.0 +cycler==0.12.1 +dash==3.0.2 +debugpy==1.8.14 +decorator==5.2.1 +defusedxml==0.7.1 +descartes==1.1.0 +diffusers==0.16.1 +dill==0.3.9 +docker-pycreds==0.4.0 +docstring_parser==0.16 +dulwich==0.22.8 +everett==3.1.0 +exceptiongroup==1.2.2 +executing==2.2.0 +fastjsonschema==2.21.1 +filelock==3.18.0 +fire==0.7.0 +Flask==3.0.3 +fonttools==4.57.0 +fpsample==0.3.3 +fqdn==1.5.1 +fsspec==2025.3.2 +gdown==5.2.0 +gin-config==0.5.0 +gitdb==4.0.12 +GitPython==3.1.44 +grpcio==1.71.0 +h11==0.14.0 +h5py==3.13.0 +httpcore==1.0.8 +httpx==0.28.1 +huggingface-hub==0.25.2 +idna==3.10 +imageio==2.37.0 +imageio-ffmpeg==0.6.0 +importlib_metadata==8.6.1 +ipykernel==6.29.5 +ipython==8.35.0 +ipywidgets==8.1.6 +isoduration==20.11.0 +itsdangerous==2.2.0 +jaxtyping==0.3.1 +jedi==0.19.2 +Jinja2==3.1.6 +joblib==1.4.2 +json5==0.12.0 +jsonpointer==3.0.0 +jsonschema==4.23.0 +jsonschema-specifications==2024.10.1 +jupyter==1.1.1 +jupyter-console==6.6.3 +jupyter-events==0.12.0 +jupyter-lsp==2.2.5 +jupyter_client==8.6.3 +jupyter_core==5.7.2 +jupyter_server==2.15.0 +jupyter_server_terminals==0.5.3 +jupyterlab==4.4.0 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.27.3 +jupyterlab_widgets==3.0.14 +kiwisolver==1.4.8 +lazy_loader==0.4 +lightning-utilities==0.14.3 +lxml==5.3.2 +manifold3d==3.0.1 +mapbox_earcut==1.0.3 +Markdown==3.8 +markdown-it-py==3.0.0 +MarkupSafe==3.0.2 +matplotlib==3.10.1 +matplotlib-inline==0.1.7 +mdurl==0.1.2 +mediapy==1.2.2 +mistune==3.1.3 +mpmath==1.3.0 +msgpack==1.1.0 +msgpack-numpy==0.4.8 +msgspec==0.19.0 +msvc_runtime==14.42.34433 +multiprocess==0.70.17 
+narwhals==1.34.1 +nbclient==0.10.2 +nbconvert==7.16.6 +nbformat==5.10.4 +nerfstudio==1.1.5 +nest-asyncio==1.6.0 +networkx==3.4.2 +ninja==1.11.1.4 +nodeenv==1.9.1 +notebook==7.4.0 +notebook_shim==0.2.4 +numpy==1.26.4 +nuscenes-devkit==1.1.9 +open3d==0.19.0 +opencv-contrib-python==4.11.0.86 +opencv-python==4.11.0.86 +opencv-python-headless==4.10.0.84 +overrides==7.7.0 +packaging==24.2 +pandas==2.2.3 +pandocfilters==1.5.1 +parso==0.8.4 +pathos==0.3.3 +pillow==11.1.0 +platformdirs==4.3.7 +plotly==6.0.1 +plyfile==1.1 +pox==0.3.5 +ppft==1.7.6.9 +prometheus_client==0.21.1 +prompt_toolkit==3.0.50 +protobuf==3.20.3 +psutil==7.0.0 +pure_eval==0.2.3 +pycocotools==2.0.8 +pycollada==0.9 +pycparser==2.22 +pydantic==2.11.3 +pydantic_core==2.33.1 +Pygments==2.19.1 +pymeshlab==2023.12.post1 +pyngrok==7.2.3 +pyparsing==3.2.3 +pyquaternion==0.9.9 +PySocks==1.7.1 +python-box==6.1.0 +python-dateutil==2.9.0.post0 +python-engineio==4.11.2 +python-json-logger==3.3.0 +python-socketio==5.12.1 +pytorch-msssim==1.0.0 +pytz==2025.2 +pywin32==310 +pywinpty==2.0.15 +PyYAML==6.0.2 +pyzmq==26.4.0 +rawpy==0.24.0 +referencing==0.36.2 +regex==2024.11.6 +requests==2.32.3 +requests-toolbelt==1.0.0 +retrying==1.3.4 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==14.0.0 +rpds-py==0.24.0 +rtree==1.4.0 +scikit-image==0.25.2 +scikit-learn==1.6.1 +scipy==1.15.2 +semantic-version==2.10.0 +Send2Trash==1.8.3 +sentencepiece==0.1.99 +sentry-sdk==2.25.1 +setproctitle==1.3.5 +shapely==2.1.0 +shtab==1.7.1 +simple-websocket==1.1.0 +simplejson==3.20.1 +six==1.17.0 +smmap==5.0.2 +sniffio==1.3.1 +soupsieve==2.6 +splatfacto-w @ git+https://github.com/KevinXu02/splatfacto-w@119a3bfb3aa03669278e174ff11c4dfdcbcf97d7 +splines==0.3.0 +stack-data==0.6.3 +svg.path==6.3 +sympy==1.13.3 +tensorboard==2.19.0 +tensorboard-data-server==0.7.2 +tensorboardX==2.6.2.2 +tensorly==0.9.0 +termcolor==3.0.1 +terminado==0.18.1 +threadpoolctl==3.6.0 +tifffile==2025.3.30 +timm==0.6.7 +tinycss2==1.4.0 +tinycudann @ 
git+https://github.com/NVlabs/tiny-cuda-nn/@075158a70b87dba8729188a9cadc9411cfa4b71d#subdirectory=bindings/torch +tokenizers==0.13.3 +tomli==2.2.1 +pip install git+https://github.com/cvachha/instruct-gs2gs +torch==2.1.2+cu118 +torch-fidelity==0.3.0 +torch_scatter==2.1.2 +torchmetrics==1.7.1 +torchvision==0.16.2+cu118 +tornado==6.4.2 +tqdm==4.67.1 +traitlets==5.14.3 +transformers==4.29.2 +trimesh==4.6.6 +typeguard==4.4.2 +types-python-dateutil==2.9.0.20241206 +typing-inspection==0.4.0 +typing_extensions==4.13.2 +tyro==0.8.12 +tzdata==2025.2 +uri-template==1.3.0 +urllib3==2.4.0 +vhacdx==0.0.8.post2 +viser==0.2.7 +wadler_lindig==0.1.4 +wandb==0.19.9 +wcwidth==0.2.13 +webcolors==24.11.1 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==15.0.1 +Werkzeug==3.0.6 +widgetsnbextension==4.0.14 +wrapt==1.17.2 +wsproto==1.2.0 +wurlitzer==3.1.1 +xatlas==0.0.10 +xxhash==3.5.0 +yourdfpy==0.0.57 +-e git+https://github.com/SuLvXiangXin/zipnerf-pytorch.git@e081caeb81473fac6f057ceb2e560207b4ba5112#egg=zipnerf +pip install torch-scatter -f https://data.pyg.org/whl/torch-2.1.2+${CUDA}.html +zipp==3.21.0 diff --git a/requirements_post_zipnerf.txt b/requirements_post_zipnerf.txt index 096495458c..05ae5e0508 100644 --- a/requirements_post_zipnerf.txt +++ b/requirements_post_zipnerf.txt @@ -212,6 +212,7 @@ tinycss2==1.4.0 tinycudann @ git+https://github.com/NVlabs/tiny-cuda-nn/@075158a70b87dba8729188a9cadc9411cfa4b71d#subdirectory=bindings/torch tokenizers==0.13.3 tomli==2.2.1 +pip install git+https://github.com/cvachha/instruct-gs2gs torch==2.1.2+cu118 torch-fidelity==0.3.0 torch_scatter==2.1.2 @@ -248,4 +249,5 @@ xatlas==0.0.10 xxhash==3.5.0 yourdfpy==0.0.57 -e git+https://github.com/SuLvXiangXin/zipnerf-pytorch.git@e081caeb81473fac6f057ceb2e560207b4ba5112#egg=zipnerf +pip install torch-scatter -f https://data.pyg.org/whl/torch-2.1.2+${CUDA}.html zipp==3.21.0 diff --git a/test_cli.py b/test_cli.py new file mode 100644 index 0000000000..21f4d449c5 --- /dev/null +++ b/test_cli.py 
@@ -0,0 +1,86 @@ +import subprocess +import sys +import shutil + + +# Optional expected modules to validate trainer registration +EXPECTED_TRAINERS = [ + "zipnerf", + "splatfacto", + "sdfstudio", + "gs2gs", + "nerfgs" +] + +def run_command(cmd, description=""): + print(f"\nšŸ”¹ Running: {cmd}") + try: + result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + if result.returncode != 0: + print(f"āŒ Failed: {description or cmd}") + print(result.stderr) + return False + print("āœ… Success") + return result.stdout + except Exception as e: + print(f"āŒ Exception while running {cmd}:\n{e}") + return False + + +def command_exists(command): + return shutil.which(command) is not None + + +def main(): + print("šŸ”§ Nerfstudio CLI Validation Tool\n") + + # 1. Check if CLI commands exist + for cmd in ["ns-train", "ns-viewer", "ns-process-data"]: + if not command_exists(cmd): + print(f"āŒ Command not found: {cmd}. Is nerfstudio CLI installed?") + return + else: + print(f"āœ”ļø Found CLI command: {cmd}") + + # 2. Check basic help commands + if not run_command("ns-train --help", "Check ns-train help"): return + if not run_command("ns-viewer --help", "Check ns-viewer help"): return + + # 3. Parse trainer list + print("\nšŸ“‹ Checking registered trainers:") + output = run_command("ns-train --help", "List trainers") + if not output: return + + trainers = [] + for line in output.splitlines(): + if "usage: ns-train" in line.lower(): + continue + if "--" in line or "Options:" in line: + break + if line.strip(): + trainers.append(line.strip()) + + print(f"\nāœ… Detected trainers:") + for t in trainers: + print(f" - {t}") + + # 4. 
Check for expected trainers (from add-ons) + print("\nšŸ” Verifying addon trainer registration:") + missing = [] + for expected in EXPECTED_TRAINERS: + if not any(expected in t for t in trainers): + print(f"ā— Missing expected trainer: {expected}") + missing.append(expected) + else: + print(f"āœ”ļø Found: {expected}") + + if missing: + print("\nāš ļø Some trainers appear to be missing. Did you run `ns-install-cli` after installing modules?") + else: + print("\nšŸŽ‰ All expected addon trainers are registered.") + + print("\nāœ… CLI Validation Complete.") + + +if __name__ == "__main__": + main() From 362247c7af953d53536e6634424ca90c26a5baff Mon Sep 17 00:00:00 2001 From: Francesco Christopher Date: Thu, 8 Jan 2026 22:00:52 +0100 Subject: [PATCH 3/4] cleanup --- test_cli.bat | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 test_cli.bat diff --git a/test_cli.bat b/test_cli.bat new file mode 100644 index 0000000000..a315890a05 --- /dev/null +++ b/test_cli.bat @@ -0,0 +1,34 @@ +@echo off +echo. +echo ========== Nerfstudio CLI Validator ========== +echo. + +REM Check if CLI works +echo Running: ns-train --help +ns-train --help +if errorlevel 1 ( + echo āŒ ERROR: ns-train failed to run. CLI may be broken or module not registered. + goto end +) + +REM Check if viewer works +echo. +echo Running: ns-viewer --help +ns-viewer --help +if errorlevel 1 ( + echo āŒ ERROR: ns-viewer failed to run. + goto end +) + +REM Optional: Detect available trainers +echo. +echo šŸ” Listing available trainers: +ns-train --help | findstr /R "Usage:.*" + +REM (Optional dry-run logic can be added here per addon) + +echo. +echo āœ… CLI appears to be working correctly. 
+ +:end +pause \ No newline at end of file From ec93779ada399347f6105e36d5e61906ae38b6c5 Mon Sep 17 00:00:00 2001 From: Francesco Christopher Date: Thu, 8 Jan 2026 22:36:42 +0100 Subject: [PATCH 4/4] new basic + extra algorithms installation batches + cli validation test --- README_FULL.md | 3 +++ base.bat | 4 ++-- test_cli.bat | 6 +++++- test_cli.py | 20 ++++++++++++-------- 4 files changed, 22 insertions(+), 11 deletions(-) create mode 100644 README_FULL.md diff --git a/README_FULL.md b/README_FULL.md new file mode 100644 index 0000000000..25a213d3f0 --- /dev/null +++ b/README_FULL.md @@ -0,0 +1,3 @@ +install nerfstudio via base.bat Conda or python venv (experimental) +install extra nerfstudio algorithms via extras.bat +validate the installed and available algorithms with test_cli.py diff --git a/base.bat b/base.bat index a0a3a2a6ad..a06a7e1930 100644 --- a/base.bat +++ b/base.bat @@ -3,8 +3,8 @@ setlocal enabledelayedexpansion REM ==== CONFIG ==== set ENV_NAME=nerfstudio -set YAML_FILE=nerfstudio_stable_environment_post_zipnerf.yaml -set REQUIREMENTS=requirements_post_zipnerf.txt +set YAML_FILE=requirements_conda.yaml +set REQUIREMENTS=requirements_pip.txt set PYTHON_EXE=python echo. diff --git a/test_cli.bat b/test_cli.bat index a315890a05..b04e03822b 100644 --- a/test_cli.bat +++ b/test_cli.bat @@ -2,6 +2,10 @@ echo. echo ========== Nerfstudio CLI Validator ========== echo. +@echo off +REM Ensure UTF-8 output +chcp 65001 > nul +set PYTHONIOENCODING=utf-8 REM Check if CLI works echo Running: ns-train --help ns-train --help if errorlevel 1 ( echo āŒ ERROR: ns-train failed to run. CLI may be broken or module not registered. goto end ) REM Check if viewer works echo. echo Running: ns-viewer --help ns-viewer --help if errorlevel 1 ( echo āŒ ERROR: ns-viewer failed to run. goto end ) REM Optional: Detect available trainers echo. echo šŸ” Listing available trainers: ns-train --help | findstr /R "Usage:.*" REM (Optional dry-run logic can be added here per addon) echo. echo āœ… CLI appears to be working correctly. 
:end -pause \ No newline at end of file +pause diff --git a/test_cli.py b/test_cli.py index 21f4d449c5..8cd3ec2aea 100644 --- a/test_cli.py +++ b/test_cli.py @@ -1,7 +1,14 @@ +# test_cli.py import subprocess import sys import shutil +import io +import os +# Force UTF-8 encoding +sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8') +sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8') +os.environ["PYTHONIOENCODING"] = "utf-8" # Optional expected modules to validate trainer registration EXPECTED_TRAINERS = [ @@ -15,7 +22,7 @@ def run_command(cmd, description=""): print(f"\nšŸ”¹ Running: {cmd}") try: - result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + result = subprocess.run(cmd, shell=True, capture_output=True, text=True, encoding='utf-8') if result.returncode != 0: print(f"āŒ Failed: {description or cmd}") print(result.stderr) @@ -26,15 +33,13 @@ def run_command(cmd, description=""): print(f"āŒ Exception while running {cmd}:\n{e}") return False - def command_exists(command): return shutil.which(command) is not None - def main(): print("šŸ”§ Nerfstudio CLI Validation Tool\n") - # 1. Check if CLI commands exist + # 1. Check CLI tools for cmd in ["ns-train", "ns-viewer", "ns-process-data"]: if not command_exists(cmd): print(f"āŒ Command not found: {cmd}. Is nerfstudio CLI installed?") @@ -42,11 +47,11 @@ def main(): else: print(f"āœ”ļø Found CLI command: {cmd}") - # 2. Check basic help commands + # 2. CLI Help Check if not run_command("ns-train --help", "Check ns-train help"): return if not run_command("ns-viewer --help", "Check ns-viewer help"): return - # 3. Parse trainer list + # 3. Trainer Parsing print("\nšŸ“‹ Checking registered trainers:") output = run_command("ns-train --help", "List trainers") if not output: return @@ -64,7 +69,7 @@ def main(): for t in trainers: print(f" - {t}") - # 4. Check for expected trainers (from add-ons) + # 4. 
Validate Addons print("\nšŸ” Verifying addon trainer registration:") missing = [] for expected in EXPECTED_TRAINERS: @@ -81,6 +86,5 @@ def main(): print("\nāœ… CLI Validation Complete.") - if __name__ == "__main__": main()