Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
570 changes: 233 additions & 337 deletions README.md

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion docker/Dockerfile → docker/Dockerfile.amd64
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,8 @@ RUN apt-get update \
# ROS installs
RUN apt-get install -y \
ros-kilted-ros2cli-common-extensions \
ros-kilted-vision-opencv
ros-kilted-vision-opencv \
nlohmann-json3-dev

# other deps
RUN apt-get install libpng++-dev
Expand Down
182 changes: 182 additions & 0 deletions docker/Dockerfile.l4t_jetpack6
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
FROM rwthika/ros2-torch:jazzy-ros-base-torch2.5.0

# MAINTAINER is deprecated (hadolint DL4000); use an OCI image label instead.
LABEL org.opencontainers.image.authors="Jesse Morris <jesse.morris@sydney.edu.au>"


# To avoid tzdata asking for geographic location...
# NOTE(review): baking DEBIAN_FRONTEND into ENV also leaks into the runtime
# environment of the final image; kept as-is to preserve existing behaviour.
ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
# NOTE(review): a blanket `apt-get upgrade` is flagged by hadolint DL3005
# (prefer bumping the base image tag); kept to preserve existing behaviour.
RUN apt-get update && apt-get upgrade -y --allow-downgrades --no-install-recommends apt-utils
# Base toolchain plus xvfb (gives the container a display for GUI related testing).
# update + install are combined in one layer and the apt lists are removed
# afterwards so the layer does not carry a stale package index (hadolint DL3009).
RUN apt-get update && apt-get install -y --allow-downgrades \
        build-essential \
        cmake \
        git \
        pkg-config \
        xvfb \
    && rm -rf /var/lib/apt/lists/*

# In the arm64 version of the base image we do not have the nvidia-cuda-development toolkit
# as explained https://github.com/ika-rwth-aachen/docker-ros-ml-images/issues/16
# this contains nvcc (ie we dont have it)
# we need nvcc to build opencv with cuda
# add extra nvidia container apt repository (otherwise we cannot find nvidia-cuda-toolkit)
# NOTE(review): docker builds run as root, so `sudo` is unnecessary (hadolint
# DL3004) and may not even be installed in the base image; removed.
# Assumes curl and gpg are provided by the base image -- TODO confirm.
RUN curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
    tee /etc/apt/sources.list.d/nvidia-container-toolkit.list


# NOTE: both nvidia-cuda-toolkit and cuda-toolkit are needed (particularly for header files)
# Editors/formatters (nano, vim, clang-format) and TensorRT dev headers are
# installed in the same layer; apt lists are removed so they do not persist.
RUN apt-get update \
    && apt-get install -y \
    python3-pip \
    openssh-client \
    software-properties-common \
    nano \
    vim \
    clang-format \
    nvidia-cuda-toolkit \
    cuda-toolkit \
    libnvinfer-dev \
    libnvonnxparsers-dev \
    tensorrt-dev \
    # --no-cache-dir keeps the pip download cache out of the layer (DL3042)
    && pip3 install --no-cache-dir black pre-commit \
    && rm -rf /var/lib/apt/lists/*



# ROS installs (jazzy), sorted alphabetically for diffability.
# apt lists are removed in the same layer (hadolint DL3009); every later
# apt layer runs its own `apt-get update` first.
RUN apt-get update && apt-get install -y \
    libpcl-conversions-dev \
    ros-jazzy-cv-bridge \
    ros-jazzy-image-transport \
    ros-jazzy-pcl-ros \
    ros-jazzy-rmw-fastrtps-cpp \
    ros-jazzy-rmw-zenoh-cpp \
    ros-jazzy-ros2cli-common-extensions \
    ros-jazzy-vision-opencv \
    && rm -rf /var/lib/apt/lists/*

# other deps
# RUN apt-get install libpng++-dev
# is libpng-dev different from libpng++? It does not appear to be available as an apt package for aarch64
# RUN apt-get install libpng-dev

# Python tooling. The original two pip invocations listed several packages
# more than once (setuptools, scipy, pre-commit, argcomplete, black); the
# union is installed once here. --no-cache-dir keeps the pip cache out of
# the layer (hadolint DL3042).
RUN python3 -m pip install --no-cache-dir \
    argcomplete \
    black \
    evo \
    matplotlib \
    pre-commit \
    pylatex \
    scipy \
    setuptools

# Install glog, gflags
RUN apt-get update && apt-get install -y libgflags2.2 libgflags-dev libgoogle-glog-dev \
    && rm -rf /var/lib/apt/lists/*

# Install xvfb to provide a display to container for GUI related testing.
# vtk is needed for OpenCV to build its viz module (from contrib!)
RUN apt-get update && apt-get install -y xvfb python3-dev python3-setuptools libvtk9-dev \
    && rm -rf /var/lib/apt/lists/*

# install CSparse
# NOTE(review): use apt-get rather than apt in scripts (hadolint DL3027) and
# refresh the package index in the same layer it is consumed (DL3009).
RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y libsuitesparse-dev \
    && rm -rf /var/lib/apt/lists/*

RUN python3 -m pip install --no-cache-dir "ultralytics==8.3.0" "numpy<2.0" "opencv-python<5.0"
# Jetson (aarch64) onnxruntime GPU wheel published by ultralytics.
RUN python3 -m pip install --no-cache-dir https://github.com/ultralytics/assets/releases/download/v0.0.0/onnxruntime_gpu-1.23.0-cp310-cp310-linux_aarch64.whl

# Parallelism C++ for CPU
RUN apt-get update && apt-get install -y libboost-all-dev libtbb-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /root/



# Build dependencies OpenCV needs on Ubuntu.
RUN apt-get update && apt-get install -y \
    unzip \
    libjpeg-dev libpng-dev libpng++-dev libtiff-dev libgtk2.0-dev \
    libatlas-base-dev gfortran \
    && rm -rf /var/lib/apt/lists/*


# Shallow, tag-pinned clones: the checked-out tree is identical to cloning
# everything and running `git checkout tags/4.10.0`, but the full history is
# not downloaded and does not bloat the layer.
RUN git clone --depth 1 --branch 4.10.0 https://github.com/opencv/opencv.git && \
    mkdir opencv/build

RUN git clone --depth 1 --branch 4.10.0 https://github.com/opencv/opencv_contrib.git

# on aarch64 there is no binary package for nlohmann-json3, so build it from source
RUN git clone https://github.com/nlohmann/json.git
RUN cd json && mkdir build && \
    cd build && \
    cmake .. && make -j$(nproc) install

# OpenCV looks for the cuDNN version in cudnn_version.h, but it's been renamed to cudnn_version_v8.h
RUN ln -sfnv /usr/include/$(uname -i)-linux-gnu/cudnn_version_v*.h /usr/include/$(uname -i)-linux-gnu/cudnn_version.h

# IMPORTANT: must specify CXX_17 version. Default is c++11 which will cause compilation issues.
# On aarch64 we can enable NEON for CPU level optimisations.
# It is vital we use gcc-11 as otherwise we cannot compile the cuda level code (ie. CUDA 12.9).
# Flags are grouped (general / python / CUDA+cuDNN / features / disabled
# targets) with uniform `-D ` spacing; all values are unchanged.
RUN cd opencv/build && \
    cmake \
        -D CMAKE_BUILD_TYPE=Release \
        -D CMAKE_INSTALL_PREFIX=/usr/local \
        -D CMAKE_CXX_STANDARD=17 \
        -D BUILD_opencv_python=OFF \
        -D BUILD_opencv_python2=OFF \
        -D BUILD_opencv_python3=ON \
        -D WITH_CUDA=ON \
        -D WITH_CUDNN=ON \
        -D INSTALL_CUDA_LIBS=OFF \
        -D CUDA_ARCH_PTX="" \
        -D CUDA_ARCH_BIN=8.7 \
        -D CUDNN_INCLUDE_DIR=/usr/include/$(uname -i)-linux-gnu \
        -D OPENCV_DNN_CUDA=ON \
        -D ENABLE_FAST_MATH=ON \
        -D CUDA_FAST_MATH=ON \
        -D WITH_VTK=ON \
        -D WITH_CUFFT=ON \
        -D WITH_CUBLAS=ON \
        -D WITH_TBB=ON \
        -D ENABLE_NEON=ON \
        -D BUILD_TESTS=OFF \
        -D BUILD_PERF_TESTS=OFF \
        -D BUILD_opencv_ts=OFF \
        -D BUILD_opencv_sfm=OFF \
        -D BUILD_JAVA=OFF \
        -D OPENCV_EXTRA_MODULES_PATH=/root/opencv_contrib/modules \
        .. && \
    make -j$(nproc) install

# config_utilities (header/util library from MIT-SPARK).
RUN git clone https://github.com/MIT-SPARK/config_utilities.git
RUN cd config_utilities/config_utilities && mkdir build && \
    cd build && \
    cmake .. && make -j$(nproc) install



# Install GTSAM, pinned to the 4.2.0 tag.
# NOTE(review): the original `git fetch` directly after a fresh clone was a
# no-op and has been removed.
RUN git clone https://github.com/borglab/gtsam.git
RUN cd gtsam && \
    git checkout tags/4.2.0 && \
    mkdir build && \
    cd build && \
    cmake -DCMAKE_INSTALL_PREFIX=/usr/local \
        -DGTSAM_USE_SYSTEM_EIGEN=ON \
        -DGTSAM_BUILD_TESTS=OFF \
        -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
        -DCMAKE_BUILD_TYPE=Release \
        -DGTSAM_BUILD_UNSTABLE=ON \
        -DGTSAM_POSE3_EXPMAP=ON \
        -DGTSAM_ROT3_EXPMAP=ON \
        -DGTSAM_TANGENT_PREINTEGRATION=OFF .. && \
    make -j$(nproc) install


# Install Open_GV
RUN git clone https://github.com/MIT-SPARK/opengv
RUN cd opengv && \
    mkdir build
RUN cd opengv/build && \
    cmake -DCMAKE_BUILD_TYPE=Release \
        -DCMAKE_INSTALL_PREFIX=/usr/local \
        .. && make -j$(nproc) install


# Default to the zenoh RMW in interactive shells.
# NOTE(review): this only affects root's interactive bash sessions -- confirm
# the container is actually used with a root login shell.
RUN echo 'export RMW_IMPLEMENTATION=rmw_zenoh_cpp' >> ~/.bashrc

# Workspace layout expected by the create_container scripts (one mkdir layer).
RUN mkdir -p /home/user/dev_ws/src/core \
    /home/user/dev_ws/src/third_parties \
    /home/user/upstream_ws/src

SHELL ["/bin/bash", "-c"]

# NOTE(review): the original `RUN source /opt/ros/jazzy/setup.bash` was a
# no-op -- environment set in a RUN lives only for that single build layer and
# never reaches the final image. Persist it for interactive shells instead.
RUN echo 'source /opt/ros/jazzy/setup.bash' >> ~/.bashrc

WORKDIR /home/user/dev_ws
26 changes: 26 additions & 0 deletions docker/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Docker Files for DynoSAM

Base images are pulled from [docker-ros-ml-images](https://github.com/ika-rwth-aachen/docker-ros-ml-images)

- Dockerfile.amd64 is a linux/amd64 image tested on x86_64 desktop
- Dockerfile.l4t_jetpack6 is built for linux/arm64 and tested on an NVIDIA ORIN NX with Jetpack 6

## Jetson Settings
Architecture | aarch64
Ubuntu | 22.04.5 LTS (Jammy Jellyfish)
Jetson Linux | 36.4.7
Python | 3.10.12
ROS | jazzy
CMake | 3.22.1
CUDA | 12.6.77-1
cuDNN | 9.3.0.75-1
TensorRT | 10.7.0.23-1+cuda12.6
PyTorch | 2.8.0
GPUs | (Orin (nvgpu))
OpenCV | 4.10.0

> NOTE: The CUDA/PyTorch/TensorRT versions listed above come from the base image, but in practice we have been using CUDA 12.9.

## Other versioning
matplotlib=3.6.3
numpy=1.26.4
9 changes: 0 additions & 9 deletions docker/build_docker.sh

This file was deleted.

2 changes: 2 additions & 0 deletions docker/build_docker_amd64.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Build the amd64 DynoSAM image. Resolve build_docker_base.sh relative to this
# file so the script works when invoked from any working directory.
bash "$(dirname "${BASH_SOURCE[0]}")/build_docker_base.sh" Dockerfile.amd64 acfr_rpg/dyno_sam_cuda
29 changes: 29 additions & 0 deletions docker/build_docker_base.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Build a dockerfile (located next to this script) and tag the image.
# Usage: ./build_docker_base.sh DOCKERFILE TAG
DOCKERFILE=$1
TAG=$2

# Both arguments are required.
if [[ -z $TAG ]] || [[ -z $DOCKERFILE ]]; then
    SCRIPT_NAME=$(basename "$0")
    echo "Usage: ./$SCRIPT_NAME DOCKERFILE TAG"
    # NOTE: `exit -1` is non-portable (negative codes wrap to 255); use 1.
    exit 1
fi


# 1. Identify the directory where this script is located
#    This resolves the path even if called from a different location
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# 2. Temporarily change into the script's directory
echo "Changing working directory to: $SCRIPT_DIR"
cd "$SCRIPT_DIR" || { echo "Failed to change directory"; exit 1; }

# 3. Verify the file exists in this directory, then build it
if [ -f "$DOCKERFILE" ]; then
    echo "Building dockerfile $DOCKERFILE with tag $TAG"
    # Quote the arguments so paths/tags containing spaces do not word-split.
    DOCKER_BUILDKIT=1 docker build --network=host -f "$DOCKERFILE" -t "$TAG" .
else
    echo "Error: File '$DOCKERFILE' not found in $SCRIPT_DIR"
    exit 1
fi
2 changes: 2 additions & 0 deletions docker/build_docker_l4t_jetpack6.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Build the Jetson (l4t / Jetpack 6) DynoSAM image. Resolve the base script
# relative to this file so it works from any working directory.
bash "$(dirname "${BASH_SOURCE[0]}")/build_docker_base.sh" Dockerfile.l4t_jetpack6 acfr_rpg/dynosam_cuda_l4t
10 changes: 10 additions & 0 deletions docker/create_container_amd64.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

### EDIT THIS TO WHEREVER YOU'RE STORING YOUR DATA ###
# folder should exist before you mount it
LOCAL_DATA_FOLDER=/media/jmor6670/T7/datasets
LOCAL_RESULTS_FOLDER=~/results/
LOCAL_DYNO_SAM_FOLDER=~/Code/src/DynOSAM/
LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER=~/Code/src/third_party_dynosam/

# Resolve the base script relative to this file so the script works when
# invoked from any working directory; quote the folder args so paths with
# spaces survive word-splitting.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
bash "$SCRIPT_DIR/create_container_base.sh" acfr_rpg/dyno_sam_cuda dyno_sam \
    "$LOCAL_DATA_FOLDER" "$LOCAL_RESULTS_FOLDER" "$LOCAL_DYNO_SAM_FOLDER" "$LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER"
50 changes: 38 additions & 12 deletions docker/create_container.sh → docker/create_container_base.sh
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
#!/usr/bin/env bash

CONTAINER_NAME=dyno_sam
CONTAINER_IMAGE_NAME=acfr_rpg/dyno_sam_cuda
CONTAINER_IMAGE_NAME=$1
CONTAINER_NAME=$2

### EDIT THIS TO WHEREVER YOU'RE STORING YOU DATA ###
# folder should exist before you mount it
LOCAL_DATA_FOLDER=/media/jmor6670/T7/datasets
LOCAL_RESULTS_FOLDER=~/results/
LOCAL_DYNO_SAM_FOLDER=~/Code/src/DynOSAM/
LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER=~/Code/src/third_party_dynosam/
LOCAL_DATA_FOLDER=$3
LOCAL_RESULTS_FOLDER=$4
LOCAL_DYNO_SAM_FOLDER=$5
LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER=$6

echo "Creating dynosam container ($CONTAINER_NAME) from image: $CONTAINER_IMAGE_NAME"
echo "Local data folder: $LOCAL_DATA_FOLDER"
echo "Local results folder: $LOCAL_RESULTS_FOLDER"
echo "Local DynoSAM folder: $LOCAL_DYNO_SAM_FOLDER"
echo "Local third party dynosam folder: $LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER"


CONTAINER_DATA_FOLDER=/root/data
Expand All @@ -17,8 +21,6 @@ CONTAINER_WORKSPACE_FOLDER=/home/user/dev_ws/src/core/
CONTAINER_WORKSPACE_FOLDER_THIRD_PARTY=/home/user/dev_ws/src/third_parties/




USE_NVIDIA=false

# If we are running in a docker-in-docker scenario NVIDIA_SOS will be populated
Expand Down Expand Up @@ -78,11 +80,34 @@ if "$USE_NVIDIA"; then
else
TERMINAL_FLAGS='-t'
fi

echo "$@"
# Create the container based on the launchfile it's launching (if any)
# removes '.launch' from the last argument
echo "Container name will be: $CONTAINER_NAME"
# docker run $DOCKER_NVIDIA_SO_VOLUMES \
docker run \
# docker run \
# --privileged \
# --gpus all \
# -i -d \
# --volume $XSOCK:$XSOCK:rw \
# -v $LOCAL_DATA_FOLDER:$CONTAINER_DATA_FOLDER \
# -v $LOCAL_RESULTS_FOLDER:$CONTAINER_RESULTS_FOLDER \
# -v $LOCAL_DYNO_SAM_FOLDER:$CONTAINER_WORKSPACE_FOLDER \
# -v $LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER:$CONTAINER_WORKSPACE_FOLDER_THIRD_PARTY \
# -v /var/run/docker.sock:/var/run/docker.sock \
# --env DISPLAY=$DISPLAY \
# --env XAUTHORITY=$XAUTH \
# --env QT_X11_NO_MITSHM=0 \
# --env QT_X11_NO_XRENDER=0 \
# --volume $XAUTH:$XAUTH:rw \
# --net host \
# --pid host \
# --ipc host \
# -it \
# --name=$CONTAINER_NAME \
# $CONTAINER_IMAGE_NAME "$@"
docker run \
--privileged \
--gpus all \
-i -d \
Expand All @@ -102,7 +127,8 @@ if "$USE_NVIDIA"; then
--ipc host \
-it \
--name=$CONTAINER_NAME \
$CONTAINER_IMAGE_NAME "$@"
$CONTAINER_IMAGE_NAME
fi

# FOR NOW?
xhost +local:`docker inspect --format='{{ .Config.Hostname }}' $CONTAINER_NAME`
11 changes: 11 additions & 0 deletions docker/create_container_l4t_jetpack6.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#!/usr/bin/env bash


### EDIT THIS TO WHEREVER YOU'RE STORING YOUR DATA ###
# folder should exist before you mount it
LOCAL_DATA_FOLDER=/media/jmor6670/T7/datasets/
LOCAL_RESULTS_FOLDER=/home/usyd/dynosam/results/
LOCAL_DYNO_SAM_FOLDER=/home/usyd/dynosam/DynOSAM/
LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER=/home/usyd/dynosam/extra

# Resolve the base script relative to this file so the script works when
# invoked from any working directory; quote the folder args so paths with
# spaces survive word-splitting.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
bash "$SCRIPT_DIR/create_container_base.sh" acfr_rpg/dynosam_cuda_l4t dyno_sam_l4t \
    "$LOCAL_DATA_FOLDER" "$LOCAL_RESULTS_FOLDER" "$LOCAL_DYNO_SAM_FOLDER" "$LOCAL_THIRD_PARTY_DYNO_SAM_FOLDER"
Loading