Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,4 @@ container_root/profilers/**
**container_root/trav_ws/**
**traversability_mapping**
container_root/ros_env_vars.sh
container_root/.nv/
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
[submodule "ORB_SLAM3"]
path = ORB_SLAM3
url = https://github.com/suchetanrs/ORB_SLAM3
[submodule "FastTrack"]
path = FastTrack
url = https://github.com/suchetanrs/FastTrack
93 changes: 83 additions & 10 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
# Image taken from https://github.com/turlucode/ros-docker-gui
FROM osrf/ros:humble-desktop-full-jammy
# ===============================================================================
# Default to the CPU build. CUDA kernels are built only if `--target nvidia_gpu` is specified.
# ===============================================================================
ARG TARGET=cpu

# ===============================================================================
# Base stage (Common dependencies for CPU and NVIDIA GPU builds)
# ===============================================================================
FROM osrf/ros:humble-desktop-full-jammy AS base
ARG USE_CI

RUN apt-get update
Expand Down Expand Up @@ -28,7 +36,6 @@ RUN apt-get install -y \

RUN apt update


# Build OpenCV
RUN apt-get install -y python3-dev python3-numpy python2-dev
RUN apt-get install -y libavcodec-dev libavformat-dev libswscale-dev
Expand All @@ -42,13 +49,6 @@ RUN cd /tmp && git clone https://github.com/opencv/opencv.git && \
make -j8 && make install && \
cd / && rm -rf /tmp/opencv

# Build Pangolin
RUN cd /tmp && git clone https://github.com/stevenlovegrove/Pangolin && \
cd Pangolin && git checkout v0.9.1 && mkdir build && cd build && \
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=-std=c++14 -DCMAKE_INSTALL_PREFIX=/usr/local .. && \
make -j8 && make install && \
cd / && rm -rf /tmp/Pangolin

# Build vscode (can be removed later for deployment)
COPY ./container_root/shell_scripts/vscode_install.sh /root/
RUN cd /root/ && sudo chmod +x * && ./vscode_install.sh && rm -rf vscode_install.sh
Expand All @@ -57,6 +57,74 @@ RUN apt-get update && apt-get install ros-humble-pcl-ros tmux -y
RUN apt-get install ros-humble-nav2-common x11-apps nano -y
RUN apt-get install -y gdb gdbserver ros-humble-rmw-cyclonedds-cpp ros-humble-cv-bridge ros-humble-image-transport ros-humble-image-common ros-humble-vision-opencv


# ===============================================================================
# NVIDIA GPU image stage (built if `--target nvidia_gpu` is specified)
# ===============================================================================
FROM nvidia/opengl:1.0-glvnd-devel-ubuntu18.04 AS glvnd
FROM base AS nvidia_gpu

RUN apt-get update && apt-get install -y --no-install-recommends \
libglvnd0 \
libgl1 \
libglx0 \
libegl1 \
libgles2
COPY --from=glvnd /usr/share/glvnd/egl_vendor.d/10_nvidia.json /usr/share/glvnd/egl_vendor.d/10_nvidia.json
ENV NVIDIA_VISIBLE_DEVICES ${NVIDIA_VISIBLE_DEVICES:-all}
ENV NVIDIA_DRIVER_CAPABILITIES ${NVIDIA_DRIVER_CAPABILITIES:-all}

# Basic deps for adding NVIDIA apt repo
RUN apt-get update && apt-get install -y --no-install-recommends \
wget ca-certificates gnupg

# Add NVIDIA CUDA repo keyring (Ubuntu 22.04 repo)
RUN wget -q https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb \
&& dpkg -i cuda-keyring_1.1-1_all.deb && rm -rf cuda-keyring_1.1-1_all.deb

# Install CUDA Toolkit 12.2 (no driver)
RUN apt-get update && apt-get install -y --no-install-recommends \
cuda-toolkit-12-2

# make CUDA visible on PATH
ENV PATH=/usr/local/cuda-12.2/bin:${PATH}
ENV LD_LIBRARY_PATH=/usr/local/cuda-12.2/lib64:${LD_LIBRARY_PATH}
ENV CUDA_CACHE_PATH=/tmp/cuda_cache

COPY FastTrack/Thirdparty/Pangolin /tmp/Pangolin
RUN cd /tmp/Pangolin && mkdir build && cd build && \
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=-std=c++14 -DCMAKE_INSTALL_PREFIX=/usr/local .. && \
make -j8 && \
make install && \
cd / && rm -rf /tmp/Pangolin && ldconfig

COPY FastTrack /home/orb/ORB_SLAM3
COPY orb_slam3_ros2_wrapper /root/colcon_ws/src/orb_slam3_ros2_wrapper
COPY orb_slam3_map_generator /root/colcon_ws/src/orb_slam3_map_generator
COPY slam_msgs /root/colcon_ws/src/slam_msgs

# Build ORB-SLAM3 with its dependencies.
RUN if [ "$USE_CI" = "true" ]; then \
. /opt/ros/humble/setup.sh && cd /home/orb/ORB_SLAM3 && mkdir -p build && ./build.sh && \
. /opt/ros/humble/setup.sh && cd /root/colcon_ws/ && colcon build --symlink-install; \
fi

RUN rm -rf /home/orb/ORB_SLAM3 /root/colcon_ws

# ===============================================================================
# CPU image stage (default; used unless `--target nvidia_gpu` is specified)
# ===============================================================================

FROM base AS cpu

# Build Pangolin
RUN cd /tmp && git clone https://github.com/stevenlovegrove/Pangolin && \
cd Pangolin && git checkout v0.9.1 && mkdir build && cd build && \
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=-std=c++14 -DCMAKE_INSTALL_PREFIX=/usr/local .. && \
make -j8 && \
make install && \
cd / && rm -rf /tmp/Pangolin && ldconfig

COPY ORB_SLAM3 /home/orb/ORB_SLAM3
COPY orb_slam3_ros2_wrapper /root/colcon_ws/src/orb_slam3_ros2_wrapper
COPY orb_slam3_map_generator /root/colcon_ws/src/orb_slam3_map_generator
Expand All @@ -68,4 +136,9 @@ RUN if [ "$USE_CI" = "true" ]; then \
. /opt/ros/humble/setup.sh && cd /root/colcon_ws/ && colcon build --symlink-install; \
fi

RUN rm -rf /home/orb/ORB_SLAM3 /root/colcon_ws
RUN rm -rf /home/orb/ORB_SLAM3 /root/colcon_ws

# ===============================================================================
# Final stage (Either CPU or NVIDIA GPU based on `--target` flag)
# ===============================================================================
FROM ${TARGET} AS final
1 change: 1 addition & 0 deletions FastTrack
Submodule FastTrack added at 30cab0
16 changes: 16 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,20 @@ sudo chmod +x container_root/shell_scripts/docker_install.sh
3. ```source ~/.bashrc```
4. You can see the built images on your machine by running ```sudo docker images```.

### To build the NVIDIA CUDA version:

Replace step 1. with ```sudo docker build --build-arg USE_CI=false --build-arg TARGET=nvidia_gpu -t orb-slam3-humble-nvidia:22.04 .```

## 4. Running the container

1. ```cd ORB-SLAM3-ROS2-Docker``` (ignore if you are already in the folder)
2. ```sudo docker compose run orb_slam3_22_humble```
3. This should take you inside the container. Once you are inside, run the command ```xeyes``` and a pair of eyes should pop up. If they do, X11 forwarding has been set up correctly on your computer.

### To run the NVIDIA CUDA version:

Replace step 2. with ```sudo docker compose run orb_slam3_22_nvidia```

## 5. Building the ORB-SLAM3 Wrapper

Launch the container using steps in (4).
Expand All @@ -50,6 +58,14 @@ cd /home/orb/ORB_SLAM3/ && sudo chmod +x build.sh && ./build.sh
cd /root/colcon_ws/ && colcon build --symlink-install && source install/setup.bash
```

### To build with CUDA:

Launch the container using steps in (4).
```bash
cd /home/orb/ORB_SLAM3/ && sudo chmod +x build.sh && ./build.sh
cd /root/colcon_ws/ && rm -rf build && colcon build --symlink-install --cmake-args -DORB_SLAM3_ROS2_WRAPPER_ENABLE_CUDA=ON && source install/setup.bash
```

## Launching ORB-SLAM3

Launch the container using steps in (4).
Expand Down
Loading