diff --git a/README.md b/README.md index 5e1f6aa98..5b7e53b66 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,11 @@ ## 📚 Publications The offical code used for our paper: -- [Jesse Morris](https://jessemorris.github.io/), Yiduo Wang, Mikolaj Kliniewski, Viorela Ila, [*DynoSAM: Open-Source Smoothing and Mapping Framework for Dynamic SLAM*](https://arxiv.org/pdf/2501.11893). Accepted Transactions on Robotics (T-RO) Visual SLAM Special Issue (2025). +- [Jesse Morris](https://jessemorris.github.io/), Yiduo Wang, Mikolaj Kliniewski, Viorela Ila, [*DynoSAM: Open-Source Smoothing and Mapping Framework for Dynamic SLAM*](https://arxiv.org/pdf/2501.11893). Accepted Transactions on Robotics (T-RO) Visual SLAM Special Issue, 2025. + + +#### ** Update December 2025 ** +Our work has been accepted to IEEE Robotics and Automation Letters (RA-L) #### ** Update November 2025 ** Our work has been accepted to IEEE Transactions on Robotics (T-RO) @@ -37,7 +41,7 @@ Our work has been accepted to IEEE Transactions on Robotics (T-RO) #### ** Update September 2025 ** This code now also contains the code for our new work -- J.Morris, Y. Wang, V. Ila. [*Online Dynamic SLAM with Incremental Smoothing and Mapping*](https://www.arxiv.org/abs/2509.08197), Arxiv. Submitted RA-L (2025) +- J.Morris, Y. Wang, V. Ila. 
[*Online Dynamic SLAM with Incremental Smoothing and Mapping*](https://www.arxiv.org/abs/2509.08197), Accepted Robotics and Automation Letters (RA-L), 2025 We kindly ask to cite our papers if you find these works useful: @@ -53,11 +57,15 @@ We kindly ask to cite our papers if you find these works useful: doi={10.1109/TRO.2025.3641813} } -@article{morris2025online, - title={Online Dynamic SLAM with Incremental Smoothing and Mapping}, +@inproceedings{morris2025online, author={Morris, Jesse and Wang, Yiduo and Ila, Viorela}, - journal={arXiv preprint arXiv:2509.08197}, - year={2025} + journal={IEEE Robotics and Automation Letters}, + title={Online Dynamic SLAM with Incremental Smoothing and Mapping}, + year={2026}, + volume={}, + number={}, + pages={1-8}, + doi={10.1109/LRA.2026.3655286}} } ``` @@ -92,7 +100,7 @@ We currently support building for AARM64 devices and DynoSAM has been tested on > NOTE: DynoSAM does not currently run real-time on the ORIN NX (mostly bottlenecked by the object detection process). On a more powerful device better performance is expected. -Also see the [Docker REAMDE.md](./docker/README.md) and the [dynosam_nn README.md](./dynosam_nn//README.md) for more information on hardware and performance. +Also see the [Docker README.md](./docker/README.md) and the [dynosam_nn README.md](./dynosam_nn//README.md) for more information on hardware and performance. # 2. 
🚀 Running DynoSAM diff --git a/docker/Dockerfile.amd64 b/docker/Dockerfile.amd64 index 7d0806905..77aa1b4cc 100644 --- a/docker/Dockerfile.amd64 +++ b/docker/Dockerfile.amd64 @@ -30,7 +30,7 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* # ROS installs -RUN apt-get install -y \ +RUN apt-get update && apt-get install -y \ ros-kilted-ros2cli-common-extensions \ ros-kilted-vision-opencv \ nlohmann-json3-dev diff --git a/docker/README.md b/docker/README.md index 4db1cbc4e..3a7c70fc0 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,10 +2,11 @@ Base images are pulled from [docker-ros-ml-images](https://github.com/ika-rwth-aachen/docker-ros-ml-images) -- Dockerfile.amd64 is a linux/amd64 image tested on x86_64 desktop -- Dockerfile.l4t_jetpack6 is build from linux/arm64 tested on an NVIDIA ORIN NX with Jetpack 6 +- `Dockerfile.amd64` is a linux/amd64 image tested on x86_64 desktop +- `Dockerfile.l4t_jetpack6` is build from linux/arm64 tested on an NVIDIA ORIN NX with Jetpack 6 ## Jetson Settings +``` Architecture | aarch64 Ubuntu | 22.04.5 LTS (Jammy Jellyfish) Jetson Linux | 36.4.7 @@ -18,9 +19,12 @@ TensorRT | 10.7.0.23-1+cuda12.6 PyTorch | 2.8.0 GPUs | (Orin (nvgpu)) OpenCV | 4.10.0 +``` > NOTE: The CUDA/Pytorch/TensorRT versions settings come with the base dockerfile but in practice we have been using CUDA 12.9. ## Other versioning +```txt matplotlib=3.6.3 -numpy=1.26.4 \ No newline at end of file +numpy=1.26.4 +``` \ No newline at end of file diff --git a/docs/media/INSTALL.md b/docs/media/INSTALL.md index dca9a197f..5d43fd175 100644 --- a/docs/media/INSTALL.md +++ b/docs/media/INSTALL.md @@ -29,9 +29,9 @@ To install natively, install the dependancies as required by docker and build as ## Docker Install instructions -DynoSAM has been tested on __x86_64__ and __aarm64__ (with a NVIDIA ORIN NX) devices using the [two docker files](../../docker/) provided. See the [REAMDE.md](../../docker/README.md) for more detail on hardware used etc. 
+DynoSAM has been tested on __x86_64__ and __aarm64__ (with a NVIDIA ORIN NX) devices using the [two docker files](../../docker/) provided. See the [README.md](../../docker/README.md) for more detail on hardware used etc. -We provide scripts to build and create docker containers to run and develop DynoSAM which is intendend to be mounted within the created container. +We provide scripts to build and create docker containers to run and develop DynoSAM which is intended to be mounted within the created container. > NOTE: with the current setup the embedded device only supports ROS Jazzy. The code should compile on either device without modification. diff --git a/dynosam/include/dynosam/frontend/FrontendModule.hpp b/dynosam/include/dynosam/frontend/FrontendModule.hpp index 018588e51..1cbc140cc 100644 --- a/dynosam/include/dynosam/frontend/FrontendModule.hpp +++ b/dynosam/include/dynosam/frontend/FrontendModule.hpp @@ -39,6 +39,8 @@ #include "dynosam_common/ModuleBase.hpp" #include "dynosam_common/SharedModuleInfo.hpp" #include "dynosam_common/Types.hpp" +#include "dynosam/frontend/vision/FeatureTracker.hpp" + // #include "dynosam_common" @@ -84,6 +86,8 @@ class FrontendModule virtual void onBackendUpdateCallback(const FrameId /*frame_id*/, const Timestamp /*timestamp*/) {} + void FrameToClassMap(const Frame::Ptr& frame) const; + protected: /** * @brief Defines the result of checking the image container which is a done diff --git a/dynosam/include/dynosam/frontend/RGBDInstanceFrontendModule.hpp b/dynosam/include/dynosam/frontend/RGBDInstanceFrontendModule.hpp index 1b17db39a..3545f3584 100644 --- a/dynosam/include/dynosam/frontend/RGBDInstanceFrontendModule.hpp +++ b/dynosam/include/dynosam/frontend/RGBDInstanceFrontendModule.hpp @@ -126,6 +126,13 @@ class RGBDInstanceFrontendModule : public FrontendModule { //! 
Last keyframe Frame::Ptr frame_lkf_; + + // Previous packet + VisionImuPacket::Ptr previous_vision_imu_packet_; + + }; + + } // namespace dyno diff --git a/dynosam/include/dynosam/frontend/VisionImuOutputPacket.hpp b/dynosam/include/dynosam/frontend/VisionImuOutputPacket.hpp index 563951a06..286b44583 100644 --- a/dynosam/include/dynosam/frontend/VisionImuOutputPacket.hpp +++ b/dynosam/include/dynosam/frontend/VisionImuOutputPacket.hpp @@ -80,7 +80,7 @@ class VisionImuPacket { }; /** - * @brief Object track information epresenting visual measurements for a + * @brief Object track information representing visual measurements for a * single object as well as frame-to-frame (and possibly other) motion/pose * information. * @@ -99,6 +99,8 @@ class VisionImuPacket { ImuFrontend::PimPtr pim() const; Camera::ConstPtr camera() const; PointCloudLabelRGB::Ptr denseLabelledCloud() const; + gtsam::Vector6 getBodyVelocity() const; + bool isCameraKeyFrame() const; bool isObjectKeyFrame(ObjectId object_id) const; @@ -107,6 +109,7 @@ class VisionImuPacket { const CameraTracks& cameraTracks() const; const gtsam::Pose3& cameraPose() const; + /** * @brief Returns the relative camera motion T_k_1_k, representing the motion * of the camera from k-1 to k in the camera local frame (at k-1) @@ -149,6 +152,7 @@ class VisionImuPacket { VisionImuPacket& groundTruthPacket( const GroundTruthInputPacket::Optional& gt); VisionImuPacket& debugImagery(const DebugImagery::Optional& dbg); + VisionImuPacket& updateBodyVelocity(const VisionImuPacket& prev_state); bool operator==(const VisionImuPacket& other) const { return frame_id_ == other.frame_id_ && timestamp_ == other.timestamp_; @@ -196,6 +200,11 @@ class VisionImuPacket { static void fillLandmarkMeasurements( StatusLandmarkVector& landmarks, const CameraMeasurementStatusVector& camera_measurements); + + + private: + std::optional stored_body_velocity; + }; } // namespace dyno diff --git 
a/dynosam/include/dynosam/frontend/vision/FeatureTracker.hpp b/dynosam/include/dynosam/frontend/vision/FeatureTracker.hpp index 7040d271f..7a8ee78b7 100644 --- a/dynosam/include/dynosam/frontend/vision/FeatureTracker.hpp +++ b/dynosam/include/dynosam/frontend/vision/FeatureTracker.hpp @@ -36,10 +36,12 @@ #include "dynosam/frontend/FrontendParams.hpp" #include "dynosam/frontend/vision/FeatureTrackerBase.hpp" #include "dynosam/frontend/vision/Frame.hpp" +#include "dynosam/frontend/FrontendModule.hpp" #include "dynosam/frontend/vision/StaticFeatureTracker.hpp" #include "dynosam/visualizer/Visualizer-Definitions.hpp" #include "dynosam_cv/Camera.hpp" #include "dynosam_cv/Feature.hpp" +#include "dynosam_common/SharedModuleInfo.hpp" // #include "dynosam_common/DynamicObjects.hpp" #include "dynosam_nn/ObjectDetector.hpp" @@ -146,7 +148,7 @@ class FeatureTracker : public FeatureTrackerBase { private: // TODO: for now we loose the actual object detection result if inference was // run! - bool objectDetection( + ObjectDetectionResult objectDetection( vision_tools::ObjectBoundaryMaskResult& boundary_mask_result, ImageContainer& image_container); diff --git a/dynosam/src/frontend/FrontendModule.cc b/dynosam/src/frontend/FrontendModule.cc index e084817d3..0bfaf3b0c 100644 --- a/dynosam/src/frontend/FrontendModule.cc +++ b/dynosam/src/frontend/FrontendModule.cc @@ -66,4 +66,18 @@ void FrontendModule::validateInput( result.valid_, *input->image_container_, result.requirement_); } +void FrontendModule::FrameToClassMap(const Frame::Ptr& frame) const { + const auto& object_observations = frame->getObjectObservations(); + + for (const auto& [object_id, detection] : object_observations) { + if (!detection.class_name.empty()) { + shared_module_info.updateClassLabelMapping(object_id, detection.class_name); + std::cout << "UPDATING CLASS LABEL MAPPING " << object_id << " IS " << detection.class_name << std::endl; + } + else { + std::cout << "CANNOT UPDATE CLASS LABEL MAPPING; IT WAS 
EMPTY!" << std::endl; + } + } +} + } // namespace dyno diff --git a/dynosam/src/frontend/RGBDInstanceFrontendModule.cc b/dynosam/src/frontend/RGBDInstanceFrontendModule.cc index d67875711..628e1dc10 100644 --- a/dynosam/src/frontend/RGBDInstanceFrontendModule.cc +++ b/dynosam/src/frontend/RGBDInstanceFrontendModule.cc @@ -116,6 +116,8 @@ FrontendModule::SpinReturn RGBDInstanceFrontendModule::boostrapSpin( Frame::Ptr frame = tracker_->track(input->getFrameId(), input->getTimestamp(), *image_container); + + CHECK(frame->updateDepths()); return {State::Nominal, nullptr}; @@ -148,6 +150,7 @@ FrontendModule::SpinReturn RGBDInstanceFrontendModule::nominalSpin( Frame::Ptr frame = tracker_->track(input->getFrameId(), input->getTimestamp(), *image_container, R_curr_ref); + FrontendModule::FrameToClassMap(frame); Frame::Ptr previous_frame = tracker_->getPreviousFrame(); CHECK(previous_frame); @@ -231,6 +234,13 @@ FrontendModule::SpinReturn RGBDInstanceFrontendModule::nominalSpin( fillOutputPacketWithTracks(vision_imu_packet, *frame, T_k_1_k, object_motions, object_poses); + // Update the body velocity according to the previous vision IMU packet + if (previous_vision_imu_packet_) { + vision_imu_packet->updateBodyVelocity(*previous_vision_imu_packet_); + } + + previous_vision_imu_packet_ = vision_imu_packet; + if (R_curr_ref) { imu_frontend_.resetIntegration(); } diff --git a/dynosam/src/frontend/VisionImuOutputPacket.cc b/dynosam/src/frontend/VisionImuOutputPacket.cc index e4c24fe02..5d1e79eca 100644 --- a/dynosam/src/frontend/VisionImuOutputPacket.cc +++ b/dynosam/src/frontend/VisionImuOutputPacket.cc @@ -40,6 +40,8 @@ ImuFrontend::PimPtr VisionImuPacket::pim() const { return pim_; } Camera::ConstPtr VisionImuPacket::camera() const { return camera_; } +gtsam::Vector6 VisionImuPacket::getBodyVelocity() const { return *stored_body_velocity; } + PointCloudLabelRGB::Ptr VisionImuPacket::denseLabelledCloud() const { return dense_labelled_cloud_; } @@ -213,6 +215,32 @@ void 
VisionImuPacket::fillLandmarkMeasurements( } } +// Adding velocity information into the VisionImuPacket + +// * Function to update the previous body velocity * +VisionImuPacket& VisionImuPacket::updateBodyVelocity(const VisionImuPacket& prev_state) { + + // Getting the pose at k-1 + const gtsam::Pose3& w_L_k_1 = prev_state.cameraPose(); + + // Getting the motion from k-1 to k + const gtsam::Pose3& w_k_1_H_k = camera_tracks_.T_k_1_k; + + stored_body_velocity = calculateBodyMotion( + w_k_1_H_k, + w_L_k_1, + prev_state.timestamp(), + timestamp_ + ); + + return *this; + +} + +// * Function to get the body velocity * + + + } // namespace dyno namespace nlohmann { diff --git a/dynosam/src/frontend/vision/FeatureTracker.cc b/dynosam/src/frontend/vision/FeatureTracker.cc index 4d48925e0..f2cfb590a 100644 --- a/dynosam/src/frontend/vision/FeatureTracker.cc +++ b/dynosam/src/frontend/vision/FeatureTracker.cc @@ -102,7 +102,13 @@ Frame::Ptr FeatureTracker::track(FrameId frame_id, Timestamp timestamp, // result after the function call) ObjectDetectionEngine is used if // params_.prefer_provided_object_detection is false vision_tools::ObjectBoundaryMaskResult boundary_mask_result; - objectDetection(boundary_mask_result, input_images); + + ObjectDetectionResult observations = objectDetection(boundary_mask_result, input_images); + + for (const auto& det : observations.detections) { + std::cout << "Detected object_id=" << det.object_id + << " class_name=" << det.class_name << std::endl; + } if (!initial_computation_ && params_.use_propogate_mask) { utils::ChronoTimingStats timer("propogate_mask"); @@ -155,18 +161,10 @@ Frame::Ptr FeatureTracker::track(FrameId frame_id, Timestamp timestamp, // TODO: SingleDetectionResult really does not need the tracklet ids they // are never actually used!! 
this prevents the frame from needing to do the // same calculations we've alrady done - std::map object_observations; - for (size_t i = 0; i < boundary_mask_result.objects_detected.size(); i++) { - ObjectId object_id = boundary_mask_result.objects_detected.at(i); - const cv::Rect& bb_detection = - boundary_mask_result.object_bounding_boxes.at(i); - SingleDetectionResult observation; - observation.object_id = object_id; - // observation.object_features = dynamic_features.getByObject(object_id); - observation.bounding_box = bb_detection; - - object_observations[object_id] = observation; + std::map object_observations; + for (const auto& detection : observations.detections) { + object_observations[detection.object_id] = detection; } utils::ChronoTimingStats f_timer("tracking_timer.frame_construction"); @@ -188,6 +186,8 @@ Frame::Ptr FeatureTracker::track(FrameId frame_id, Timestamp timestamp, previous_frame_ = new_frame; boarder_detection_mask_ = boundary_mask_result.boundary_mask; + // FrontendModule::FrameToClassMap(new_frame); + return new_frame; } @@ -1148,7 +1148,7 @@ void FeatureTracker::requiresSampling( } } -bool FeatureTracker::objectDetection( +ObjectDetectionResult FeatureTracker::objectDetection( vision_tools::ObjectBoundaryMaskResult& boundary_mask_result, ImageContainer& image_container) { // from some experimental testing 10 pixles is a good boarder to add around @@ -1179,7 +1179,25 @@ bool FeatureTracker::objectDetection( vision_tools::computeObjectMaskBoundaryMask( boundary_mask_result, object_mask, scaled_boarder_thickness, kUseAsFeatureDetectionMask); - return false; + + ObjectDetectionResult detection_result; + detection_result.input_image = image_container.rgb(); + detection_result.labelled_mask = object_mask; + + for (size_t i = 0; i < boundary_mask_result.objects_detected.size(); i++) { + ObjectId object_id = boundary_mask_result.objects_detected.at(i); + const cv::Rect& bb_detection = + boundary_mask_result.object_bounding_boxes.at(i); + + 
SingleDetectionResult observation; + observation.object_id = object_id; + // observation.object_features = dynamic_features.getByObject(object_id); + observation.bounding_box = bb_detection; + observation.class_name = "goober"; + detection_result.detections.push_back(observation); + } + + return detection_result; } else { LOG(FATAL) << "Params specify prefer provided object mask but input " "is missing!"; @@ -1205,7 +1223,7 @@ bool FeatureTracker::objectDetection( // update or insert image container with object mask image_container.replace(ImageContainer::kObjectMask, object_mask); - return true; + return detection_result; } } @@ -1221,6 +1239,7 @@ void FeatureTracker::propogateMask(ImageContainer& image_container) { // note reference cv::Mat& current_mask = image_container.objectMotionMask(); + ObjectIds instance_labels; for (const Feature::Ptr& dynamic_feature : previous_frame_->usableDynamicFeaturesBegin()) { @@ -1348,6 +1367,7 @@ void FeatureTracker::propogateMask(ImageContainer& image_container) { current_mask.at(functional_keypoint::v(predicted_kp), functional_keypoint::u(predicted_kp)) = instance_labels[i]; + // current_rgb // updated_mask_points++; } @@ -1358,4 +1378,5 @@ void FeatureTracker::propogateMask(ImageContainer& image_container) { } } + } // namespace dyno diff --git a/dynosam_common/include/dynosam_common/DynamicObjects.hpp b/dynosam_common/include/dynosam_common/DynamicObjects.hpp index ef74681d6..82d33a61f 100644 --- a/dynosam_common/include/dynosam_common/DynamicObjects.hpp +++ b/dynosam_common/include/dynosam_common/DynamicObjects.hpp @@ -122,8 +122,10 @@ struct ObjectDetectionResult { * @param w_L_k_1 const gtsam::Pose3& * @return gtsam::Vector3 */ -gtsam::Vector3 calculateBodyMotion(const gtsam::Pose3& w_k_1_H_k, - const gtsam::Pose3& w_L_k_1); +gtsam::Vector6 calculateBodyMotion(const gtsam::Pose3& w_k_1_H_k, + const gtsam::Pose3& w_L_k_1, + Timestamp timestamp_km1, + Timestamp timestamp_k); enum PropogateType { InitGT, diff --git 
a/dynosam_common/include/dynosam_common/SharedModuleInfo.hpp b/dynosam_common/include/dynosam_common/SharedModuleInfo.hpp index f88d28c2d..a7648b025 100644 --- a/dynosam_common/include/dynosam_common/SharedModuleInfo.hpp +++ b/dynosam_common/include/dynosam_common/SharedModuleInfo.hpp @@ -43,6 +43,7 @@ class SharedModuleInfo { std::optional getGroundTruthPackets() const; const FrameIdTimestampMap& getTimestampMap() const; + const ObjectIdClassMap& getClassLabelMap() const; bool getTimestamp(FrameId frame_id, Timestamp& timestamp) const; @@ -50,6 +51,7 @@ class SharedModuleInfo { FrameId frame_id, const GroundTruthInputPacket& ground_truth_packet); SharedModuleInfo& updateTimestampMapping(FrameId frame_id, Timestamp timestamp); + SharedModuleInfo& updateClassLabelMapping(ObjectId object_id, std::string class_label); private: static std::unique_ptr instance_; @@ -58,6 +60,7 @@ class SharedModuleInfo { mutable std::mutex mutex_; GroundTruthPacketMap gt_packet_map_; FrameIdTimestampMap frame_id_to_timestamp_map_; + ObjectIdClassMap object_id_to_class_map_; }; struct SharedModuleInterface { diff --git a/dynosam_common/include/dynosam_common/Types.hpp b/dynosam_common/include/dynosam_common/Types.hpp index b904202e8..677c54640 100644 --- a/dynosam_common/include/dynosam_common/Types.hpp +++ b/dynosam_common/include/dynosam_common/Types.hpp @@ -111,6 +111,9 @@ using MotionMap = /// @brief Map of FrameIds (k) to Timestamps using FrameIdTimestampMap = gtsam::FastMap; +/// @brief Map of Object Ids to CLass Labels +using ObjectIdClassMap = gtsam::FastMap; + // T is expected to have (at least) bitwise | (OR) support template class Flags { diff --git a/dynosam_common/src/DynamicObjects.cc b/dynosam_common/src/DynamicObjects.cc index b136c7b2d..a4f579586 100644 --- a/dynosam_common/src/DynamicObjects.cc +++ b/dynosam_common/src/DynamicObjects.cc @@ -73,15 +73,52 @@ std::ostream& operator<<(std::ostream& os, return os; } -gtsam::Vector3 calculateBodyMotion(const gtsam::Pose3& 
w_k_1_H_k, - const gtsam::Pose3& w_L_k_1) { + +// BODY MOTION CALCULATION IMPLEMENTATION +gtsam::Vector6 calculateBodyMotion(const gtsam::Pose3& w_k_1_H_k, + const gtsam::Pose3& w_L_k_1, + Timestamp timestamp_km1, + Timestamp timestamp_k) { const gtsam::Point3& t_motion = w_k_1_H_k.translation(); const gtsam::Rot3& R_motion = w_k_1_H_k.rotation(); const gtsam::Point3& t_pose = w_L_k_1.translation(); + const gtsam::Rot3& R_pose = w_L_k_1.rotation(); static const gtsam::Rot3 I = gtsam::Rot3::Identity(); - return t_motion - (gtsam::Rot3(I.matrix() - R_motion.matrix())) * t_pose; + double dt = timestamp_k - timestamp_km1; + if (dt <= 1e-6) { + dt = 0.1; // fallback to nominal frame interval + } + + gtsam::Vector3 trans_vel = (t_motion - (gtsam::Rot3(I.matrix() - R_motion.matrix())) * t_pose) / dt; + +// ===== NOW CALCULATING THE ANGULAR VELOCITY ===== + + // Finding the relative rotation over the timestep + gtsam::Rot3 R_pose_transpose = R_pose.inverse(); + gtsam::Rot3 R_new = R_motion * R_pose; + gtsam::Rot3 R_rel = R_pose_transpose * R_new; + + // Calculating the trace of the relative rotation and the angle of rotation + double trace = R_rel.matrix().trace(); + double angle = std::acos((trace - 1) / 2); + + gtsam::Matrix3 R_rel_m = R_rel.matrix(); + + // Finding the unit axis of rotation + double ux = (1 / (2 * std::sin(angle))) * (R_rel_m(2,1) - R_rel_m(1,2)); + double uy = (1 / (2 * std::sin(angle))) * (R_rel_m(0,2) - R_rel_m(2,0)); + double uz = (1 / (2 * std::sin(angle))) * (R_rel_m(1,0) - R_rel_m(0,1)); + const gtsam::Vector3 axis(ux, uy, uz); + + const gtsam::Vector3 angular_vel = (angle / dt) * axis; + + gtsam::Vector6 body_velocity; + body_velocity.head<3>() = trans_vel; + body_velocity.tail<3>() = angular_vel; + + return body_velocity; } void propogateObjectPoses(ObjectPoseMap& object_poses, diff --git a/dynosam_common/src/SharedModuleInfo.cc b/dynosam_common/src/SharedModuleInfo.cc index 076b86213..c6c966a0e 100644 --- 
a/dynosam_common/src/SharedModuleInfo.cc +++ b/dynosam_common/src/SharedModuleInfo.cc @@ -57,6 +57,11 @@ std::optional SharedModuleInfo::getGroundTruthPackets() return gt_packet_map_; } +const ObjectIdClassMap& SharedModuleInfo::getClassLabelMap() const { + const std::lock_guard lock(mutex_); + return object_id_to_class_map_; +} + bool SharedModuleInfo::getTimestamp(FrameId frame_id, Timestamp& timestamp) const { const FrameIdTimestampMap& timestamp_map = getTimestampMap(); @@ -88,4 +93,17 @@ SharedModuleInfo& SharedModuleInfo::updateTimestampMapping( return *this; } +SharedModuleInfo& SharedModuleInfo::updateClassLabelMapping( + ObjectId object_id, std::string class_label) { + + const std::lock_guard lock(mutex_); + + if (object_id_to_class_map_.exists(object_id)) { + CHECK_EQ(object_id_to_class_map_.at(object_id), class_label); + } else { + object_id_to_class_map_.insert2(object_id, class_label); + } + return *this; +} + } // namespace dyno diff --git a/dynosam_cv/src/ImageContainer.cc b/dynosam_cv/src/ImageContainer.cc index 2dca31a91..91de1336a 100644 --- a/dynosam_cv/src/ImageContainer.cc +++ b/dynosam_cv/src/ImageContainer.cc @@ -62,6 +62,8 @@ const ImageWrapper& ImageContainer::objectMotionMask() const { return this->at(kObjectMask); } + + const ImageWrapper& ImageContainer::rightRgb() const { return this->at(kRightRgb); } @@ -78,6 +80,7 @@ ImageWrapper& ImageContainer::opticalFlow() { ImageWrapper& ImageContainer::objectMotionMask() { return this->at(kObjectMask); } + ImageWrapper& ImageContainer::rightRgb() { return this->at(kRightRgb); } diff --git a/dynosam_nn/CMakeLists.txt b/dynosam_nn/CMakeLists.txt index 8f1104308..8d40b3c6f 100644 --- a/dynosam_nn/CMakeLists.txt +++ b/dynosam_nn/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.17 FATAL_ERROR) -project(dynosam_nn LANGUAGES CXX CUDA) +project(dynosam_nn LANGUAGES C CXX CUDA) set(CMAKE_C_FLAGS "-Wall -Wextra -O3") @@ -12,6 +12,7 @@ set(CMAKE_CXX_STANDARD 17) list(APPEND CMAKE_MODULE_PATH 
"${CMAKE_CURRENT_SOURCE_DIR}/cmake") +find_package(MPI REQUIRED) find_package(dynosam_common REQUIRED) find_package(dynosam_cv REQUIRED) find_package(cv_bridge REQUIRED) diff --git a/dynosam_nn/README.md b/dynosam_nn/README.md index 3aacbdefe..ed8af5409 100644 --- a/dynosam_nn/README.md +++ b/dynosam_nn/README.md @@ -19,14 +19,14 @@ For object-level tracking we use a modified C++ implementation of ByteTracker > NOTE: this is the _install_ directory (ie. in the docker container it will be `/home/user/dev_ws/install/dynosam_nn/share/dynosam_nn/weights/`) - To export the model navigate to `dynosam_nn/export` and run ``` -run python3 export_yolo_tensorrt.py +python3 export_yolo_tensorrt.py ``` which should export a `.onnx` model to the weights directory. - YoloV8ObjectDetector requires an exported `.onnx` file which will be converted to a `.engine` file when first loaded. ## Install -- python3 -m pip install "ultralytics==8.3.0" "numpy<2.0" "opencv-python<5.0" -- sudo apt install python3-pybind11 +- `python3 -m pip install "ultralytics==8.3.0" "numpy<2.0" "opencv-python<5.0"` +- `sudo apt install python3-pybind11` > NOTE: these dependancies should already be installed when using the Dockerfile diff --git a/dynosam_ros/include/dynosam_ros/displays/DisplaysCommon.hpp b/dynosam_ros/include/dynosam_ros/displays/DisplaysCommon.hpp index 78f0f946e..daab0c1ce 100644 --- a/dynosam_ros/include/dynosam_ros/displays/DisplaysCommon.hpp +++ b/dynosam_ros/include/dynosam_ros/displays/DisplaysCommon.hpp @@ -68,7 +68,8 @@ struct DisplayCommon { static void publishOdometry(OdometryPub::SharedPtr pub, const gtsam::Pose3& T_world_camera, Timestamp timestamp, const std::string& frame_id, - const std::string& child_frame_id); + const std::string& child_frame_id, + const gtsam::Vector6& velocity = gtsam::Vector6::Zero()); static void publishOdometryPath(PathPub::SharedPtr pub, const gtsam::Pose3Vector& poses, Timestamp latest_timestamp, diff --git 
a/dynosam_ros/include/dynosam_ros/displays/dynamic_slam_displays/DSDCommonRos.hpp b/dynosam_ros/include/dynosam_ros/displays/dynamic_slam_displays/DSDCommonRos.hpp index d0e38aa15..be8719665 100644 --- a/dynosam_ros/include/dynosam_ros/displays/dynamic_slam_displays/DSDCommonRos.hpp +++ b/dynosam_ros/include/dynosam_ros/displays/dynamic_slam_displays/DSDCommonRos.hpp @@ -83,19 +83,22 @@ class DSDTransport { // to get velocity... static ObjectOdometry constructObjectOdometry( const gtsam::Pose3& e_H_k_world, const gtsam::Pose3& pose_k, - ObjectId object_id, FrameId frame_id_k, Timestamp timestamp_k, - const std::string& frame_id_link, const std::string& child_frame_id_link); + ObjectId object_id, FrameId frame_id_k, Timestamp timestamp_k, Timestamp timestamp_km1, + const std::string& frame_id_link, const std::string& child_frame_id_link, + const ObjectIdClassMap& object_class_map); static ObjectOdometryMap constructObjectOdometries( const ObjectMotionMap& motions, const ObjectPoseMap& poses, - FrameId frame_id_k, Timestamp timestamp_k, - const std::string& frame_id_link); + FrameId frame_id_k, Timestamp timestamp_k, Timestamp timestamp_km1, + const std::string& frame_id_link, + const ObjectIdClassMap& object_class_map); static MultiObjectOdometryPath constructMultiObjectOdometryPaths( const ObjectMotionMap& motions, const ObjectPoseMap& poses, Timestamp timestamp_k, const FrameIdTimestampMap& frame_timestamp_map, const std::string& frame_id_link, - bool interpolate_missing_segments = true); + bool interpolate_missing_segments = true, + const ObjectIdClassMap& object_class_map = {}); /** * @brief Nested Publisher that publishes all the object odometries for a @@ -146,11 +149,14 @@ class DSDTransport { std::string frame_id_link_; FrameId frame_id_; Timestamp timestamp_; + Timestamp timestamp_km1_; //! 
Object odometries for this frame ObjectOdometryMap object_odometries_; MultiObjectOdometryPath object_paths_; + ObjectIdClassMap object_class_map_; + friend class DSDTransport; /** @@ -180,7 +186,8 @@ class DSDTransport { const ObjectMotionMap& motions, const ObjectPoseMap& poses, const std::string& frame_id_link, const FrameIdTimestampMap& frame_timestamp_map, FrameId frame_id, - Timestamp timestamp); + Timestamp timestamp, + const ObjectIdClassMap& object_class_map = {}); }; /** @@ -201,7 +208,8 @@ class DSDTransport { const ObjectPoseMap& poses, const std::string& frame_id_link, const FrameIdTimestampMap& frame_timestamp_map, - FrameId frame_id, Timestamp timestamp); + FrameId frame_id, Timestamp timestamp, + const ObjectIdClassMap& object_class_map = {}); private: rclcpp::Node::SharedPtr node_; @@ -231,7 +239,8 @@ class DSDRos { DSDRos(const DisplayParams& params, rclcpp::Node::SharedPtr node); void publishVisualOdometry(const gtsam::Pose3& T_world_camera, - Timestamp timestamp, const bool publish_tf); + Timestamp timestamp, const bool publish_tf, + const gtsam::Vector6& velocity = gtsam::Vector6::Zero()); void publishVisualOdometryPath(const gtsam::Pose3Vector& poses, Timestamp latest_timestamp); diff --git a/dynosam_ros/src/RosUtils.cc b/dynosam_ros/src/RosUtils.cc index 73d197092..6be26d1e6 100644 --- a/dynosam_ros/src/RosUtils.cc +++ b/dynosam_ros/src/RosUtils.cc @@ -158,6 +158,24 @@ bool dyno::convert(const gtsam::Pose3& pose, return true; } +// NEW! 
Template to add twist to the message +template <> +bool dyno::convert(const gtsam::Vector6& vel, + geometry_msgs::msg::Twist& twist) +{ + // linear velocity components + twist.linear.x = vel(0); + twist.linear.y = vel(1); + twist.linear.z = vel(2); + + // angular velocity components + twist.angular.x = vel(3); + twist.angular.y = vel(4); + twist.angular.z = vel(5); + + return true; +} + template <> bool dyno::convert(const gtsam::Pose3& pose, geometry_msgs::msg::TransformStamped& transform) { diff --git a/dynosam_ros/src/displays/DisplaysCommon.cc b/dynosam_ros/src/displays/DisplaysCommon.cc index 548523ef0..0cd0f9a2d 100644 --- a/dynosam_ros/src/displays/DisplaysCommon.cc +++ b/dynosam_ros/src/displays/DisplaysCommon.cc @@ -44,15 +44,23 @@ CloudPerObject DisplayCommon::publishPointCloud( return clouds_per_obj; } +// ================================================================================================================ +// TODO: look here edit this function to include velocities +// ================================================================================================================ + void DisplayCommon::publishOdometry(OdometryPub::SharedPtr pub, const gtsam::Pose3& T_world_camera, Timestamp timestamp, const std::string& frame_id, - const std::string& child_frame_id) { + const std::string& child_frame_id, + const gtsam::Vector6& velocity + ) { nav_msgs::msg::Odometry odom_msg; utils::convertWithHeader(T_world_camera, odom_msg, timestamp, frame_id, child_frame_id); - pub->publish(odom_msg); + + dyno::convert(velocity, odom_msg.twist.twist); + pub->publish(odom_msg); } void DisplayCommon::publishOdometryPath(PathPub::SharedPtr pub, diff --git a/dynosam_ros/src/displays/dynamic_slam_displays/DSDCommonRos.cc b/dynosam_ros/src/displays/dynamic_slam_displays/DSDCommonRos.cc index 4624adf4b..0cf068766 100644 --- a/dynosam_ros/src/displays/dynamic_slam_displays/DSDCommonRos.cc +++ b/dynosam_ros/src/displays/dynamic_slam_displays/DSDCommonRos.cc @@ 
-25,33 +25,50 @@ std::string DSDTransport::constructObjectFrameLink(ObjectId object_id) { return "object_" + std::to_string(object_id) + "_link"; } +// ================================================================================================================ +// TODO: Fill in the velocity and check calculateBodyMotion is correct (only linear, add angular component too) +// ================================================================================================================ ObjectOdometry DSDTransport::constructObjectOdometry( - const gtsam::Pose3& e_H_k_world, const gtsam::Pose3& pose_k, - ObjectId object_id, FrameId frame_id_k, Timestamp timestamp_k, - const std::string& frame_id_link, const std::string& child_frame_id_link) { + const gtsam::Pose3& e_H_k_world, const gtsam::Pose3& pose_km1, + ObjectId object_id, FrameId frame_id_k, Timestamp timestamp_k, Timestamp timestamp_km1, + const std::string& frame_id_link, const std::string& child_frame_id_link, + const ObjectIdClassMap& object_class_map) { ObjectOdometry object_odom; - // technically this shoudl be k-1 - gtsam::Point3 body_velocity = calculateBodyMotion(e_H_k_world, pose_k); + // technically this should be k-1 + gtsam::Vector6 body_velocity = calculateBodyMotion(e_H_k_world, pose_km1, timestamp_km1, timestamp_k); nav_msgs::msg::Odometry odom_msg; - utils::convertWithHeader(pose_k, odom_msg, timestamp_k, frame_id_link, + utils::convertWithHeader(pose_km1, odom_msg, timestamp_k, frame_id_link, child_frame_id_link); object_odom.odom = odom_msg; // TODO: can check if correct representation? dyno::convert(e_H_k_world, object_odom.h_w_km1_k.pose); - // NO velocity!! 
+ + dyno::convert(body_velocity, object_odom.odom.twist.twist); // Add velocity to message + object_odom.object_id = object_id; object_odom.sequence = frame_id_k; + + // Assign the class label; fall back to "unknown" when the object has no entry in the map + auto observation = object_class_map.find(object_id); + if (observation != object_class_map.end()) { + object_odom.class_label = observation->second; + } else { + object_odom.class_label = "unknown"; + } + + return object_odom; } ObjectOdometryMap DSDTransport::constructObjectOdometries( const ObjectMotionMap& motions, const ObjectPoseMap& poses, - FrameId frame_id_k, Timestamp timestamp_k, - const std::string& frame_id_link) { + FrameId frame_id_k, Timestamp timestamp_k, Timestamp timestamp_km1, + const std::string& frame_id_link, + const ObjectIdClassMap& object_class_map) { // need to get poses for k-1 // TODO: no way to ensure that the motions are for frame k // this is a weird data-structure to use and motions are per frame and @@ -79,8 +96,8 @@ ObjectOdometryMap DSDTransport::constructObjectOdometries( object_odom_map.insert2( child_frame_id_link, constructObjectOdometry(e_H_k_world, pose_k, object_id, frame_id_k, - timestamp_k, frame_id_link, - child_frame_id_link)); + timestamp_k, timestamp_km1, + frame_id_link, child_frame_id_link, object_class_map)); } return object_odom_map; @@ -89,7 +106,7 @@ ObjectOdometryMap DSDTransport::constructObjectOdometries( MultiObjectOdometryPath DSDTransport::constructMultiObjectOdometryPaths( const ObjectMotionMap& motions, const ObjectPoseMap& poses, Timestamp timestamp_k, const FrameIdTimestampMap& frame_timestamp_map, - const std::string& frame_id_link, bool interpolate_missing_segments) { + const std::string& frame_id_link, bool interpolate_missing_segments, const ObjectIdClassMap& object_class_map) { MultiObjectOdometryPath multi_path; multi_path.header.stamp = utils::toRosTime(timestamp_k); @@ -139,10 +156,20 @@ MultiObjectOdometryPath DSDTransport::constructMultiObjectOdometryPaths( // RIGHT NOW MOTION IDENTITY // timestamp is
wrong + + // Adding the (k-1)th timestamp + Timestamp timestamp_km1; + if (previous_frame_id >= 0) { // NOTE(review): always true if FrameId is unsigned — confirm the intended "no previous frame" sentinel + timestamp_km1 = frame_timestamp_map.at(previous_frame_id); + } + else { + timestamp_km1 = timestamp; + } + gtsam::Pose3 motion; const ObjectOdometry object_odometry = constructObjectOdometry( - object_motion, object_pose, object_id, frame_id, timestamp, - frame_id_link, child_frame_id_link); + object_motion, object_pose, object_id, frame_id, timestamp, timestamp_km1, + frame_id_link, child_frame_id_link, object_class_map); if (!segmented_paths.exists(path_segment)) { ObjectOdometryPath path; @@ -224,6 +251,23 @@ void DSDTransport::Publisher::publishObjectPaths() { multi_object_odom_path_publisher_->publish(object_paths_); } +// ============================== ADDED HELPER FUNCTION TO GET PREVIOUS TIMESTAMP ============================== +Timestamp getPrevTimestamp(const FrameIdTimestampMap& map, FrameId id, Timestamp current_timestamp) { + + if (id < 0) return current_timestamp; // NOTE(review): dead check if FrameId is unsigned; id - 1 then wraps for id == 0 (the find() below misses and falls back) — prefer guarding id == 0 + + FrameId prev_id = id - 1; + + auto it = map.find(prev_id); + if (it != map.end()) { + return it->second; + } + else { + return current_timestamp; + } +} +// ============================================================================================================ + DSDTransport::Publisher::Publisher( rclcpp::Node::SharedPtr node, ObjectOdometryPub::SharedPtr object_odom_publisher, @@ -232,7 +276,8 @@ DSDTransport::Publisher( const ObjectMotionMap& motions, const ObjectPoseMap& poses, const std::string& frame_id_link, const FrameIdTimestampMap& frame_timestamp_map, FrameId frame_id, - Timestamp timestamp) + Timestamp timestamp, + const ObjectIdClassMap& object_class_map) : node_(node), object_odom_publisher_(object_odom_publisher), multi_object_odom_path_publisher_(multi_object_odom_path_publisher), @@ -240,20 +285,25 @@ DSDTransport::Publisher( frame_id_link_(frame_id_link), frame_id_(frame_id), timestamp_(timestamp), +
timestamp_km1_(getPrevTimestamp(frame_timestamp_map, frame_id, timestamp)), // added to get (k-1)th timestamp object_odometries_(DSDTransport::constructObjectOdometries( - motions, poses, frame_id, timestamp, frame_id_link)), + motions, poses, frame_id, timestamp, timestamp_km1_, frame_id_link, object_class_map)), object_paths_(DSDTransport::constructMultiObjectOdometryPaths( - motions, poses, timestamp, frame_timestamp_map, frame_id_link)) {} + motions, poses, timestamp, frame_timestamp_map, frame_id_link, true, object_class_map)), + object_class_map_(object_class_map){} + DSDTransport::Publisher DSDTransport::addObjectInfo( const ObjectMotionMap& motions, const ObjectPoseMap& poses, const std::string& frame_id_link, - const FrameIdTimestampMap& frame_timestamp_map, FrameId frame_id, - Timestamp timestamp) { + const FrameIdTimestampMap& frame_timestamp_map, + FrameId frame_id, + Timestamp timestamp, + const ObjectIdClassMap& object_class_map) { return Publisher(node_, object_odom_publisher_, multi_object_odom_path_publisher_, tf_broadcaster_, motions, poses, frame_id_link, frame_timestamp_map, frame_id, - timestamp); + timestamp, object_class_map); } DSDRos::DSDRos(const DisplayParams& params, rclcpp::Node::SharedPtr node) @@ -271,10 +321,12 @@ DSDRos::DSDRos(const DisplayParams& params, rclcpp::Node::SharedPtr node) } void DSDRos::publishVisualOdometry(const gtsam::Pose3& T_world_camera, - Timestamp timestamp, const bool publish_tf) { + Timestamp timestamp, const bool publish_tf, + const gtsam::Vector6& velocity) { DisplayCommon::publishOdometry(vo_publisher_, T_world_camera, timestamp, params_.world_frame_id, - params_.camera_frame_id); + params_.camera_frame_id, + velocity); if (publish_tf) { geometry_msgs::msg::TransformStamped t; diff --git a/dynosam_ros/src/displays/dynamic_slam_displays/FrontendDSDRos.cc b/dynosam_ros/src/displays/dynamic_slam_displays/FrontendDSDRos.cc index fe50cebfe..e7099fa35 100644 --- 
a/dynosam_ros/src/displays/dynamic_slam_displays/FrontendDSDRos.cc +++ b/dynosam_ros/src/displays/dynamic_slam_displays/FrontendDSDRos.cc @@ -173,9 +173,12 @@ void FrontendDSDRos::tryPublishGroundTruth( void FrontendDSDRos::tryPublishVisualOdometry( const VisionImuPacket::ConstPtr& frontend_output) { // publish vo - constexpr static bool kPublishOdomAsTf = true; + constexpr static bool kPublishOdomAsTf = true; + this->publishVisualOdometry(frontend_output->cameraPose(), - frontend_output->timestamp(), kPublishOdomAsTf); + frontend_output->timestamp(), + kPublishOdomAsTf, + frontend_output->getBodyVelocity()); // relies on correct accumulation of internal objects this->publishVisualOdometryPath(camera_poses_, frontend_output->timestamp()); @@ -213,10 +216,11 @@ void FrontendDSDRos::tryPublishObjects( const auto& object_motions = object_motions_; const auto& object_poses = object_poses_; const auto& timestamp_map = this->shared_module_info.getTimestampMap(); + const auto& label_map = this->shared_module_info.getClassLabelMap(); DSDTransport::Publisher object_poses_publisher = dsd_transport_.addObjectInfo( object_motions, object_poses, params_.world_frame_id, timestamp_map, - frontend_output->frameId(), frontend_output->timestamp()); + frontend_output->frameId(), frontend_output->timestamp(), label_map); object_poses_publisher.publishObjectOdometry(); object_poses_publisher.publishObjectTransforms(); object_poses_publisher.publishObjectPaths(); diff --git a/dynosam_utils/src/run_experiments_ecmr.py b/dynosam_utils/src/run_experiments_ecmr.py old mode 100644 new mode 100755 index 8af56e3d5..e115f1b5e --- a/dynosam_utils/src/run_experiments_ecmr.py +++ b/dynosam_utils/src/run_experiments_ecmr.py @@ -33,7 +33,6 @@ def run_sequnce(path, name, data_loader_num, backend_type, *args, **kwargs): parsed_args["launch_file"] = "dyno_sam_launch.py" - if run_as_frontend: additional_args.extend([ "--use_backend=0", @@ -155,13 +154,13 @@ def run_viodes(): # 
run_ecmr_experiment_sequences("/root/data/VIODE/city_day/mid", "viode_city_day_mid", viode, "--v=100") # run_ecmr_experiment_sequences("/root/data/VIODE/city_day/high","viode_city_day_high", viode, "--ending_frame=1110") - run_ecmr_experiment_sequences("/root/data/VIODE/city_day/high","test_viode1", viode,"--starting_frame=0", "--ending_frame=1110", "--v=30", "--use_backend=true") + # run_ecmr_experiment_sequences("/root/data/VIODE/city_day/high","test_viode", viode,"--starting_frame=0", "--ending_frame=1110", "--v=30", "--use_backend=true") # # zero_elements_ratio # run_ecmr_experiment_sequences("/root/data/VIODE/city_night/mid", "viode_city_night_mid", viode) # run_ecmr_experiment_sequences("/root/data/VIODE/city_night/high", "viode_city_night_high", viode) # run_ecmr_experiment_sequences("/root/data/VIODE/parking_lot/mid", "parking_lot_night_mid", viode) - # run_ecmr_experiment_sequences("/root/data/VIODE/parking_lot/high", "parking_lot_night_high", viode) + run_ecmr_experiment_sequences("/root/data/viode_parking_lot/high", "parking_lot_night_high", viode) def run_omd(): run_ecmr_experiment_sequences("/root/data/vdo_slam/omd/omd/swinging_4_unconstrained_stereo/","test", omd_dataset, "--ending_frame=300") @@ -173,7 +172,7 @@ def run_tartan_air(): run_ecmr_experiment_sequences("/root/data/TartanAir_shibuya/RoadCrossing05", "test_tartan", tartan_air) # run_ecmr_experiment_sequences("/root/data/TartanAir_shibuya/RoadCrossing06", "tas_rc6", tartan_air) # run_ecmr_experiment_sequences("/root/data/TartanAir_shibuya/RoadCrossing07", "tas_rc7", tartan_air, "--starting_frame=5", "--ending_frame=65") - # run_analysis("tas_rc7") + # run_analysis("tas_rc7") # run_ecmr_experiment_sequences("/root/data/TartanAir_shibuya/Standing01", "tas_s1", tartan_air) # run_ecmr_experiment_sequences("/root/data/TartanAir_shibuya/Standing02", "tas_s2", tartan_air) @@ -192,8 +191,8 @@ def run_kitti(): # run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0005/", 
"kitti_0005_static_only", kitti_dataset, "--shrink_row=25", "--shrink_col=50") # run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0006/", "kitti_0006_static_only", kitti_dataset, "--shrink_row=25", "--shrink_col=50") # run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0018/", "kitti_0018_static_only", kitti_dataset, "--shrink_row=25", "--shrink_col=50") - run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0020/", "kitti_0020_static_only", kitti_dataset, "--shrink_row=25", "--shrink_col=50") - # run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0004/", "test", kitti_dataset, "--shrink_row=25", "--shrink_col=50", "--use_backend=true") + # run_ecmr_experiment_sequences("/root/data/vdo_slam/kitti/kitti/0020/", "kitti_0020_static_only", kitti_dataset, "--shrink_row=25", "--shrink_col=50") + run_ecmr_experiment_sequences("/root/data/kitti_0001/", "test", kitti_dataset, "--shrink_row=25", "--shrink_col=50", "--use_backend=true") def run_aria(): @@ -210,9 +209,9 @@ def run_aria(): # run_viodes() # run_tartan_air() # run_cluster() - run_omd() + # run_omd() # run_aria() - # run_kitti() + run_kitti() # run_analysis("kitti_0001_static_only") # run_analysis("kitti_0002_static_only") # run_analysis("kitti_0003_static_only") @@ -499,4 +498,4 @@ def run_aria(): # "kitti_0003", # backend_type=object_centric_batch # ) - # run_analysis("kitti_0003") + # run_analysis("kitti_0003") \ No newline at end of file