diff --git a/README.md b/README.md index 0637fad..575a855 100644 --- a/README.md +++ b/README.md @@ -35,9 +35,18 @@ rosinstall -n . srpb/srpb/srpb.rosinstall ## Usage +### Setup + A log file is saved once the goal is reached by the `srpb_move_base` node. Renewing the goal pose before reaching the previous one does not cause the files to be divided into parts. -TBD... +### Typical workflow + +If one intends to collect a bunch of logs, this is the correct workflow: + +* instead of running the typical `move_base`, run the `srpb_move_base` node that aggregates the `srpb_logger` modules, +* after each trial, run the `scripts/copy_logs.sh` to copy the logs related to the newest trial into a separate directory, +* if one wishes to be extra safe, one might want to evaluate the newest logs at this point with `scripts/evaluate_from_dir.sh ` OR (typically) once all trials were completed, run the `scripts/evaluate_all_dirs.sh `, +* now an Excel sheet can be created with `python3 create_excel_from_results.py` script. 
## Acknowledgments diff --git a/srpb_evaluation/CMakeLists.txt b/srpb_evaluation/CMakeLists.txt index 0e109e3..8821c87 100644 --- a/srpb_evaluation/CMakeLists.txt +++ b/srpb_evaluation/CMakeLists.txt @@ -43,7 +43,7 @@ include_directories( add_library(${PROJECT_NAME}_lib # libs include/${PROJECT_NAME}/metric.h - include/${PROJECT_NAME}/metric_gaussian.h + include/${PROJECT_NAME}/metric_statistics.h include/${PROJECT_NAME}/rewinder.h include/${PROJECT_NAME}/utils.h # specific metrics @@ -63,7 +63,7 @@ add_library(${PROJECT_NAME}_lib include/${PROJECT_NAME}/metrics/personal_space_instrusion.h include/${PROJECT_NAME}/metrics/velocity_smoothness.h # sources - src/metric_gaussian.cpp + src/metric_statistics.cpp src/rewinder.cpp src/utils.cpp ) @@ -95,8 +95,8 @@ if (CATKIN_ENABLE_TESTING) if(TARGET test_utils) target_link_libraries(test_utils ${PROJECT_NAME}_lib) endif() - catkin_add_gtest(test_metric_gaussian test/test_metric_gaussian.cpp) - if(TARGET test_metric_gaussian) - target_link_libraries(test_metric_gaussian ${PROJECT_NAME}_lib) + catkin_add_gtest(test_metric_statistics test/test_metric_statistics.cpp) + if(TARGET test_metric_statistics) + target_link_libraries(test_metric_statistics ${PROJECT_NAME}_lib) endif() endif() diff --git a/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h b/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h deleted file mode 100644 index f04727c..0000000 --- a/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include "srpb_evaluation/metric.h" - -// helper functions for calculation of Gaussian distribution value -#include - -namespace srpb { -namespace evaluation { - -class MetricGaussian: public Metric { -public: - MetricGaussian( - const std::vector>& robot_data - ): Metric(robot_data) {} - - MetricGaussian( - const std::vector>& robot_data, - const std::vector>& people_data - ): Metric(robot_data, people_data) {} - - MetricGaussian( - const std::vector>& 
robot_data, - const std::vector>& people_data, - const std::vector>& groups_data - ): Metric(robot_data, people_data, groups_data) {} - - /// Returns minimum value of Gaussian throughout the scenario - virtual double getValueMin() const = 0; - - /// Returns maximum value of Gaussian throughout the scenario - virtual double getValueMax() const = 0; - - /// Returns percentage of violations of Gaussian throughout the scenario (considering the threshold value) - virtual double getViolations() const = 0; - - /// Prints results - virtual void printResults() const = 0; - - /** - * @brief Computes min, max, normalized metrics related to gaussian statistics and counts number of space violations - * - * @param timed_gaussians vector of pairs with, first, timestamp, and second, values of gaussians - * @param violation_threshold Gaussian values bigger than that will be considered as violation of personal space - * @param max_method set to true (default) to use max element from Gaussians to normalize metrics; - * false means averaging over all Gaussian occurrences in a current time step - * - * @return std::tuple tuple with scores: min, max and normalized to execution - * time and percentage of violation of, e.g., personal space violations (according to given threshold and gaussians) - */ - static std::tuple calculateGaussianStatistics( - std::vector>> timed_gaussians, - double violation_threshold, - bool max_method = true - ); - -protected: - virtual void compute() = 0; -}; - -} // namespace evaluation -} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/metric_statistics.h b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h new file mode 100644 index 0000000..5a0035c --- /dev/null +++ b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h @@ -0,0 +1,66 @@ +#pragma once + +#include "srpb_evaluation/metric.h" + +namespace srpb { +namespace evaluation { + +class MetricStatistics: public Metric { +public: + MetricStatistics( + const 
std::vector>& robot_data + ): Metric(robot_data) {} + + MetricStatistics( + const std::vector>& robot_data, + const std::vector>& people_data + ): Metric(robot_data, people_data) {} + + MetricStatistics( + const std::vector>& robot_data, + const std::vector>& people_data, + const std::vector>& groups_data + ): Metric(robot_data, people_data, groups_data) {} + + /// Returns minimum value obtained throughout the scenario + virtual double getValueMin() const = 0; + + /// Returns maximum value obtained throughout the scenario + virtual double getValueMax() const = 0; + + /// Returns the percentage of violations obtained throughout the scenario (considering the threshold value) + virtual double getViolations() const = 0; + + /// Prints results + virtual void printResults() const = 0; + + /** + * @brief Computes min, max, normalized metrics and counts the number of violations (above the certain threshold) + * + * @param timed_values vector of pairs with, first, a timestamp, and second, a container with values + * @param violation_threshold values bigger than that will be considered as violations + * @param violation_above_threshold selects whether the violation is counted when the value is bigger than + * the threshold (true, default) or when the violation is less than the threshold (false). + * This is specific to a certain metric (whether computes "cost" or "reward"). 
+ * @param max_method set to true (default) so the max element is used to normalize metrics; + * false means averaging over all values/occurrences in a current time step + * + * @return std::tuple tuple with scores: + * 1) min value, + * 2) max value, + * 3) a metric value normalized according to the duration/execution time, + * 4) timing-corrected percentage of the threshold value violations + */ + static std::tuple calculateStatistics( + std::vector>> timed_values, + double violation_threshold, + bool violation_above_threshold = true, + bool max_method = true + ); + +protected: + virtual void compute() = 0; +}; + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h index cf56b30..eb4d2ff 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -8,7 +8,7 @@ namespace srpb { namespace evaluation { /// Similar to personal space intrusion but related to the group space -class FormationSpaceIntrusion: public MetricGaussian { +class FormationSpaceIntrusion: public MetricStatistics { public: FormationSpaceIntrusion( const std::vector>& robot_data, @@ -17,9 +17,13 @@ class FormationSpaceIntrusion: public MetricGaussian { double group_space_threshold, bool max_method = true ): - MetricGaussian(robot_data, people_data, groups_data), + MetricStatistics(robot_data, people_data, groups_data), group_space_threshold_(group_space_threshold), - max_method_(max_method) + max_method_(max_method), + intrusion_min_(0.0), + intrusion_max_(0.0), + intrusion_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty() || groups_data.empty()) { return; @@ -118,12 +122,13 
@@ class FormationSpaceIntrusion: public MetricGaussian { rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( intrusion_min_, intrusion_max_, intrusion_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_gaussians, group_space_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_gaussians, group_space_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h b/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h index 68c1b91..f684c30 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h @@ -30,28 +30,33 @@ class GoalReached: public Metric { void printResults() const override { printf( - "Goal reached = %d [bool] (tolerance violations: position %d, orientation %d)\n", + "Goal reached = %d [bool] " + "(tolerance violations: position %d [offset %7.4f m], orientation %d [offset %7.4f rad])\n", static_cast(!tolerance_xy_violated_ && !tolerance_yaw_violated_), static_cast(tolerance_xy_violated_), - static_cast(tolerance_yaw_violated_) + offset_xy_, + static_cast(tolerance_yaw_violated_), + offset_yaw_ ); } protected: double tolerance_xy_; double tolerance_yaw_; + double offset_xy_; + double offset_yaw_; bool tolerance_xy_violated_; bool tolerance_yaw_violated_; void compute() override { rewinder_.setHandlerLastTimestamp( [&]() { - double dist_to_goal_xy = std::hypot( + offset_xy_ = std::hypot( rewinder_.getRobotCurr().getPositionX() - rewinder_.getRobotCurr().getGoalPositionX(), rewinder_.getRobotCurr().getPositionY() - rewinder_.getRobotCurr().getGoalPositionY() ); - double dist_to_goal_yaw = std::abs( + offset_yaw_ = std::abs( angles::shortest_angular_distance( rewinder_.getRobotCurr().getOrientationYaw(), rewinder_.getRobotCurr().getGoalOrientationYaw() @@ -59,8 +64,8 @@ class 
GoalReached: public Metric { ); // save results - tolerance_xy_violated_ = dist_to_goal_xy > tolerance_xy_; - tolerance_yaw_violated_ = std::abs(dist_to_goal_yaw) > std::abs(tolerance_yaw_); + tolerance_xy_violated_ = offset_xy_ > tolerance_xy_; + tolerance_yaw_violated_ = std::abs(offset_yaw_) > std::abs(tolerance_yaw_); } ); rewinder_.perform(); diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h index d54417e..44e417c 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h @@ -28,6 +28,11 @@ class HeadingChangeSmoothness: public Metric { rewinder_.setHandlerNextTimestamp( [&]() { double dt = rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(); + // if we attempt to divide by 0 (due to possibly insufficient sampling rate of the logger), then skip this one; + // however, this sample will still be included in the final result + if (dt <= 0.0) { + return; + } double dtheta = rewinder_.getRobotNext().getVelocityTheta() - rewinder_.getRobotCurr().getVelocityTheta(); hsm += (std::abs(dtheta) / dt); } diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h index 6918d06..d87bff3 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -8,7 +8,7 @@ namespace srpb { namespace evaluation { /// Related to velocity and direction of the robot movement towards person -class HeadingDirectionDisturbance: public MetricGaussian { +class HeadingDirectionDisturbance: public 
MetricStatistics { public: HeadingDirectionDisturbance( const std::vector>& robot_data, @@ -20,13 +20,17 @@ class HeadingDirectionDisturbance: public MetricGaussian { double robot_max_speed = social_nav_utils::HeadingDirectionDisturbance::MAX_SPEED_DEFAULT, bool max_method = true ): - MetricGaussian(robot_data, people_data), + MetricStatistics(robot_data, people_data), disturbance_threshold_(disturbance_threshold), person_occupancy_radius_(person_occupancy_radius), person_fov_(person_fov), robot_circumradius_(robot_circumradius), robot_max_speed_(robot_max_speed), - max_method_(max_method) + max_method_(max_method), + disturbance_min_(0.0), + disturbance_max_(0.0), + disturbance_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty()) { return; @@ -111,7 +115,14 @@ class HeadingDirectionDisturbance: public MetricGaussian { person_fov_ ); // normalize cost - heading.normalize(robot_circumradius_, robot_max_speed_); + if (!heading.normalize(robot_circumradius_, robot_max_speed_)) { + printf( + "During calculation of the `HeadingDirectionDisturbance` metric, scales had to be clipped for person '%s' " + "at t=%.4f. 
Your data might be corrupted!", + rewinder_.getPersonCurr().getName().c_str(), + rewinder_.getTimestampCurr() + ); + } timed_disturbance.second.push_back(heading.getScale()); } @@ -127,12 +138,13 @@ class HeadingDirectionDisturbance: public MetricGaussian { ); rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( disturbance_min_, disturbance_max_, disturbance_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_disturbances, disturbance_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_disturbances, disturbance_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h b/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h index 792965b..03a434e 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h @@ -1,77 +1,91 @@ #pragma once -#include "srpb_evaluation/metric.h" +#include "srpb_evaluation/metric_statistics.h" + +#include namespace srpb { namespace evaluation { -/// @details Originally implemented in MRPB 1.0 (https://github.com/NKU-MobFly-Robotics/local-planning-benchmark) -class ObstacleSafety: public Metric { +class ObstacleSafety: public MetricStatistics { public: ObstacleSafety( const std::vector>& robot_data, double safety_distance ): - Metric(robot_data), + MetricStatistics(robot_data), safety_distance_(safety_distance) { compute(); } - /// Returns total percentage of obstacle safety + /// Returns the time-corrected mean distance to the closest obstacle in subsequent time steps of the scenario virtual double getValue() const override { - return obstacle_safety_ * 1e2; + return obstacle_distance_total_; + } + + /// Returns minimum value obtained throughout the scenario + virtual double getValueMin() const override { + return obstacle_distance_min_; + } + + /// 
Returns maximum value obtained throughout the scenario + virtual double getValueMax() const override { + return obstacle_distance_max_; } - void printResults() const override { - printf("Obstacle safety = %.4f [%%]\n", obstacle_safety_ * 1e2); + /** + * Returns the percentage of violations obtained throughout the scenario (considering the threshold value) + * The value according to the metric originally implemented in MRPB 1.0 + * See https://github.com/NKU-MobFly-Robotics/local-planning-benchmark for details + */ + virtual double getViolations() const override { + return violations_percentage_ * 100.0; + } + + virtual void printResults() const override { + printf( + "Obstacle safety = %.4f [m] (min = %.4f [m], max = %.4f [m], violations %.4f [%%])\n", + obstacle_distance_total_, + obstacle_distance_min_, + obstacle_distance_max_, + violations_percentage_ * 100.0 + ); } protected: double safety_distance_; - double obstacle_safety_; - void compute() override { - // timestamps to sum up time - const double TS_NOT_SET = std::numeric_limits::min(); - double ts_start = TS_NOT_SET; - double ts_end = TS_NOT_SET; + double obstacle_distance_min_; + double obstacle_distance_max_; + double obstacle_distance_total_; + /// "Obstacle safety" in MRPB 1.0 + double violations_percentage_; - double ts_sum = 0.0; - auto calc_next = [&]() { - // compute how long robot traveled near obstacles located within safety distance - if (rewinder_.getRobotCurr().getDistToObstacle() < safety_distance_) { - if (ts_start == TS_NOT_SET) { - ts_start = rewinder_.getTimestampCurr(); - } else { - // save timestamp - ts_end = rewinder_.getTimestampCurr(); - } - } else { - if (ts_end != TS_NOT_SET) { - // robot started to maintain safety distance again - ts_sum += (ts_end - ts_start); + void compute() override { + // container to compute, i.a., min and max values of distances to obstacles + std::vector>> timed_obs_distances; - // reset timestamps for incoming counts - ts_start = TS_NOT_SET; - ts_end = 
TS_NOT_SET; - } - } - }; - // NOTE: external lambda must be explicitly passed to capture - auto calc_last = [&, calc_next]() { - calc_next(); - // sum up the last period (if robot finished course not maintaining the safety distance) - if (ts_start != TS_NOT_SET && ts_end != TS_NOT_SET) { - ts_sum += ts_end - ts_start; + rewinder_.setHandlerNextTimestamp( + [&]() { + timed_obs_distances.push_back({ + // step duration + rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(), + // single value container (there is only 1 closest obstacle) + std::vector{rewinder_.getRobotCurr().getDistToObstacle()} + }); } - }; - rewinder_.setHandlerNextTimestamp(calc_next); - rewinder_.setHandlerLastTimestamp(calc_last); + ); rewinder_.perform(); - // save result - obstacle_safety_ = ts_sum / rewinder_.getDuration(); + // NOTE1: a violation is regarded when distance is less than the threshold + // NOTE2: max method does not matter here as only 1 observation in each step is available + std::tie( + obstacle_distance_min_, + obstacle_distance_max_, + obstacle_distance_total_, + violations_percentage_ + ) = MetricStatistics::calculateStatistics(timed_obs_distances, safety_distance_, false, true); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h new file mode 100644 index 0000000..17745d6 --- /dev/null +++ b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h @@ -0,0 +1,159 @@ +#pragma once + +#include "srpb_evaluation/metric_statistics.h" + +#include + +namespace srpb { +namespace evaluation { + +/** + * @brief Computes the score of the robot's distance and speed characteristics during the motion around the humans. + * + * This metric implements the findings from "The effect of robot speed on comfortable passing distances" + * by Neggers et al. (2022). A continuous comfort model was recreated based on the data published in this + * paper. 
Calculations are performed in a fully deterministic manner. + * Refer to the social_nav_utils::PassingSpeedComfort implementation for a detailed description of the underlying + * bivariate model that fits the published data best. + * + * @param robot_data + * @param people_data + * @param distance_min minimum distance between the center of the robot and a human at which (when robot speed + * is equal to @ref speed_max) the normalized discomfort is the highest (1.0); the human is treated as a point here + * @param speed_max the maximum speed of the robot; used for a discomfort normalization (see @ref distance_min too) + * @param discomfort_threshold discomfort values above this level will be treated as violations + * @param max_method whether to use the maximum discomfort in a given timestamp as an indicator (true); the average + * is used when set to false + */ +class PassingSpeedDiscomfort: public MetricStatistics { +public: + PassingSpeedDiscomfort( + const std::vector>& robot_data, + const std::vector>& people_data, + double distance_min = 0.275, + double speed_max = 0.55, + double discomfort_threshold = 0.4, + bool max_method = true + ): + MetricStatistics(robot_data, people_data), + distance_min_(distance_min), + robot_speed_max_(speed_max), + discomfort_threshold_(discomfort_threshold), + max_method_(max_method), + discomfort_min_(0.0), + discomfort_max_(0.0), + discomfort_total_(0.0), + violations_percentage_(0.0) + { + if (people_data.empty()) { + return; + } + compute(); + } + + /// Returns total percentage throughout the scenario + virtual double getValue() const override { + return discomfort_total_ * 100.0; + } + + /// Returns minimum percentage throughout the scenario + virtual double getValueMin() const override { + return discomfort_min_ * 100.0; + } + + /// Returns maximum percentage throughout the scenario + virtual double getValueMax() const override { + return discomfort_max_ * 100.0; + } + + /// Returns percentage of violations throughout the 
scenario (regarding threshold) + virtual double getViolations() const override { + return violations_percentage_ * 100.0; + } + + void printResults() const override { + printf( + "Passing speed discomfort = %.4f [%%] (min = %.4f [%%], max = %.4f [%%], violations %.4f [%%])\n", + discomfort_total_ * 100.0, + discomfort_min_ * 100.0, + discomfort_max_ * 100.0, + violations_percentage_ * 100.0 + ); + } + +protected: + // parameters + double distance_min_; + double robot_speed_max_; + double discomfort_threshold_; + bool max_method_; + + // results + double discomfort_min_; + double discomfort_max_; + double discomfort_total_; + double violations_percentage_; + + void compute() override { + // store durations and discomforts of the humans in terms of robot motions + std::vector>> timed_discomforts; + /// prepare a container for discomfort values in this `for` iteration + std::pair> timed_discomfort; + + rewinder_.setHandlerNextTimestamp( + [&]() { + // prepare a container for upcoming calculations related to passing speed discomforts + timed_discomfort = std::make_pair( + rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(), std::vector() + ); + } + ); + + // on event 'iterating through next person in the timestamp' - compute a human discomfort given the robot position + // and a speed + rewinder_.setHandlerNextPersonTimestamp( + [&]() { + // Euclidean distance between centers + double distance = std::sqrt( + std::pow(rewinder_.getRobotCurr().getPositionX() - rewinder_.getPersonCurr().getPositionX(), 2.0) + + std::pow(rewinder_.getRobotCurr().getPositionY() - rewinder_.getPersonCurr().getPositionY(), 2.0) + ); + double robot_speed = std::hypot( + rewinder_.getRobotCurr().getVelocityX(), rewinder_.getRobotCurr().getVelocityY() + ); + + // compute discomfort given the distance between the centers and robot speed + social_nav_utils::PassingSpeedComfort psd( + distance, + robot_speed, + distance_min_, + robot_speed_max_ + ); + + // store result for later aggregation 
+ timed_discomfort.second.push_back(psd.getDiscomfortNormalized()); + } + ); + + // on event 'iterated through all people in the timestamp' + rewinder_.setHandlerAllPeopleTimestamp( + [&]() { + if (!timed_discomfort.second.empty()) { + timed_discomforts.push_back(timed_discomfort); + } + } + ); + rewinder_.perform(); + + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true + std::tie( + discomfort_min_, + discomfort_max_, + discomfort_total_, + violations_percentage_ + ) = MetricStatistics::calculateStatistics(timed_discomforts, discomfort_threshold_, true, max_method_); + } +}; + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h index 6db7b12..eac0858 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -29,7 +29,7 @@ namespace evaluation { * “To Approach Humans?”: A Unified Framework for Approaching Pose Prediction and Socially Aware Robot Navigation * They called it `Social Individual Index`. Their method lacks normalization in terms of path duration. 
*/ -class PersonalSpaceIntrusion: public MetricGaussian { +class PersonalSpaceIntrusion: public MetricStatistics { public: PersonalSpaceIntrusion( const std::vector>& robot_data, @@ -37,9 +37,13 @@ class PersonalSpaceIntrusion: public MetricGaussian { double personal_space_threshold, bool max_method = true ): - MetricGaussian(robot_data, people_data), + MetricStatistics(robot_data, people_data), personal_space_threshold_(personal_space_threshold), - max_method_(max_method) + max_method_(max_method), + intrusion_min_(0.0), + intrusion_max_(0.0), + intrusion_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty()) { return; @@ -142,12 +146,13 @@ class PersonalSpaceIntrusion: public MetricGaussian { ); rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( intrusion_min_, intrusion_max_, intrusion_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_gaussians, personal_space_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_gaussians, personal_space_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h b/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h index 94ce4ee..ef436a5 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h @@ -28,6 +28,11 @@ class VelocitySmoothness: public Metric { rewinder_.setHandlerNextTimestamp( [&]() { double dt = rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(); + // if we attempt to divide by 0 (due to possibly insufficient sampling rate of the logger), then skip this one; + // however, this sample will still be included in the final result + if (dt <= 0.0) { + return; + } double acc_x = std::abs(rewinder_.getRobotNext().getVelocityX() - rewinder_.getRobotCurr().getVelocityX()) / dt; double acc_y 
= std::abs(rewinder_.getRobotNext().getVelocityY() - rewinder_.getRobotCurr().getVelocityY()) / dt; mean += std::hypot(acc_x, acc_y); diff --git a/srpb_evaluation/include/srpb_evaluation/utils.h b/srpb_evaluation/include/srpb_evaluation/utils.h index 93666b0..ebcc14b 100644 --- a/srpb_evaluation/include/srpb_evaluation/utils.h +++ b/srpb_evaluation/include/srpb_evaluation/utils.h @@ -22,6 +22,7 @@ #include "srpb_evaluation/metrics/motion_efficiency.h" #include "srpb_evaluation/metrics/obstacle_safety.h" #include "srpb_evaluation/metrics/oscillations.h" +#include "srpb_evaluation/metrics/passing_speed_discomfort.h" #include "srpb_evaluation/metrics/path_linear_length.h" #include "srpb_evaluation/metrics/personal_space_instrusion.h" #include "srpb_evaluation/metrics/velocity_smoothness.h" @@ -61,7 +62,8 @@ void createResultsFile( const InplaceRotations& inplace_rotations, const PersonalSpaceIntrusion& personal_space_intrusion, const FormationSpaceIntrusion& formation_space_intrusion, - const HeadingDirectionDisturbance& heading_direction_disturbance + const HeadingDirectionDisturbance& heading_direction_disturbance, + const PassingSpeedDiscomfort& passing_speed_discomfort ); /** diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index b26d7c2..88454bb 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.3 + 0.9.9 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation diff --git a/srpb_evaluation/scripts/copy_logs.sh b/srpb_evaluation/scripts/copy_logs.sh index c67a004..d51efbd 100755 --- a/srpb_evaluation/scripts/copy_logs.sh +++ b/srpb_evaluation/scripts/copy_logs.sh @@ -1,24 +1,97 @@ #!/usr/bin/env bash # -# Copies 3 newest files from this directory to the directory given by argument. -# It is handy to put that script into the logs directory. 
+# Copies newest log files (robot/people/groups/gplanner) from the one directory to the directory given by argument. # -if [ "$#" -ne 1 ]; then - echo "What's the target directory?" +# Usage looks like: +# +# rosrun srpb_evaluation copy_logs.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. Script args:" + echo " (1) [required] path to the source directory with logs ('*_robot.txt', '*_people.txt', '*_groups.txt', and '*_gplanner.txt')" + echo " (2) [required] path to the target directory with copied logs; relative paths will be considered as relative to the source directory" exit 0 fi -SCRIPT_DIR=$(realpath $(dirname $0)) -TARGET_DIR="$SCRIPT_DIR/$1" -LOGS_DIR=$SCRIPT_DIR -mkdir -p $TARGET_DIR +logs_dir="$1" +# trim trailing / (if exists) +logs_dir="${logs_dir%/}" + +target_dir="$2" +# trim trailing / (if exists) +target_dir="${target_dir%/}" + +if [[ ! $target_dir == /* ]]; then + echo "Relative path" + target_dir=$logs_dir/$target_dir +fi +# Find the newest files matching the pattern # https://stackoverflow.com/a/54910963 -files_to_copy=$(ls -t $LOGS_DIR/log_*.txt | head -3) -cp $files_to_copy $TARGET_DIR - -echo "Copied:" -echo "" -echo "$files_to_copy" -echo "" -echo "to $TARGET_DIR" +newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) +newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) +newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) +newest_gplanner=$(ls -t $logs_dir/log_*_gplanner.txt | head -1) + +# Extract the timestamp from the selected file (note that they may differ slightly) +timestamp_robot=$(echo "$newest_robot" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_people=$(echo "$newest_people" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_groups=$(echo "$newest_groups" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_gplanner=$(echo "$newest_gplanner" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') + +# Indicates whether logs for a certain session consists of 
multiple parts +max_part='1' + +# Find all files with the same timestamp +while IFS= read -r -d '' file; do + # Extract filename without path + filename=$(basename "$file") + # Extract part number from filename (no "part" suffix means "1") + part=$(echo "$filename" | grep -oP '_part\K\d+' || echo '1') + # Add file to respective part number group + part_files[$part]+="$file " + + # Find the biggest "part" number + curr_part_num=$(expr "${part}" + 0) + max_part_num=$(expr "${max_part}" + 0) + if [ "${curr_part_num}" -gt "${max_part_num}" ]; then + max_part="${curr_part_num}" + fi +done < <(find "$logs_dir" -maxdepth 1 -type f \ + \( \ + -name "log_*_${timestamp_robot}_*.txt" \ + -o -name "log_*_${timestamp_people}_*.txt" \ + -o -name "log_*_${timestamp_groups}_*.txt" \ + -o -name "log_*_${timestamp_gplanner}_*.txt" \ + \) \ + -print0 \ +) + +# If the logs are not divided into "parts", we'll don't need to create separate directories in the target dir +if [ "${max_part}" -eq "1" ]; then + # the logging session consisted from only 1 part + mkdir -p $target_dir + cp ${part_files[${max_part}]} $target_dir + + # files but each in a new line + files_nl=$(echo "${part_files[${max_part}]}" | tr ' ' '\n') + echo "Copied:" + echo "$files_nl" + echo "to $(realpath $target_dir)" + exit 0 +fi + +# Found multiple parts of logs related to the same session +# Iterate from 1 to the value in max_part +for ((i = 1; i <= max_part; i++)); do + target_dir_part=$target_dir/part$i + mkdir -p $target_dir_part + cp ${part_files[$i]} $target_dir_part + + # files but each in a new line + files_nl=$(echo "${part_files[$i]}" | tr ' ' '\n') + echo "Copied:" + echo "$files_nl" + echo "to $(realpath $target_dir_part)" +done +exit 0 diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.py b/srpb_evaluation/scripts/create_bar_plot_from_results.py new file mode 100644 index 0000000..5539e3c --- /dev/null +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.py @@ -0,0 +1,255 @@ +''' 
+Script that creates a .pdf file with a bar plot presenting results stored in a spreadsheet + +Dependencies: +sudo apt install python3-openpyxl +sudo apt install python3-matplotlib +sudo apt install python3-numpy +''' + +import argparse +import excel_sheet_utils +import matplotlib.pyplot as plt +import numpy as np +import yaml + +from typing import Dict +from typing import List +from typing import Union + + +def filter_inner_keys(outer_dict: Dict, selected_keys: List[str]) -> Dict: + """ + ChatGPT: + This function takes the outer dictionary (outer_dict) and a list of selected keys (selected_keys). + It then uses a nested dictionary comprehension to iterate over the outer dictionary and, + for each inner dictionary, filter only the selected keys. The result is a new dictionary with the same outer + structure but only containing the selected keys in the inner dictionaries. + """ + return {outer_key: {inner_key: inner_dict[inner_key] for inner_key in selected_keys if inner_key in inner_dict} + for outer_key, inner_dict in outer_dict.items()} + + +def prepare_data_for_plotting(data: Dict) -> Union[List[str], Dict[str, float]]: + """ + Input data are constructed as follows: + * dict with planner names as keys + * dict with metric names as keys + * metric values + Output data are constructed as follows: + * Union[ + list of metric names, + dict of planner names with values of corresponding metrics (ordered according to the list) + ] + """ + if not data: + raise Exception(f"Empty data provided!") + + planner_names = list(data.keys()) + if not planner_names: + raise Exception(f"Inner dict is empty!") + + metric_names = list(data[planner_names[0]].keys()) + # NOTE: [0] because individual filtered values are stored in 1-element lists + metrics_values = { + planner: [ + data[planner].get(metric, None)[0] + for metric in metric_names + ] for planner in planner_names + } + return metric_names, metrics_values + + +def create_bar_plot(dataset: Dict, plot_cfg: Dict) -> plt.Figure: 
+ """ + Prepares a bar plot according to this tutorial: + https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py + """ + metric_name = dataset['metric'] + metric_planner_data = dataset['data'] + planner_names_list = list(metric_planner_data.keys()) + + # find the number of scenarios as a size of a list with metric values of an arbitrary planner + if not len(planner_names_list): + raise Exception(f"Cannot proceed, data is empty") + scenarios_num = len(metric_planner_data[planner_names_list[0]]) + + # to find maximum (1-element metric sequences are expected) + all_values = [value[0] for value in metric_planner_data.values()] + # find the minimum and maximum values + min_value = min(all_values) + max_value = max(all_values) + ylim_min = plot_cfg['figure']['ylim_min'] + ylim_max = plot_cfg['figure']['ylim_max_value_multiplier'] * max_value + + # abstract from inputs (this is an early version) + planner_ids = metric_planner_data.keys() + metric_values = metric_planner_data + + # the label locations + x = np.arange(scenarios_num) + + # the width of the space reserved for bars + width_scale = 1.0 + width = width_scale / len(metric_values) / 2 + # could be useful for the multi-scenario case + multiplier = 0 + + # prepare configuration entries + font_cfg_legend = { + 'family': plot_cfg['legend']['fontfamily'], + 'size': plot_cfg['legend']['fontsize'], + } + # validate the color + if not len(plot_cfg['figure']['bar_color']): + plot_cfg['figure']['bar_color'] = None + + # Based on the example: https://matplotlib.org/stable/gallery/statistics/boxplot_vs_violin.html + if plot_cfg['figure']['size_width'] == None or plot_cfg['figure']['size_height'] == None: + fig, ax = plt.subplots() + else: + fig, ax = plt.subplots(figsize=(plot_cfg['figure']['size_width'], plot_cfg['figure']['size_height'])) + + for attribute, measurement in metric_values.items(): + offset = width * multiplier + rects = ax.bar( + x + 
offset, + measurement, width * plot_cfg['figure']['bar_width'], + label=attribute, + color=plot_cfg['figure']['bar_color'], + edgecolor=plot_cfg['figure']['bar_edgecolor'], + linewidth=plot_cfg['figure']['bar_edgewidth'] + ) + + # Add labels manually instead of using "ax.bar_label()" function + if plot_cfg['figure']['bar_height_labels']: + for rect, label in zip(rects, measurement): + height = rect.get_height() + ax.text( + rect.get_x() + rect.get_width() / 2, # center the text + height, + str(plot_cfg['figure']['bar_height_label_format']).format(height), # limit decimal places + ha='center', + va='bottom', + fontdict=font_cfg_legend, + color='black' + ) + # for calculating offsets + multiplier += 1 + + # Add some text for labels, title and custom x-axis tick labels, etc. + ax.set_xlabel(plot_cfg['figure']['xlabel'], fontdict=font_cfg_legend) + ax.set_ylabel(plot_cfg['figure']['ylabel'], fontdict=font_cfg_legend) + ax.set_title(plot_cfg['figure']['title'], fontdict=font_cfg_legend) + + # Ref: https://stackoverflow.com/a/47893553 + idx = np.asarray([i * width for i in range(len(planner_ids))]) + ax.set_xticks(idx) + ax.set_xticklabels(list(planner_ids), fontdict=font_cfg_legend) + ax.tick_params(axis='y', labelsize=font_cfg_legend['size']) + + # x labels rotation + ax.tick_params(axis='x', rotation=plot_cfg['figure']['xlabels_rotation']) + + if plot_cfg['legend']['enable']: + ax.legend( + loc=plot_cfg['legend']['loc'], + ncol=plot_cfg['legend']['ncol'], + fontsize=font_cfg_legend['size'] + ) + ax.set_ylim(ylim_min, ylim_max) + return fig + + +def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings: dict): + # remap the original IDs (names) + data_remapped_keys = {} + for name_orig in names_orig_select: + label = remappings.get(name_orig) + # check if found; also, if the remapped value already exists, do not overwrite + if not label == None and not label in data_orig.keys(): + data_remapped_keys[label] = data_orig[name_orig] + elif label == 
None: + # if key was not found amid the remappings - do not change the original name + data_remapped_keys[name_orig] = data_orig[name_orig] + return data_remapped_keys + + +if __name__ == "__main__": + # API is similar to the `create_box_plot_from_results` script + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional argument + cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument( + "--input", + nargs="*", + type=str, + help="Path(s) to the SRPB results sheet(s). Their contents should be orthogonal - different planners in each" + ) + cli.add_argument("--config", type=str, help="Path to the plot configuration file") + cli.add_argument("--metric", type=str, help="ID of the metric") + # optional arguments + cli.add_argument( + "--planners", + nargs="*", + type=str, + help="Space-separated list of planners to include in the plot (all appearing in the results file \ + are included by default)" + ) + + # parse the command line + args = cli.parse_args() + + sheet_paths = args.input + config_path = args.config + metric = args.metric + output_path = args.output + planners_raw = args.planners + + # load dict from the file + with open(config_path) as f: + cfg = yaml.safe_load(f) + + # this list will always be of size 1 (this is just a hack to pass it to `filter_inner_keys`) + metric_name = [str(metric)] + + # load data from the spreadsheet(s) + data_loaded = {} + for sheet_path in sheet_paths: + data_single_sheet = excel_sheet_utils.load_data_from_excel(sheet_path) + # NOTE: this is probably not an optimal solution; ref: https://stackoverflow.com/a/26853961 + data_loaded_so_far = data_loaded + data_loaded = {**data_loaded_so_far, **data_single_sheet} + + data_selected = filter_inner_keys(data_loaded, metric_name) + + # use all included in the input sheet + if planners_raw == None or not len(planners_raw): + print(f"Using planner IDs from the input spreadsheet as none were selected 
explicitly") + planners_raw = [] + keys = data_loaded.keys() + for planner_id in keys: + planners_raw.append(planner_id) + + metric_name, planner_values = prepare_data_for_plotting(data_selected) + # remap the original IDs (names) of the planners, store in the same dict + planner_values = create_remapped_dataset(planner_values, planners_raw, cfg['labels']) + + # prepare dataset abstract (to eventually extend to a multi-plot scenario) + dataset = { + 'metric': metric_name, + 'data': planner_values + } + + # Create a new dictionary with only the desired keys + plot_cfg_dict = {key: cfg[key] for key in ['figure', 'legend'] if key in cfg} + + # Create bar plot and return a figure handle + fig = create_bar_plot(dataset, plot_cfg_dict) + # blocking call until window with the plot is closed + plt.show() + + # save the figure + fig.savefig(output_path, bbox_inches='tight', pad_inches=0) + print(f"Plot saved to `{output_path}` file") diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml new file mode 100644 index 0000000..59a9599 --- /dev/null +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml @@ -0,0 +1,36 @@ +figure: + size_width: 9 + size_height: 4 + # one may want to embed mathematical typesetting inside to any label, use "$s^2$" then + title: Metric name + xlabel: Planner + # in degrees + xlabels_rotation: 0 + ylabel: Metric value + ylim_min: 0.0 + ylim_max_value_multiplier: 1.1 + bar_width: 0.5 + bar_height_labels: True + bar_height_label_format: "{:.1f}" + bar_edgecolor: black + bar_edgewidth: 0.7 + # set a valid value so all bars will have the same color; leave empty <""> for default setup + bar_color: "#1f77b4" + +legend: + # when enabling, it's wise to force a default "bar_color" + enable: False + loc: "upper left" + ncol: 3 + fontsize: 12 + fontfamily: serif + +labels: + # defines how the planners will be named according to their IDs + humap: HUMAP + cohan: CoHAN + hateb: 
HaTEB + teb: TEB + dwa: DWA + trajectory: Traj. Rollout + eband: EBand diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py new file mode 100644 index 0000000..4291538 --- /dev/null +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -0,0 +1,152 @@ +''' +Script that creates a .pdf file with a violin/box plot presenting results + +Dependencies: +sudo apt install python3-openpyxl +sudo apt install python3-matplotlib +''' + +import argparse +import excel_sheet_utils +import matplotlib.pyplot as plt +import yaml + + +def remap_names(orig_names: list, remappings: dict): + # remap the original IDs (names) + names = [] + for name in orig_names: + label = remappings.get(name) + # check if found; if not - do not change the name + if not label == None: + names.append(label) + else: + names.append(name) + return names + + +if __name__ == "__main__": + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument( + "--input", + nargs="*", + type=str, + help="Path(s) to the SRPB results sheet(s). 
Their contents should be orthogonal - different planners in each" + ) + cli.add_argument("--config", type=str, help="Path to the plot configuration file") + cli.add_argument("--metric", type=str, help="ID of the metric") + # optional arguments + cli.add_argument( + "--planners", + nargs="*", + type=str, + help="Space-separated list of planners to include in the plot (all appearing in the results file \ + are included by default)" + ) + # Example usage: + # python3 \ + # create_box_plot_from_results.py \ + # \ + # test.pdf \ + # --config create_box_plot_from_results.yaml \ + # --metric m_mef \ + # --planners teb dwa + # + + # parse the command line + args = cli.parse_args() + + sheet_paths = args.input + config_path = args.config + metric = args.metric + output_path = args.output + planners_raw = args.planners + + # load dict from the file + with open(config_path) as f: + cfg = yaml.safe_load(f) + + # load data from the spreadsheet(s) (all data for statistical presentation) + raw_data = {} + for sheet_path in sheet_paths: + raw_data_single_sheet = excel_sheet_utils.load_raw_data_from_excel(sheet_path) + # NOTE: this is probably not an optimal solution; ref: https://stackoverflow.com/a/26853961 + raw_data_so_far = raw_data + raw_data = {**raw_data_so_far, **raw_data_single_sheet} + + # use all included in the input sheet + if planners_raw == None or not len(planners_raw): + print(f"Using planner IDs from the input spreadsheet as none were selected explicitly") + planners_raw = [] + keys = raw_data.keys() + for planner_id in keys: + planners_raw.append(planner_id) + + # + # Structure of data per individual metric: + # (the assumption of each list with an equal length applies when the dataset is perfectly clean) + # + # data = [ + # # -> ..., , ..., + # [0.0, 0.0, 0.0, 0.0, 0.0], # ^ + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner X + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner Y + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner Z + # [0.0, 0.0, 0.0, 0.0, 0.0], # 
data of planner W + # [0.0, 0.0, 0.0, 0.0, 0.0] # | + # ] + # + # prepare the loaded data according to the example above + data = [] + for planner in planners_raw: + # delete Nones - method is robust against the failed trials + data_planner_metric_clean = [] + for i, metric_value in enumerate(raw_data[planner][metric]): + if metric_value == None: + print(f"[{i+1}] Planner '{planner}', metric '{metric}' equals '{metric_value}', skipping...") + continue + data_planner_metric_clean.append(metric_value) + data.append(data_planner_metric_clean) + + # remap the original IDs (names) of the planners + planners = remap_names(planners_raw, cfg['labels']) + + # Based on the example: https://matplotlib.org/stable/gallery/statistics/boxplot_vs_violin.html + if cfg['figure']['size_width'] == None or cfg['figure']['size_height'] == None: + fig, ax = plt.subplots() + else: + fig, ax = plt.subplots(figsize=(cfg['figure']['size_width'], cfg['figure']['size_height'])) + + if cfg['plot_type'] == "violin": + ax.violinplot( + data, + showmeans=False, + showmedians=True + ) + elif cfg['plot_type'] == "box": + ax.boxplot(data) + else: + raise Exception(f"Unknown plot_type") + + font_cfg = { + 'family': cfg['figure']['fontfamily'], + 'size': cfg['figure']['fontsize'], + } + # NOTE: converts to "LaTeX-style" if a string (or its part) is written as "$a^x$" + ax.set_title(cfg['figure']['title'], fontdict=font_cfg) + ax.xaxis.grid(cfg['figure']['xaxis_grid']) + ax.yaxis.grid(cfg['figure']['yaxis_grid']) + ax.set_xlabel(cfg['figure']['xlabel'], fontdict=font_cfg) + ax.set_ylabel(cfg['figure']['ylabel'], fontdict=font_cfg) + ax.set_xticks(range(1, len(data) + 1)) + ax.set_xticklabels(planners, fontdict=font_cfg) + # x labels rotation + ax.tick_params(axis='x', rotation=cfg['figure']['xlabels_rotation']) + plt.show() + + # save the figure + fig.savefig(output_path, bbox_inches='tight', pad_inches=0) + print(f"Plot saved to `{output_path}` file") diff --git 
a/srpb_evaluation/scripts/create_box_plot_from_results.yaml b/srpb_evaluation/scripts/create_box_plot_from_results.yaml new file mode 100644 index 0000000..06ee080 --- /dev/null +++ b/srpb_evaluation/scripts/create_box_plot_from_results.yaml @@ -0,0 +1,26 @@ +# (violin|box) +plot_type: violin + +figure: + size_width: 9 + size_height: 4 + title: Title + xlabel: Samples + # in degrees + xlabels_rotation: 0 + ylabel: Values + xaxis_grid: True + yaxis_grid: True + fontsize: 12 + # (cursive|fantasy|monospace|sans|sans serif|sans-serif|serif) + fontfamily: serif + +labels: + # defines how the planners will be named according to their IDs + humap: HuMAP + cohan: CoHAN + hateb: HaTEB + teb: TEB + dwa: DWA + trajectory: Trajectory Rollout + eband: EBand diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 3c72349..13a8176 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -8,24 +8,36 @@ ''' import csv +import excel_sheet_utils import glob +import math +import re import sys -from string import ascii_uppercase from openpyxl import Workbook from pathlib import Path +from srpb_metrics import SrpbMetrics from typing import List from typing import Dict -def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=3): +def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=1): dirnames_underscore = glob.glob(dir_path + '/' + '*_' + planner_name + '*') dirnames_dash = glob.glob(dir_path + '/' + '*-' + planner_name + '*') dirnames = dirnames_underscore dirnames.extend(dirnames_dash) if len(dirnames) < min_logs: - exit(f'Too few data entries for {planner_name} planner. Got: \r\n{dirnames}') - return dirnames + exit(f'Too few data entries for {planner_name} planner. 
Got {len(dirnames)}/{min_logs}: \r\n{dirnames}') + + # got rid of the dirs that are marked to be ignored + dirnames_valid = [] + for dirname in dirnames: + if re.search('ignore', dirname, re.IGNORECASE): + print(f'Ignoring a directory `{dirname}` from including in the overall `{planner_name}` planner results') + continue + dirnames_valid.append(dirname) + + return dirnames_valid def get_log_result_files(log_dirs: str): @@ -33,9 +45,11 @@ def get_log_result_files(log_dirs: str): for log_dir in log_dirs: results_file = glob.glob(log_dir + '/' + '*results.txt*') if len(results_file) == 0: - exit(f'Lack of results file at {log_dir}') + print(f'Lack of results file at `{log_dir}` dir, skipping') + continue elif len(results_file) > 1: - exit(f'Multiple results files at {log_dir}') + print(f'Multiple results files at `{log_dir}` dir, skipping') + continue result_files.append(results_file[0]) return result_files @@ -52,10 +66,17 @@ def read_results_file(results_file: str): for row in csv_result: key = str(row[0]).lstrip().rstrip() value_str = str(row[1]) - if value_str.find('.') != -1: - value = float(value_str) - else: - value = int(value_str) + try: + if 'nan' in value_str: + value = None + print(f"NaN value detected for the key `{key}` in the results file `{results_file}`") + elif value_str.find('.') != -1: + value = float(value_str) + else: + value = int(value_str) + except ValueError: + print(f"Cannot convert a value `{value_str}` of a key `{key}` to a numeric, skipping `{results_file}`") + return {} dict[key] = value return dict @@ -79,123 +100,50 @@ def collect_results_planners(dir_path: str, planner_name: List[str]) -> Dict: def prepare_sheet_rows(results_total: Dict) -> List[List]: - planners_row = [] - trials_row = [] - file_row = [] - spls_robot_row = [] - spls_ppl_row = [] - spls_grp_row = [] - m_goal_row = [] - m_obs_row = [] - m_eff_row = [] - m_cef_row = [] - m_cre_row = [] - m_vsm_row = [] - m_hsm_row = [] - m_path_row = [] - m_chc_row = [] - m_osc_row = [] 
- m_bwd_row = [] - m_irot_row = [] - m_psi_row = [] - m_fsi_row = [] - m_dir_row = [] - - planners_row.append('Planner') - trials_row.append('Trial') - file_row.append('File') - spls_robot_row.append('Samples robot') - spls_ppl_row.append('Samples people') - spls_grp_row.append('Samples groups') - m_goal_row.append('m_goal') - m_obs_row.append('m_obs') - m_eff_row.append('m_eff') - m_cef_row.append('m_cef') - m_cre_row.append('m_cre') - m_vsm_row.append('m_vsm') - m_hsm_row.append('m_hsm') - m_path_row.append('m_path') - m_chc_row.append('m_chc') - m_osc_row.append('m_osc') - m_bwd_row.append('m_bwd') - m_irot_row.append('m_irot') - m_psi_row.append('m_psi') - m_fsi_row.append('m_fsi') - m_dir_row.append('m_dir') + planners_row = ['Planner'] + trials_row = ['Trial'] + + # Dict[str, List] + rows_dict = {} # iterate over keys for planner_key in results_total: for i, result in enumerate(results_total[planner_key]): planners_row.append(planner_key) trials_row.append(i + 1) - file_row.append(result['file']) - spls_robot_row.append(result['s_rbt']) - spls_ppl_row.append(result['s_ppl']) - spls_grp_row.append(result['s_grp']) - m_goal_row.append(result['m_goal']) - m_obs_row.append(result['m_obs']) - m_eff_row.append(result['m_mef']) - m_cef_row.append(result['m_cef']) - m_cre_row.append(result['m_cre']) - m_vsm_row.append(result['m_vsm']) - m_hsm_row.append(result['m_hsm']) - m_path_row.append(result['m_path']) - m_chc_row.append(result['m_chc']) - m_osc_row.append(result['m_osc']) - m_bwd_row.append(result['m_bwd']) - m_irot_row.append(result['m_inp']) - m_psi_row.append(result['m_psi']) - m_fsi_row.append(result['m_fsi']) - m_dir_row.append(result['m_dir']) - - return [ - planners_row, - trials_row, - file_row, - spls_robot_row, - spls_ppl_row, - spls_grp_row, - m_goal_row, - m_obs_row, - m_eff_row, - m_cef_row, - m_cre_row, - m_vsm_row, - m_hsm_row, - m_path_row, - m_chc_row, - m_osc_row, - m_bwd_row, - m_irot_row, - m_psi_row, - m_fsi_row, - m_dir_row - ] + # 
`result` is a dict with: + # - keys indicating individual metric names for the planner `planner_key` + # - values - corresponding values + for i, metric_key in enumerate(result.keys()): + if not metric_key in rows_dict.keys(): + # start with metric name, append the result in the first iteration (sheet's row) + rows_dict[str(metric_key)] = [metric_key, result[metric_key]] + else: + # continue with appending to an initialized list + rows_dict[str(metric_key)].append(result[metric_key]) + + # 2 special rows are prepared separately + metrics_rows = [planners_row, trials_row] + # metric-related rows are dicts - key is the metric ID, value is the list with the metric ID and values for + # subsequent trials; the loop below extracts the lists + for row_key in rows_dict.keys(): + metrics_rows.append(rows_dict[row_key]) + return metrics_rows + def cell_coords_to_sheet_cell_id(row: int, col: int): row_w_offset = row + 1 + 2 col_w_offset = col + 1 cell_row = str(row_w_offset) - found_col = False + if not math.isfinite(row_w_offset) or not math.isfinite(col_w_offset): + exit(f"Could not find a valid column ID for ({row}, {col}) configuration") + it_col = 0 - cell_col = '' - for c in ascii_uppercase: - if it_col == col_w_offset: - cell_col = str(c) - found_col = True - break + cell_col = excel_sheet_utils.RESULT_RAW_INIT_COL + while it_col != col_w_offset: + cell_col = excel_sheet_utils.increment_column(cell_col) it_col = it_col + 1 - # repeat once again if required (doubling letters) - if not found_col: - for c in ascii_uppercase: - if it_col == col_w_offset: - cell_col = 'A' + str(c) - found_col = True - break - it_col = it_col + 1 - if not found_col: - exit(f"Could not find a valid column ID for ({row}, {col}) configuration") # append row cell_id = str(cell_col) + str(cell_row) @@ -223,9 +171,17 @@ def get_sheet_datacell(planner: str, trial: int, result_key: str, results_total: # counting rows from the start - exemplary key to iterate through metrics metric_counter = 0 - 
results_example = results_total[planner][0] + try: + results_example = results_total[planner][0] + except IndexError: + print( + f'The planner `{planner}` does not seem to contain any keys with metric names. ' + f'Got `{results_total[planner]}` while searching for the metric `{result_key}` for the trial `{trial}`' + ) + return None + if not result_key in results_example.keys(): - exit(f'Cannot proceed as results do not contain that key: {result_key}. Available keys: {results_example.keys()}') + exit(f'Cannot proceed as results do not contain that key: `{result_key}`. Available keys: `{results_example.keys()}`') found = False for key in results_example: @@ -242,13 +198,13 @@ def increment_char(c: chr): return chr(ord(c) + 1) -def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, calc_fun='MEDIAN'): - row_start = 27 - col_header = str('A') +def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict): + row_start = int(excel_sheet_utils.RESULT_INIT_ROW) + col_header = str(excel_sheet_utils.RESULT_INIT_COL) ws[col_header + str(row_start)] = 'Planner' ws[col_header + str(row_start + 1)] = 'Trials' row_metric_start = row_start + 1 - col_planner_start = str('B') + col_planner_start = str(increment_char(col_header)) col_planner = col_planner_start # retrieve metric keys from an arbitrary planner @@ -257,6 +213,9 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, for key in results_example.keys(): metric_keys.append(str(key)) + # helps to find an Excel filtering function + srpb_metrics = SrpbMetrics() + # create compound representation (filtered) of raw data for planner in planner_names: # investigate each metric @@ -269,58 +228,81 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, planner_trial_last_id = planner_trials - 1 cell_begin = get_sheet_datacell(planner, 0, metric, results) cell_end = get_sheet_datacell(planner, planner_trial_last_id, metric, results) 
+ if cell_begin == None or cell_end == None: + continue # fill spreadsheet ws[col_header + str(row_metric_start + m_index)] = metric ws[col_planner + str(row_start)] = planner ws[col_planner + str(row_start + 1)] = planner_trials - # filtering: put excel function here - ws[col_planner + str(row_metric_start + m_index)] = '=' + str(calc_fun) + '(' + str(cell_begin) + ':' + str(cell_end) + ')' + # filtering: put a string with an Excel function into the cell + excel_fun = srpb_metrics.get_excel_calc_fun(metric, str(cell_begin), str(cell_end)) + ws[col_planner + str(row_metric_start + m_index)] = excel_fun # next planner col_planner = increment_char(col_planner) -######################################################## -# command line arguments -if len(sys.argv) == 1: - print(f'Usage: ') - print(f' {sys.argv[0]} <(OPTIONAL) names of planners, must be part of corresponding filenames>') - exit() - -logs_dir = str(sys.argv[1]) -print(f'Name of directory with logs: {logs_dir}') - -planners = [] -if not len(sys.argv) == 2: - planners = sys.argv[2:] -else: - planners = ['teb', 'dwa', 'trajectory', 'eband', 'hateb', 'cohan'] - -print(f'Investigated planners: {planners}') -results = collect_results_planners(logs_dir, planners) - -############################################### -# documentation at https://openpyxl.readthedocs.io/en/stable/index.html -wb = Workbook() - -# grab the active worksheet -ws = wb.active - -# Prepare rows of data and append them to the sheet -rows = prepare_sheet_rows(results) -for row in rows: - ws.append(row) - -# make some calculations -calculate_sheet(ws, planners, results, 'MEDIAN') - -# Prepare name of the output file -output_filename = 'results' -for planner in planners: - output_filename = output_filename + '_' + planner -output_filename = output_filename.rstrip('_') + '.xlsx' -output_path = Path(logs_dir).parent.absolute() / output_filename - -# Save the file -wb.save(str(output_path)) - -print(f'Results saved in: {output_path}') +if __name__ 
== "__main__": + # command line arguments + if len(sys.argv) == 1: + print(f'Usage: ') + print(f'') + print(f' python3 {sys.argv[0]} <(OPTIONAL) space-separated planner names>') + print(f'') + print(f' 1. The main directory with logs should store the log files grouped into separate directories, ') + print(f' each containing a robot, people and group data.') + print(f' 2. In order to detect which planners to include into results sheet, each name of a planner ') + print(f' must be a part of log filenames.') + print(f'') + exit() + + logs_dir = str(sys.argv[1]) + print(f'Name of directory with logs: {logs_dir}') + + planners = [] + if not len(sys.argv) == 2: + planners = sys.argv[2:] + else: + planners = ['teb', 'dwa', 'trajectory', 'eband', 'hateb', 'cohan'] + print(f'Using the default list of planners!') + + print(f'Investigated planners: {planners}') + results = collect_results_planners(logs_dir, planners) + + ############################################### + # documentation at https://openpyxl.readthedocs.io/en/stable/index.html + wb = Workbook() + + # grab the active worksheet (workbook always creates an arbitrary sheet) + ws_init = wb.active + # create a sheet with a custom name + ws = wb.create_sheet(title=excel_sheet_utils.SHEET_NAME) + # delete the sheet initially created + wb.remove_sheet(worksheet=ws_init) + + # Prepare rows of data and append them to the sheet + rows = prepare_sheet_rows(results) + for row in rows: + ws.append(row) + + # make some calculations + calculate_sheet(ws, planners, results) + + # Prepare name of the output file + output_filename = 'results' + '_' + Path(logs_dir).name + for planner in planners: + output_filename = output_filename + '_' + planner + output_filename = output_filename.rstrip('_') + '.xlsx' + output_path = Path(logs_dir).parent.absolute() / output_filename + + # Save the file + wb.save(str(output_path)) + + print(f'Results saved in: {output_path}') + print("") + # When the sheet with the results is not opened and 
saved by the Excel or LibreOffice Calc, then reading a non-empty + # cell will probably return None + # Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ + # Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y + print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') + print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') + print(f'This is a necessary step when one wants to use the script that creates a LaTeX table from a spreadsheet') diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 1887f38..ff75ba7 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -5,99 +5,102 @@ sudo apt install python3-openpyxl ''' +from excel_sheet_utils import load_data_from_excel +from srpb_metrics import SrpbMetrics + +import argparse import json -import sys +import math -from openpyxl import load_workbook from pathlib import Path - from typing import List from typing import Dict -def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: - # reading from sheets follows the convention introduced in `create_excel_from_results.calculate_sheet` script - - # NOTE: data_only allows to read values instead of formulas - wb = load_workbook(filename = str(path), data_only=True) - # access specific sheet - sheet = wb["Sheet"] - # upper left cell - cell that initiates results table - col_init = 'A' - row_init = '28' - - col_metric_ids = col_init - - # lambda that creates col-row tuple to access cell in a sheet - make_sheet_cell = lambda row, col : str(col + row) - # lambda that works for chars in A-Z range - increment_char = lambda c : chr(ord(c) + 1) - # lambda that increments integer stored as string - increment_row = lambda row : str(int(row) + 1) - # lambda 
that retrieves value of sheet's cell - get_sheet_val = lambda sheet, row, col : sheet[make_sheet_cell(col=col, row=row)].value - # lambda that checks if certain cell in the sheet is empty - sheet_cell_empty = lambda sheet, row, col : get_sheet_val(sheet=sheet, row=row, col=col) == None - - # local database with planners' metrics - data = {} - - # to iterate over planners - skip first, avoiding 'Planner' - col_iter = increment_char(col_init) - # to iterate over metrics - row_iter = increment_row(row_init) - # iterate over planners (names) - while not sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): - # save planner name for later use in database - planner_name = str(get_sheet_val(sheet=sheet, row=row_init, col=col_iter)) - # got a valid planner name - iterate over saved metrics - metric_names = [] - metric_values = [] - while not sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): - metric_names.append(str(get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids))) - metric_values.append(float(get_sheet_val(sheet=sheet, row=row_iter, col=col_iter))) - # try to proceed to the next metric - row_iter = increment_row(row_iter) - - # finished collecting metrics for a given planner - metrics = dict(zip(metric_names, metric_values)) - # append to overall results - result = {planner_name: metrics} - data.update(result) - - # try to proceed to the next planner - col_iter = increment_char(col_iter) - # reset metrics row 'pointer' - row_iter = increment_row(row_init) - - return data - -def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> str: - # only keys from this map will be put into the LaTeX table; keys must match the ones used in the Excel sheet - metric_latex_map = { - 'm_obs': r"$m_{\mathrm{obs}}$ \\ $\left[ \% \right]$", - 'm_mef': r"$m_{\mathrm{mef}}$ \\ $\left[ \mathrm{s} \right]$", - 'm_path': r"$m_{\mathrm{plin}}$ \\ $\left[ \mathrm{m} \right]$", - 'm_chc': r"$m_{\mathrm{chc}}$ \\ $\left[ \mathrm{rad} 
\right]$", - 'm_cef': r"$m_{\mathrm{cef}}$ \\ $\left[ 10^{-3} \cdot \mathrm{s} \right]$", - 'm_cre': r"$m_{\mathrm{cre}}$ \\ $\left[ 10^{-3} \cdot \mathrm{s} \right]$", - 'm_vsm': r"$m_{\mathrm{vsm}}$ \\ $\left[ \mathrm{\frac{m}{s^2}} \right]$", - 'm_hsm': r"$m_{\mathrm{hsm}}$ \\ $\left[ \mathrm{\frac{rad}{s^2}} \right]$", - 'm_osc': r"$m_{\mathrm{osc}}$ \\ $\left[ \% \right]$", - 'm_bwd': r"$m_{\mathrm{bwd}}$ \\ $\left[ \% \right]$", - 'm_inp': r"$m_{\mathrm{iprot}}$ \\ $\left[ \% \right]$", - 'm_psi': r"$m_{\mathrm{psi}}$ \\ $\left[ \% \right]$", - 'm_fsi': r"$m_{\mathrm{fsi}}$ \\ $\left[ \% \right]$", - 'm_dir': r"$m_{\mathrm{dir}}$ \\ $\left[ \% \right]$" - } - - # retrieve names of planners assuming that all scenarios results have the same planner entries; - # choose from the first scenario - planner_names = results[0]['results'].keys() - # save number of checked planners +# results: results of a specific scenarios aggregated into a single structure +# metrics: names (keys) of metrics to include in the LaTeX table +def create_latex_table( + results: List[Dict[str, Dict[str, Dict[str, float]]]], + metric_names: List[str], + planner_names: List[str] +) -> str: + # only keys known by the SrpbMetrics will be put into the LaTeX table; keys must match the ones used in the sheet + srpb_metrics = SrpbMetrics() + # select metrics from the predefined set stored in SrpbMetrics + metrics_map = {} + + # by default, all metrics are included + if not len(metric_names): + metrics_map = srpb_metrics.get() + else: + metrics_map = srpb_metrics.get_metrics(metric_names) + + if not len(metrics_map): + raise Exception( + f"Aborting further execution as no metrics were selected to include in the LaTeX table. " + f"See the script's usage instruction." 
+ ) + print(f"Selected `{len(metrics_map)}` metrics to include in the LaTeX table: `{metrics_map.keys()}`") + + # save the number of considered planners planners_num = len(planner_names) + if not planners_num: + # choose the set from the first scenario + print(f"The input list of selected planners is empty! Selecting the planners appearing in the first scenario.") + planner_names = results[0]['results'].keys() + planners_num = len(planner_names) + + # evaluate whether valid data are available + if not len(planner_names) or not planners_num: + raise Exception( + f"Aborting further execution since no planners were selected or appeared in the first scenario." + ) + # retrieve number of evaluated scenarios scenarios_num = len(results) + print(f"Selected '{planners_num}' planners with names `{planner_names}` for `{scenarios_num}` scenarios") + + + def get_metric_value_for_planner( + scenario_results: Dict[str, Dict[str, Dict[str, float]]], + planner_name: str, + metric_id: str + ) -> float: + """ + A nested function that returns a unified value once the required planner is nonexistent in the results. + It is assumed that the nested dict given in scenario_results contains keys defining: planner name + and dict nested further - a metric ID + """ + metric_val = math.nan + try: + metric_val = scenario_results[planner_name][metric_id] + except KeyError: + print( + f"Could not find a '{planner_name}' in a given results set. " + f"Available planners are: '{scenario_results.keys()}'. " + f"This might be intentional, returning NaN." 
+ ) + # value will store the value of the metric (if able to parse correctly) + value = None + if isinstance(metric_val, float): + value = metric_val + elif isinstance(metric_val, list): + if not len(metric_val): + raise Exception(f"'{metric_id}' metric value for the planner '{planner_name}' is an empty list") + # arbitrarily selecting the first element in the list + value = metric_val[0] + # treat as a warning when the list is bigger than 1-element + if len(metric_val) > 1: + print( + f"\033[93m" + f"'{metric_id}' metric value for the planner '{planner_name}' is '{len(metric_val)}'-elem list, " + f"arbitrarily selecting the first element '{value}'" + "\033[0m" + ) + else: + raise Exception(f"'{metric_id}' metric value for the planner '{planner_name}' is of unsupported type") + return value + tex = str("") @@ -107,6 +110,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> # r forces to parse as raw string, ref: https://stackoverflow.com/a/46011113 tex += (r"% Dependencies of the 'results' table" + "\r\n") + tex += (r"\usepackage{graphicx} % \rotatebox" + "\r\n") tex += (r"\usepackage{multirow}" + "\r\n") tex += (r"\usepackage{diagbox}" + "\r\n") tex += ("\r\n") @@ -126,7 +130,15 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> tex += (r" % NOTE1: arg to parbox defines how high the text will start" + "\r\n") tex += (r" % NOTE2: \cline{2-8} is a partial horizontal line, ref: https://tex.stackexchange.com/a/8555" + "\r\n") tex += (r" \begin{tabular}" + "\r\n") - tex += (r" {||c||c||c|c|c|c|c|c||}" + "\r\n") + + # Prepare column for metric identifiers and units and for scenario identifiers + tex += (r" {||c||c||") + # centered columns according to the number of planners + for _ in range(planners_num): + tex += (r"c|") + # double vertical border and line end + tex += (r"|}" + "\r\n") + tex += (r" \hline" + "\r\n") tex += (r" % =============================== header" + "\r\n") tex += (r" 
\multicolumn{2}{|c|}{ % spreads across metric and scenario ID" + "\r\n") @@ -142,11 +154,12 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> # header columns of planners - assuming that all results have the same planner entries for planner_name in planner_names: + planner_name_latex_safe = planner_name.replace("_", "\_") tex += (r" & \rotatebox[origin=c]{90}{" + "\r\n") tex += (r" \parbox[c]{\benchresultspheaderheight}{" + "\r\n") tex += (r" \centering" + "\r\n") # enter a name of the planner - tex += (r" \emph{" + str(planner_name) + r"}" + "\r\n") + tex += (r" \emph{" + str(planner_name_latex_safe) + r"}" + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") @@ -155,7 +168,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> tex += (r" % =============================== entries" + "\r\n") # iterate over metrics - for metric_id in metric_latex_map.keys(): + for metric_id in metrics_map.keys(): tex += (r"" + "\r\n") tex += (r" \multirow" + "\r\n") # total number of scenarios @@ -164,7 +177,12 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> tex += (r" {" + "\r\n") tex += (r" \shortstack{" + "\r\n") # ID of the metric and its unit - tex += (r" " + str(metric_latex_map[metric_id]) + "\r\n") + # whether to put the unit in a new line or not (when there are too few rows) + if scenarios_num > 2: + metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} \\\\ {metrics_map[metric_id]['tex_unit']}" + else: + metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} {metrics_map[metric_id]['tex_unit']}" + tex += (r" " + str(metric_name_and_unit) + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") tex += (r" % =========" + "\r\n") @@ -185,10 +203,22 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> # find the best value among checked planners metric_values_among_planners = [] for planner_name in planner_names: - metric_val 
= results[scenario_num]['results'][planner_name][metric_id] + metric_val = get_metric_value_for_planner(results[scenario_num]['results'], planner_name, metric_id) + # there might be incorrect data: + # * 'nan' will break the min/max selection (nan will be returned) + # * 'None' cannot be compared with floats + if math.isnan(metric_val): + continue metric_values_among_planners.append(metric_val) - # NOTE: by default, the best metric is the one with the smallest value - metric_best_val = min(metric_values_among_planners) + + if not len(metric_values_among_planners): + raise Exception(f"Expected '{metric_id}' metric value but none of the planners have one assigned. Results are: {results[scenario_num]['results']}") + + # select the best metric - the one with the smallest or largest value (excluding NaNs) + if srpb_metrics.is_minimum_best(metric_id): + metric_best_val = min(metric_values_among_planners) + else: + metric_best_val = max(metric_values_among_planners) # if all metric values are equal to the best - let's mark the best as 'invalid' if all(x == metric_best_val for x in metric_values_among_planners): @@ -196,9 +226,15 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> # iterate over values of a specific metric for each planner for planner_name in planner_names: - metric_val = results[scenario_num]['results'][planner_name][metric_id] - # format to 2 decimal points - metric_val_str = "{:.2f}".format(metric_val) + metric_val = get_metric_value_for_planner(results[scenario_num]['results'], planner_name, metric_id) + # check for correctness/availability + if not math.isnan(metric_val): + # format to 2 decimal points + metric_val_str = "{:.2f}".format(metric_val) + else: + # indicate lack of valid data + metric_val_str = srpb_metrics.get_latex_missing_metric_value() + # mark the best value unless it is equal to 0 if metric_val == metric_best_val and metric_best_val != None: # bold @@ -229,24 +265,56 @@ def create_latex_table(results: 
List[Dict[str, Dict[str, Dict[str, float]]]]) -> # main # ########################### if __name__ == "__main__": - # command line arguments - if len(sys.argv) != 3: - print(f'Usage: ') - print(f' python3 {sys.argv[0]} ') - print(f'') - print(f'Example:') - print(f' python3 {sys.argv[0]} "{{"static": {{"sim": "path_static_sim", "real": "path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" ~/table.tex') - exit(0) + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", help="JSON string with scenario identifiers and paths to SRPB results sheets") + cli.add_argument("output", help="path to the generated .tex file") + # optional arguments + cli.add_argument( + "--metrics", + nargs="*", + type=str, + default=[], + help="space-separated list of metrics to include in the LaTeX table (all available are included by default)" + ) + cli.add_argument( + "--planners", + nargs="*", + type=str, + default=[], + help="space-separated list of planners to include in the LaTeX table (all appearing in the first results file \ + are included by default)" + ) + # Usage example + # python3 create_latex_table_from_results.py \ + # "{{"static": {{"sim": "path_static_sim", "real": "path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" \ + # ~/table.tex \ + # --metrics m_obs m_chc \ + # --planners dwa teb + + # parse the command line + args = cli.parse_args() # location of the output file - output_path = sys.argv[2] + output_path = args.output + + # metrics to include in the table + metric_names = args.metrics + + # selected planner set + planner_names = args.planners # list of dicts {, } inputs = [] - cmd_json = json.loads(sys.argv[1]) + cmd_json = json.loads(args.input) for key in cmd_json.keys(): - inputs.append({'name': str(key), 'path': Path(cmd_json[key])}) + # (multiple) spreadsheet path(s) is(are) given as a string(s) (separated 
with a comma) + paths = cmd_json[key].split(",") + paths_pathlib = [Path(path.strip("'")) for path in paths] + # collect a single input + inputs.append({'name': str(key), 'path': paths_pathlib}) print("\tScript inputs:") print(*inputs, sep='\n') @@ -255,12 +323,16 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> results_total = [] # loop for processing Excel sheets for all input files - for input_file in inputs: - scenario_results = load_data_from_excel(input_file['path']) - results_total.append({'name': input_file['name'], 'results': scenario_results}) - - # generates a string representing LaTeX command that can be - results_table = create_latex_table(results_total) + for input in inputs: + # aggregated results (possibly from multiple files) + scenario_results = {} + for file in input['path']: + scenario_results_partial = load_data_from_excel(file) + scenario_results.update(scenario_results_partial) + results_total.append({'name': input['name'], 'results': scenario_results}) + + # generates a string representing LaTeX command that can be directly included and used in a LaTeX document + results_table = create_latex_table(results_total, metric_names, planner_names) # save results to a file f = open(output_path, "w") diff --git a/srpb_evaluation/scripts/evaluate_all_dirs.sh b/srpb_evaluation/scripts/evaluate_all_dirs.sh index 9f61a1a..579616a 100755 --- a/srpb_evaluation/scripts/evaluate_all_dirs.sh +++ b/srpb_evaluation/scripts/evaluate_all_dirs.sh @@ -2,21 +2,50 @@ # # Runs srpb_evaluation node with the newest set of log files from each directory below the script's path # -# It expects that `evaluate_from_dir.sh` script is placed in the same directory +# It expects that `evaluate_from_dir.sh` script is available using `rosrun srpb_evaluation evaluate_from_dir.sh` # # Remember to source your ROS workspace before execution # -SCRIPT_DIR=$(realpath $(dirname $0)) +# This script can be executed using: +# +# rosrun srpb_evaluation 
evaluate_all_dirs.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. Script args:" + echo " (1) [required] full path to the main directory with grouped logs (4 logs in each separate directory)" + echo " (2) [optional] safety distance for m_obs metric, 0.55 by default" + exit 0 +fi + +# only the path to logs directory is given +if [ "$#" -eq 1 ]; then + logs_dir="$1" + # trim trailing / (if exists) + logs_dir="${logs_dir%/}" + safety_dist=0.55 + echo "Using logs main directory: '$logs_dir' and default safety distance of '$safety_dist' m" +fi + +if [ "$#" -eq 2 ]; then + logs_dir="$1" + # trim trailing / (if exists) + logs_dir="${logs_dir%/}" + safety_dist=$2 + echo "Using logs main directory: '$logs_dir' and custom safety distance of '$safety_dist' m" +fi + +# exclude main directory from the search below +logs_basename=$(basename $logs_dir) # - ignore directories starting with the underscore # - for each directory, run the `evaluate_from_dir` script, passing each found directory as an argument # Ref: # * https://stackoverflow.com/a/20292636 # * https://stackoverflow.com/a/15736463 -find . -maxdepth 1 -type d -not -path "./_*" \( ! -name . \) -exec bash -c "$SCRIPT_DIR/evaluate_from_dir.sh '{}'" \; +find $logs_dir -maxdepth 1 -type d -not -path "$logs_dir/_*" \( ! -name $logs_basename \) -exec bash -c "rosrun srpb_evaluation evaluate_from_dir.sh '{}'" \; # first part of command above to save number of evaluated dirs, ref: https://stackoverflow.com/a/15663664 -count=$(find . -maxdepth 1 -type d -not -path "./_*" \( ! -name . \) | wc -l) +count=$(find $logs_dir -maxdepth 1 -type d -not -path "$logs_dir/_*" \( ! 
-name $logs_basename \) | wc -l) echo echo "Evaluated $count directories" diff --git a/srpb_evaluation/scripts/evaluate_from_dir.sh b/srpb_evaluation/scripts/evaluate_from_dir.sh index 7a878da..a9f41b8 100755 --- a/srpb_evaluation/scripts/evaluate_from_dir.sh +++ b/srpb_evaluation/scripts/evaluate_from_dir.sh @@ -4,25 +4,43 @@ # # Remember to source your ROS workspace before execution # -if [ "$#" -ne 1 ]; then - echo "What's the target directory?" +# This script can be executed using: +# +# rosrun srpb_evaluation evaluate_from_dir.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. Script args:" + echo " (1) [required] full path to the directory with logs ('*_robot.txt', '*_people.txt', and '*_groups.txt')" + echo " (2) [optional] safety distance for m_obs metric, 0.55 by default" exit 0 fi -SCRIPT_DIR=$(realpath $(dirname $0)) -LOGS_DIR="$1" -SAFETY_DIST=0.55 +# only the path to logs directory is given +if [ "$#" -eq 1 ]; then + logs_dir="$1" + safety_dist=0.55 + echo "Using logs directory: '$logs_dir' and default safety distance of '$safety_dist' m" +fi + +if [ "$#" -eq 2 ]; then + logs_dir="$1" + safety_dist=$2 + echo "Using logs directory: '$logs_dir' and custom safety distance of '$safety_dist' m" +fi # https://stackoverflow.com/a/54910963 -newest_robot=$(ls -t $LOGS_DIR/log_*_robot.txt | head -1) -newest_people=$(ls -t $LOGS_DIR/log_*_people.txt | head -1) -newest_groups=$(ls -t $LOGS_DIR/log_*_groups.txt | head -1) +# exclude results file(s) +newest_robot=$(ls -t $logs_dir/log_*_robot*.txt | grep -v "results" | head -1) +newest_people=$(ls -t $logs_dir/log_*_people*.txt | grep -v "results" | head -1) +newest_groups=$(ls -t $logs_dir/log_*_groups*.txt | grep -v "results" | head -1) +newest_gplanner=$(ls -t $logs_dir/log_*_gplanner*.txt | grep -v "results" | head -1) echo "Evaluating:" echo "" echo "robot data: $newest_robot" echo "people data: $newest_people" echo "groups data: $newest_groups" +echo "global planner data: $newest_gplanner" 
echo "" -rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $SAFETY_DIST +rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $newest_gplanner $safety_dist diff --git a/srpb_evaluation/scripts/evaluate_newest.sh b/srpb_evaluation/scripts/evaluate_newest.sh deleted file mode 100755 index e672cd6..0000000 --- a/srpb_evaluation/scripts/evaluate_newest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# Runs srpb_evaluation node with the newest set of log files from the script's directory. -# -# Remember to source your ROS workspace before execution -# -SCRIPT_DIR=$(realpath $(dirname $0)) -LOGS_DIR=$SCRIPT_DIR -SAFETY_DIST=0.55 - -# https://stackoverflow.com/a/54910963 -newest_robot=$(ls -t $LOGS_DIR/log_*_robot.txt | head -1) -newest_people=$(ls -t $LOGS_DIR/log_*_people.txt | head -1) -newest_groups=$(ls -t $LOGS_DIR/log_*_groups.txt | head -1) - -echo "Evaluating:" -echo "" -echo "robot data: $newest_robot" -echo "people data: $newest_people" -echo "groups data: $newest_groups" -echo "" - -rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $SAFETY_DIST diff --git a/srpb_evaluation/scripts/excel_sheet_utils.py b/srpb_evaluation/scripts/excel_sheet_utils.py new file mode 100644 index 0000000..e774a63 --- /dev/null +++ b/srpb_evaluation/scripts/excel_sheet_utils.py @@ -0,0 +1,199 @@ +from openpyxl import load_workbook +from openpyxl import Workbook +from openpyxl.worksheet import Worksheet +from pathlib import Path +from typing import Dict +from typing import List + +# name of the sheet in the Excel file with the results +SHEET_NAME = "SRPB" + +# upper left cell - cell that initiates results table +RESULT_RAW_INIT_COL = 'A' +RESULT_RAW_INIT_ROW = '1' +# filtered (mean/median) +RESULT_INIT_COL = 'A' +RESULT_INIT_ROW = '50' + +# Must be non-float and cannot be None, as that would break detection of that special case +FAILED_TRIAL_METRIC_VALUE = '#FAIL#' + + +def 
increment_column(col: str): + """ + Increments the ID of the column using A-Z characters, e.g., passing Z gives AA + :param col column ID to be incremented + """ + if col == None: + raise Exception("Could not increment 'None'") + + # lambda that works for chars in A-Z range + increment_char = lambda c : chr(ord(c) + 1) + + # Generated by ChatGPT + if col == "": + return "A" + if col[-1] != 'Z': + return col[:-1] + increment_char(col[-1]) + else: + i = len(col) - 1 + while i >= 0 and col[i] == 'Z': + i -= 1 + if i == -1: + return 'A' * (len(col) + 1) + else: + return col[:i] + increment_char(col[i]) + 'A' * (len(col) - i - 1) + + +def increment_row(row: int): + """ + Increments integer stored as string + """ + return str(int(row) + 1) + + +def make_sheet_cell(row: int, col: str): + """ + Creates col-row tuple to access cell in a sheet + """ + return str(col + row) + + +def get_sheet_val(sheet: Worksheet, row: int, col: str): + """ + Retrieves a value of sheet's cell + """ + return sheet[make_sheet_cell(col=col, row=row)].value + + +def is_sheet_cell_empty(sheet: Worksheet, row: int, col: str): + """ + Checks if certain cell in the sheet is empty + """ + return get_sheet_val(sheet=sheet, row=row, col=col) == None + + +def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: int) -> Dict[str, Dict[str, float]]: + """ + Reads the spreadsheet following the convention introduced in `create_excel_from_results.calculate_sheet` script + :param wb loaded workbook of the spreadsheet + :param sheet_name specific sheet to access + :param col_init column of the upper left cell - cell that initiates results table + :param row_init row of the upper left cell - cell that initiates results table + """ + + sheet = wb[sheet_name] + col_metric_ids = col_init + + # local database with planners' metrics, actually: + # Dict[str, Dict[str, List[float]]] + # str: planner ID + # str: metric ID + # list[float]: metric values for the subsequent trials + data = {} + + # 
to iterate over planners - skip first, avoiding 'Planner' + col_iter = increment_column(col_init) + # to iterate over metrics + row_iter = increment_row(row_init) + # iterate over planners (names) + while not is_sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): + # save planner name for later use in database + planner_name = str(get_sheet_val(sheet=sheet, row=row_init, col=col_iter)) + # print(f"Checking sheet at {make_sheet_cell(col=col_iter, row=row_init)} for planner {planner_name}") + # got a valid planner name - iterate over saved metrics + metric_names = [] + metric_values = [] + while not is_sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): + # obtain sheet values + metric_name = get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids) + metric_value = get_sheet_val(sheet=sheet, row=row_iter, col=col_iter) + # evaluate the correctness of read data + if metric_name == None or metric_value == None: + raise Exception( + f"The metric ID cell `{col_metric_ids}{row_iter}` contains `{metric_name}` " + f"and the metric value cell `{col_iter}{row_iter}` contains `{metric_value}`. " + f"Cannot proceed with such values. Check whether the cells in your spreadsheet are empty. " + f"If they aren't, try to open the sheet and simply save it using Excel or LibreOffice Calc." 
+ ) + # try to convert + metric_name = str(metric_name) + try: + metric_value = float(metric_value) + except ValueError: + # EDGE CASE detection: when iterating through the spreadsheet on which `remove_failed_trials_from_excel.py` + # has been called, it may happen that metric is indicated as invalid - set it to None + if not metric_value == FAILED_TRIAL_METRIC_VALUE: + # try to proceed to the next metric (this is the normal operation) + row_iter = increment_row(row_iter) + print( + f"Could not convert '{metric_value}' into float, proceeding to the next row '{row_iter}'" + f" in column '{col_iter}'" + ) + continue + else: + # convert failure indicator to None and proceed normally (FAILED_TRIAL_METRIC_VALUE is used for + # distinguishing failed trial cells from out-of-range cells) + metric_value = None + # collect + metric_names.append(metric_name) + metric_values.append(metric_value) + # print(f"Checking metric ID {metric_names[-1]}, val {metric_values[-1]}") + # try to proceed to the next metric + row_iter = increment_row(row_iter) + + # finished collecting metrics for a given planner + metrics = dict(zip(metric_names, metric_values)) + + # update the planner data with the new set of metrics + if not planner_name in data.keys(): + # initialize key in dict + data[planner_name] = {} + for metric_id, metric_value in metrics.items(): + # assert that values are stored as lists + data[planner_name][metric_id] = [metric_value] + else: + # ready to append new values to an existing lists + for metric_id in data[planner_name].keys(): + data[planner_name][metric_id].append(metrics[metric_id]) + + # make sure that the same metrics are included in each new set of metrics + num_entries = None + for metric_id in data[planner_name].keys(): + prev_num_entries = num_entries + num_entries = len(data[planner_name][metric_id]) + if prev_num_entries == None: + continue + if num_entries != prev_num_entries: + raise Exception( + f"Number of value entries for metric '{metric_id}' is 
different than for the previous " + f"one ({num_entries} vs {prev_num_entries})" + ) + + # try to proceed to the next planner + col_iter = increment_column(col_iter) + # reset metrics row 'pointer' + row_iter = increment_row(row_init) + + return data + + +def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: + """ + Reads the filtered data from the spreadsheet following the convention introduced in + `create_excel_from_results.calculate_sheet` function + """ + + # NOTE: data_only allows to read values instead of formulas + wb = load_workbook(filename = str(path), data_only=True) + return read_data_from_excel(wb, SHEET_NAME, RESULT_INIT_COL, RESULT_INIT_ROW) + + +def load_raw_data_from_excel(path: Path) -> Dict[str, Dict[str, List[float]]]: + """ + Reads the raw data from the spreadsheet following the convention introduced in + `create_excel_from_results.calculate_sheet` function + """ + # NOTE: data_only allows to read values instead of formulas + wb = load_workbook(filename = str(path), data_only=True) + return read_data_from_excel(wb, SHEET_NAME, RESULT_RAW_INIT_COL, RESULT_RAW_INIT_ROW) diff --git a/srpb_evaluation/scripts/remove_failed_trials_from_excel.py b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py new file mode 100644 index 0000000..69746fd --- /dev/null +++ b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py @@ -0,0 +1,120 @@ +''' +Opens an Excel file with results obtained from SRPB logs, searches for trials that were not +successful (unfinished), and clears the metrics related to those trials so the failed trials are not considered +in filtering. 
+Note that the Excel spreadsheet must be previously generated with the relevant script, +i.e., 'create_excel_from_results.py', as this script strictly follows the spreadsheet data placement introduced +in the mentioned 'create_excel_from_results.py' + +Dependency: +sudo apt install python3-openpyxl +''' + +import argparse +import excel_sheet_utils +import shutil + +from excel_sheet_utils import get_sheet_val +from excel_sheet_utils import increment_column +from excel_sheet_utils import increment_row +from excel_sheet_utils import is_sheet_cell_empty +from excel_sheet_utils import make_sheet_cell + +from openpyxl import load_workbook +from openpyxl.worksheet import Worksheet + +from pathlib import Path + + +def remove_unfinished_trials( + ws: Worksheet, + metric_cond_name: str, + metric_cond_remove_val: float, + row_init: int, + col_init: str +): + # iterate over column with subsequent trials + col_it = col_init + # find the row of the target metric + row_it = row_init + while not get_sheet_val(ws, row_it, col_it) == metric_cond_name: + row_it = increment_row(row_it) + # save + row_target_metric = row_it + + # iterate along the same row + while not is_sheet_cell_empty(ws, row_init, col_it): + remove_trial_if_unsuccessful(ws, row_target_metric, col_it, metric_cond_remove_val) + col_it = increment_column(col_it) + + +def remove_trial_if_unsuccessful( + ws: Worksheet, + metric_cond_row: int, + metric_cond_col: str, + metric_cond_value_to_remove +): + """ + Assumes that the target metric is located above the metrics to remove + """ + cond_value = get_sheet_val(ws, metric_cond_row, metric_cond_col) + if not cond_value == metric_cond_value_to_remove: + return + + # do not remove the conditional value + row_it = increment_row(metric_cond_row) + # iterate over rows to remove all metrics in a given column (related to a given trial) + while not is_sheet_cell_empty(ws, row_it, metric_cond_col): + cell = make_sheet_cell(row_it, metric_cond_col) + # NOTE: the cell is filled with 
a special value stored under the constant + ws[cell] = excel_sheet_utils.FAILED_TRIAL_METRIC_VALUE + row_it = increment_row(row_it) + + +if __name__ == "__main__": + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", help="Path to an input spreadsheet with SRPB results") + cli.add_argument("output", nargs='?', help="Path to an output spreadsheet with SRPB results; empty overwrites the input file") + + # parse the command line + args = cli.parse_args() + + # location of the input/output files + input_path = Path(args.input) + # if optional argument is empty, input file will be overwritten + if args.output == None: + output_path = input_path + # create a temporary backup just in case + shutil.copyfile(input_path, "/tmp/" + str(input_path.stem) + str(input_path.suffix)) + else: + output_path = Path(args.output) + + TARGET_METRIC_NAME = 'm_goal' + TARGET_METRIC_VALUE_REMOVE = 0 + + # Load the spreadsheet contents and operate directly on them + # NOTE: False keeps the functions etc. 
instead of raw data (True) + wb = load_workbook(filename=input_path, data_only=False) + remove_unfinished_trials( + wb[excel_sheet_utils.SHEET_NAME], + TARGET_METRIC_NAME, + TARGET_METRIC_VALUE_REMOVE, + excel_sheet_utils.RESULT_RAW_INIT_ROW, + excel_sheet_utils.RESULT_RAW_INIT_COL + ) + + # Save the file + wb.save(output_path) + + # Copied from 'create_excel_from_results.py' + print(f'Results saved in: {output_path}') + print("") + # When the sheet with the results is not opened and saved by the Excel or LibreOffice Calc, then reading a non-empty + # cell will probably return None + # Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ + # Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y + print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') + print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') + print(f'This is a necessary step when one wants to use the script that creates a LaTeX table from a spreadsheet') diff --git a/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh new file mode 100755 index 0000000..34b6a62 --- /dev/null +++ b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# This script is handy when one wants to use `create_excel_from_results` but in the same directory one has planners +# named with the same "base name", e.g., `great_planner` and `great_planner_plus` +# +# Usage: +# +# ./rename_dirs_matching_pattern.sh ~/srpb_logs/ great_planner great_planner_plus great_planner_original +# +# This script was written by the ChatGPT + +# Check if all arguments are provided +if [ "$#" -ne 4 ]; then + echo "Usage: $0 " + exit 1 +fi + +main_dir="$1" +pattern_to_match="$2" +pattern_to_differentiate="$3" +pattern_to_replace="$4" + +cd "$main_dir" || exit 1 + +# Initialize the highest_number 
+highest_number="0" + +# Loop through directories matching the provided pattern +for dir in *"$pattern_to_match"*; do + # Check if the directory does not contain the differentiate pattern + if [[ $dir != *"$pattern_to_differentiate"* ]]; then + # Extract the number part using sed + number=$(echo "$dir" | sed "s/.*${pattern_to_replace}\([0-9]\+\).*/\1/") + + # Check if the extracted string is a number + if [[ "$number" =~ ^[0-9]+$ ]]; then + # Compare the numbers + if [ "$number" -gt "$highest_number" ]; then + highest_number="$number" + fi + fi + fi +done + +# Loop through directories matching the provided pattern +for dir in *"$pattern_to_match"*; do + # Check if the directory matching the pattern exists + if [ ! -d "$dir" ]; then + continue + fi + # Check if the directory does not contain the differentiate pattern + if [[ $dir != *"$pattern_to_differentiate"* ]]; then + # Check if the directory does not contain the replace pattern + if [[ $dir != *"$pattern_to_replace"* ]]; then + # Extract the base name without the pattern and append the replace pattern + new_dir=$(echo "$dir" | sed "s/$pattern_to_match/$pattern_to_replace/")"" + + # trim the number from a directory named with "pattern_to_match" and replace with the "highest" + # from the previous loop + number_match=$(echo "$dir" | sed "s/.*${pattern_to_match}\([0-9]*\).*/\1/") + + # interpret as non-octal, ref: https://unix.stackexchange.com/a/406445 + highest_number=$(echo $((10#$highest_number))) + # increment + ((highest_number++)) + # adds a leading zero for a digit + number_replace=$(printf "%02d" $highest_number) + + new_dir_num=$(echo "$new_dir" | sed "s/$number_match/$number_replace/")"" + + # Rename the directory + mv "$dir" "$new_dir_num" + + # Print a message indicating the rename + echo "Renamed: $dir -> $new_dir_num" + fi + fi +done +exit 0 diff --git a/srpb_evaluation/scripts/rewind_experiment.py b/srpb_evaluation/scripts/rewind_experiment.py index c726086..b426e76 100644 --- 
a/srpb_evaluation/scripts/rewind_experiment.py +++ b/srpb_evaluation/scripts/rewind_experiment.py @@ -135,8 +135,8 @@ def main( ax.get_yaxis().set_visible(False) # save figure with the experiment data - output_filename = vis_name + '.pdf' - fig.savefig(output_filename, bbox_inches='tight', pad_inches=0) + output_filename = vis_name + '.' + str(visuals['figure']['extension']) + fig.savefig(output_filename, bbox_inches='tight', pad_inches=0, dpi=visuals['figure']['dpi']) print(f"Logged experiment data saved to `{output_filename}` file") # blocking call until window with the plot is closed @@ -153,13 +153,19 @@ def main( norm=mpl.colors.Normalize(vmin=timestamps[0], vmax=timestamps[-1]), orientation='vertical' ) + font_cfg = { + 'family': visuals['legend']['fontfamily'], + 'size': visuals['legend']['fontsize'], + } + colorbar.set_label('Seconds', fontdict=font_cfg) # https://stackoverflow.com/a/67438742 - colorbar.ax.tick_params(labelsize=visuals['legend']['fontsize']) - colorbar.set_label('Seconds', fontsize=visuals['legend']['fontsize']) + colorbar.ax.tick_params(labelsize=font_cfg['size']) + # Update tick label font family + colorbar.ax.set_yticklabels(colorbar.ax.get_yticklabels(), fontdict=font_cfg) # Save the auxiliary figure - tim_scale_filename = vis_name + '_timing' + '.pdf' - fig_tim_scale.savefig(tim_scale_filename, bbox_inches='tight', pad_inches=0) + tim_scale_filename = vis_name + '_timing' + '.' 
+ str(visuals['figure']['extension']) + fig_tim_scale.savefig(tim_scale_filename, bbox_inches='tight', pad_inches=0, dpi=visuals['figure']['dpi']) print(f"Figure with colorized passage of time saved to `{tim_scale_filename}` file") # show the timing legend figure @@ -200,7 +206,9 @@ def main( timestamp_max = config['log']['max_timestamp'] # load visual configuration - visuals = {'people': {}, 'groups': {}, 'legend': {}} + visuals = {'figure': {}, 'people': {}, 'groups': {}, 'legend': {}} + visuals['figure']['extension'] = config['vis']['figure']['extension'] + visuals['figure']['dpi'] = config['vis']['figure']['dpi'] visuals['people']['ec'] = config['vis']['people']['edge_color'] visuals['people']['radius'] = config['vis']['people']['radius'] visuals['groups']['edge_alpha'] = config['vis']['groups']['edge_alpha'] @@ -209,6 +217,7 @@ def main( visuals['legend']['width'] = config['vis']['legend']['width'] visuals['legend']['height'] = config['vis']['legend']['height'] visuals['legend']['fontsize'] = config['vis']['legend']['fontsize'] + visuals['legend']['fontfamily'] = config['vis']['legend']['fontfamily'] log_file_robot = log_file_basename + '_robot.txt' log_file_people = log_file_basename + '_people.txt' @@ -302,6 +311,8 @@ def main( log_entities['groups'].append(group) # create timestamps + last_timestamp_of_experiment = log_entities['robot'][-1].get_timestamp() + print(f"The last timestamp of the experiment is {last_timestamp_of_experiment}") timestamps = extract_timestamps(log_entities['robot'], timestamp_max) print(f"Prepared timestamps from {timestamps[0]} to {timestamps[-1]} with {len(timestamps)} entries") diff --git a/srpb_evaluation/scripts/rewind_experiment.yaml b/srpb_evaluation/scripts/rewind_experiment.yaml index e27f203..56f886a 100644 --- a/srpb_evaluation/scripts/rewind_experiment.yaml +++ b/srpb_evaluation/scripts/rewind_experiment.yaml @@ -26,6 +26,10 @@ map: # map coordinates (x and y) of additional points to be drawn (list of 2-element lists) 
additional_pts: [] vis: + figure: + # (pdf|png|jpeg) affects the format of the output figures + extension: png + dpi: 100 people: # full RGBA edge_color: [0.23, 0.23, 0.23, 0.10] @@ -39,3 +43,5 @@ vis: width: 0.25 height: 5 fontsize: 17 + # (cursive|fantasy|monospace|sans|sans serif|sans-serif|serif) + fontfamily: serif diff --git a/srpb_evaluation/scripts/rewind_experiment_utils.py b/srpb_evaluation/scripts/rewind_experiment_utils.py index 1d15c8a..73d77a7 100644 --- a/srpb_evaluation/scripts/rewind_experiment_utils.py +++ b/srpb_evaluation/scripts/rewind_experiment_utils.py @@ -184,8 +184,9 @@ def recompute_group_cog(group: Group, people_set: List[Person], people_hide: Lis for person in valid_people_this_group: new_cog_x += person.get_x() new_cog_y += person.get_y() - new_cog_x /= len(valid_people_this_group) - new_cog_y /= len(valid_people_this_group) + if valid_people_this_group: + new_cog_x /= len(valid_people_this_group) + new_cog_y /= len(valid_people_this_group) new_cog_z = 0.0 # collect valid member IDs diff --git a/srpb_evaluation/scripts/srpb_metrics.py b/srpb_evaluation/scripts/srpb_metrics.py new file mode 100644 index 0000000..55c0f37 --- /dev/null +++ b/srpb_evaluation/scripts/srpb_metrics.py @@ -0,0 +1,340 @@ +class SrpbMetrics: + def __init__(self) -> None: + self.tex_missing_metric_value = r"---" + # short names to avoid line breaking + # excel_fun_range_begin + self.efrb = "{#1}" + # excel_fun_range_end + self.efre = "{#2}" + + self.map = { + 's_rbt': { + 'tex_name': r"$m_{\mathrm{srbt}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of robot samples collected during a single trial', + # not necessarily true, but less samples with the same metric results indicates better effectiveness + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 's_ppl': { + 'tex_name': r"$m_{\mathrm{sppl}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of people samples 
collected during a single trial', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 's_grp': { + 'tex_name': r"$m_{\mathrm{sgrp}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of group samples collected during a single trial', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + + 'm_goal': { + 'tex_name': r"$m_{\mathrm{goal}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Navigation success rate', + 'min_is_best': False, + 'excel_calc_fun': f'=100.0 * (SUM({self.efrb}:{self.efre}) / COLUMNS({self.efrb}:{self.efre}))' + }, + # for the backward compatibility with IEEE Access article, this metric (not presented in the paper) + # is named '_dist m_obs', whereas violations metric (m_obs_viol) is 'm_obs' (as presented) + 'm_obs': { + 'tex_name': r"${}_{\mathrm{dist}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Mean distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Minimum distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Maximum distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_viol': { + 'tex_name': r"$m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Obstacle safety', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_mef': { + 'tex_name': r"$m_{\mathrm{mef}}$", + 'tex_unit': r"$\left[ \mathrm{s} \right]$", + 'description': 'Motion efficiency', + 'min_is_best': True, + 
'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_path': { + 'tex_name': r"$m_{\mathrm{plin}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Path length', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_chc': { + 'tex_name': r"$m_{\mathrm{chc}}$", + 'tex_unit': r"$\left[ \mathrm{rad} \right]$", + 'description': 'Cumulative heading change', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_cef': { + 'tex_name': r"$m_{\mathrm{cef}}$", + 'tex_unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$", + 'description': 'Computational efficiency', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_cre': { + 'tex_name': r"$m_{\mathrm{cre}}$", + 'tex_unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$", + 'description': 'Computational time repeatability', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_vsm': { + 'tex_name': r"$m_{\mathrm{vsm}}$", + 'tex_unit': r"$\left[ \mathrm{\frac{m}{s^2}} \right]$", + 'description': 'Velocity smoothness', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_hsm': { + 'tex_name': r"$m_{\mathrm{hsm}}$", + 'tex_unit': r"$\left[ \mathrm{\frac{rad}{s^2}} \right]$", + 'description': 'Heading change smoothness', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_osc': { + 'tex_name': r"$m_{\mathrm{osc}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Oscillations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_bwd': { + 'tex_name': r"$m_{\mathrm{bwd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Backward movements', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_inp': { + 'tex_name': r"$m_{\mathrm{iprot}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'In-place 
rotations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi': { + 'tex_name': r"$m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the personal spaces intrusion violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi': { + 'tex_name': r"$m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'F-Formations\' O-spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the F-Formations\' O-spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the F-Formations\' O-spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 
'Percentage of the F-Formations\' O-spaces intrusion violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir': { + 'tex_name': r"$m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the heading direction discomfort violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd': { + 'tex_name': r"$m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of a passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of a passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{psd}}$", + 'tex_unit': 
r"$\left[ \% \right]$", + 'description': 'Percentage of the passing speed discomfort violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + } + } + + + def get(self) -> dict: + """ + Returns a whole map + """ + return self.map + + + def has_metric(self, metric_id: str) -> bool: + """ + Evaluates whether a given metric exists + """ + return metric_id in self.map.keys() + + + def get_metric(self, metric_id: str) -> dict: + """ + Returns all data related to a certain metric_id + """ + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id] + + + def get_metrics(self, metrics_id: list) -> dict: + """ + Returns a dict with data related to selected metrics + """ + metrics_map = {} + for name in metrics_id: + if not self.has_metric(name): + raise Exception( + f"Could not find {name} in the SRPB metrics map. Available metric names are: {self.map.keys()}" + ) + metrics_map[name] = self.get_metric(name) + return metrics_map + + + def get_latex_unit(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['tex_unit'] + + + def get_latex_name(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['tex_name'] + + + def get_description(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['description'] + + + def is_minimum_best(self, metric_id: str) -> bool: + """ + Returns True if the minimum value of the metric is desired, or False otherwise + """ + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['min_is_best'] + + + def get_excel_calc_fun_raw(self, metric_id: str) -> str: + """ + Returns the raw string representation of an Excel function + """ + if not 
self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['excel_calc_fun'] + + + def get_excel_calc_fun(self, metric_id: str, range_begin: str, range_end: str) -> str: + """ + Returns the string representation of an Excel function + """ + fun_str = self.get_excel_calc_fun_raw(metric_id) + fun_str = fun_str.replace(self.efrb, range_begin) + fun_str = fun_str.replace(self.efre, range_end) + return fun_str + + + def get_latex_missing_metric_value(self) -> str: + return self.tex_missing_metric_value diff --git a/srpb_evaluation/src/main.cpp b/srpb_evaluation/src/main.cpp index 8e77f63..156d739 100644 --- a/srpb_evaluation/src/main.cpp +++ b/srpb_evaluation/src/main.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "srpb_evaluation/utils.h" @@ -8,12 +9,13 @@ using namespace srpb::logger; using namespace srpb::evaluation; int main(int argc, char* argv[]) { - if (argc != 5) { + if (argc != 6) { printf( "Please input\r\n" "\t* the path to the log file of the robot\r\n" "\t* the path to the log file of the people\r\n" "\t* the path to the log file of the people groups\r\n" + "\t* the path to the log file of the global planner\r\n" "\t* and value of the safety distance [m].\r\n" ); return 1; @@ -22,7 +24,8 @@ int main(int argc, char* argv[]) { auto file_robot = std::string(argv[1]); auto file_people = std::string(argv[2]); auto file_groups = std::string(argv[3]); - auto safety_distance = std::atof(argv[4]); + auto file_gplanner = std::string(argv[4]); + auto safety_distance = std::atof(argv[5]); // goal reached values double goal_tolerance_xy = 0.2; @@ -52,10 +55,13 @@ int main(int argc, char* argv[]) { double robot_max_speed = 0.55; // threshold of Gaussian value to detect significant disturbance caused by robot location or motion direction double heading_disturbance_threshold = 0.20; + // threshold of the discomfort value that defines a significant violation + double passing_speed_discomfort_threshold = 0.40; 
auto timed_robot_data = parseFile(file_robot, &RobotLogger::robotFromString); auto timed_people_data = parseFile(file_people, &PeopleLogger::personFromString); auto timed_groups_data = parseFile(file_groups, &PeopleLogger::groupFromString); + auto timed_gplanner_data = parseFile(file_gplanner, &GlobalPlannerLogger::plannerFromString); // since Person and Group are logged in separation, so by default Group does not contain members, only their IDs timed_groups_data = fillGroupsWithMembers(timed_groups_data, timed_people_data); @@ -134,6 +140,15 @@ int main(int argc, char* argv[]) { ); heading_direction.printResults(); + PassingSpeedDiscomfort psd( + timed_robot_data, + timed_people_data, + robot_circumradius, + robot_max_speed, + passing_speed_discomfort_threshold + ); + psd.printResults(); + // save results file auto file_results = file_robot.substr(0, file_robot.find_last_of('_')) + "_results.txt"; createResultsFile( @@ -155,7 +170,8 @@ int main(int argc, char* argv[]) { inplace, psi, fsi, - heading_direction + heading_direction, + psd ); return 0; diff --git a/srpb_evaluation/src/metric_gaussian.cpp b/srpb_evaluation/src/metric_gaussian.cpp deleted file mode 100644 index 969c7ef..0000000 --- a/srpb_evaluation/src/metric_gaussian.cpp +++ /dev/null @@ -1,83 +0,0 @@ -#include "srpb_evaluation/metric_gaussian.h" - -#include -#include -#include - -#include - -namespace srpb { -namespace evaluation { - -std::tuple MetricGaussian::calculateGaussianStatistics( - std::vector>> timed_gaussians, - double violation_threshold, - bool max_method -) { - // find actual duration - double duration = 0.0; - for (const auto& tg: timed_gaussians) { - duration += tg.first; - } - - // find number of personal space / f-formation space violations - unsigned int gaussian_violations = 0; - unsigned int gaussians_total = 0; - - // find gaussians and recompute according to recognized people (max/sum) - double metrics = 0.0; - double min_elem = std::numeric_limits::max(); - double max_elem = 
std::numeric_limits::min(); - - // rollout gaussians and compute score (metrics) - for (const auto& tg: timed_gaussians) { // tg in fact might be const but ac - double dt = tg.first; - if (tg.second.empty()) { - std::cout << "Gaussian(s) is empty for at least 1 sample. Metrics will be 0.0" << std::endl; - return std::make_tuple(0.0, 0.0, 0.0, 0); - } - - // count timestamps when violations ocurred - gaussian_violations += std::count_if( - tg.second.cbegin(), - tg.second.cend(), - [&](double g) { - // count total numbers to find percentage - gaussians_total++; - return g > violation_threshold; - } - ); - - // overall min and max computation - double local_min_elem = *std::min_element(tg.second.cbegin(), tg.second.cend()); - if (local_min_elem < min_elem) { - min_elem = local_min_elem; - } - - double local_max_elem = *std::max_element(tg.second.cbegin(), tg.second.cend()); - if (local_max_elem > max_elem) { - max_elem = local_max_elem; - } - - // check for selected method of normalization - double metrics_elem = 0.0; - if (max_method) { - // max method used here for normalization - metrics_elem = local_max_elem; - } else { - // average used for normalization - metrics_elem = std::accumulate(tg.second.cbegin(), tg.second.cend(), 0.0) / static_cast(tg.second.size()); - } - - // Gaussians must be referenced to timestamps when, e.g., people were actually detected - metrics += (metrics_elem * (dt / duration)); - } - - // find percentage of time when personal space / f-formation space etc. 
thresholds were violated - double gaussian_violations_percentage = gaussian_violations / static_cast(gaussians_total); - - return std::make_tuple(min_elem, max_elem, metrics, gaussian_violations_percentage); -} - -} // namespace evaluation -} // namespace srpb diff --git a/srpb_evaluation/src/metric_statistics.cpp b/srpb_evaluation/src/metric_statistics.cpp new file mode 100644 index 0000000..9abcdc0 --- /dev/null +++ b/srpb_evaluation/src/metric_statistics.cpp @@ -0,0 +1,93 @@ +#include "srpb_evaluation/metric_statistics.h" + +#include +#include +#include + +#include + +namespace srpb { +namespace evaluation { + +std::tuple MetricStatistics::calculateStatistics( + std::vector>> timed_values, + double violation_threshold, + bool violation_above_threshold, + bool max_method +) { + // find the actual duration + double duration = 0.0; + for (const auto& tg: timed_values) { + duration += tg.first; + } + + // find the values and recompute according to, e.g., recognized people/groups (max/sum) + double metrics = 0.0; + double min_elem = std::numeric_limits::max(); + double max_elem = std::numeric_limits::min(); + // find the timing-corrected percentage of threshold violations of, e.g., personal space + double violations_duration = 0.0; + + // rollout values to compute the score (metrics) + for (const auto& tvalue: timed_values) { + double dt = tvalue.first; + if (tvalue.second.empty()) { + std::cout + << "\x1B[33m" + << "The value container is empty for at least 1 sample. 
The metrics will be 0.0" + << "\x1B[0m" + << std::endl; + return std::make_tuple(0.0, 0.0, 0.0, 0); + } + + /* + * Count the duration when violations ocurred, i.e., it does not matter whether there are ten people whose + * personal spaces the robot intruded, but rather in it happened in a single time step, it will be regarded + * in a sum of "violating" time steps (durations) + */ + size_t threshold_violations_num = std::count_if( + tvalue.second.cbegin(), + tvalue.second.cend(), + [=](double val) { + return violation_above_threshold ? (val > violation_threshold) : (val < violation_threshold); + } + ); + + // overall min and max computation + double local_min_elem = *std::min_element(tvalue.second.cbegin(), tvalue.second.cend()); + if (local_min_elem < min_elem) { + min_elem = local_min_elem; + } + + double local_max_elem = *std::max_element(tvalue.second.cbegin(), tvalue.second.cend()); + if (local_max_elem > max_elem) { + max_elem = local_max_elem; + } + + // check for selected method of normalization + double metrics_elem = 0.0; + if (max_method) { + // max method used here for normalization + metrics_elem = local_max_elem; + } else { + // average used for normalization + metrics_elem = std::accumulate( + tvalue.second.cbegin(), + tvalue.second.cend(), + 0.0 + ) / static_cast(tvalue.second.size()); + } + + // values must be referenced to a duration of time steps when, e.g., people were actually detected + metrics += (metrics_elem * (dt / duration)); + + // sum up the percentage of time when the threshold value of, e.g., personal space was violated + if (threshold_violations_num > 0) { + violations_duration += (dt / duration); + } + } + return std::make_tuple(min_elem, max_elem, metrics, violations_duration); +} + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/src/utils.cpp b/srpb_evaluation/src/utils.cpp index 2a5b3b2..50895e0 100644 --- a/srpb_evaluation/src/utils.cpp +++ b/srpb_evaluation/src/utils.cpp @@ -137,7 +137,8 @@ void 
createResultsFile( const InplaceRotations& inplace_rotations, const PersonalSpaceIntrusion& personal_space_intrusion, const FormationSpaceIntrusion& formation_space_intrusion, - const HeadingDirectionDisturbance& heading_direction_disturbance + const HeadingDirectionDisturbance& heading_direction_disturbance, + const PassingSpeedDiscomfort& passing_speed_discomfort ) { std::fstream file_results; file_results.open(filename, std::ios::out); @@ -148,24 +149,42 @@ void createResultsFile( std::stringstream ss; ss.setf(std::ios::fixed); - ss << "s_rbt ," << std::setw(9) << std::setprecision(4) << samples_robot << std::endl; - ss << "s_ppl ," << std::setw(9) << std::setprecision(4) << samples_people << std::endl; - ss << "s_grp ," << std::setw(9) << std::setprecision(4) << samples_groups << std::endl; - ss << "m_goal ," << std::setw(9) << std::setprecision(4) << goal_reached.getValue() << std::endl; - ss << "m_obs ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValue() << std::endl; - ss << "m_mef ," << std::setw(9) << std::setprecision(4) << motion_efficiency.getValue() << std::endl; - ss << "m_cef ," << std::setw(9) << std::setprecision(4) << computational_efficiency.getValue() << std::endl; - ss << "m_cre ," << std::setw(9) << std::setprecision(4) << computational_time_repeatability.getValue() << std::endl; - ss << "m_vsm ," << std::setw(9) << std::setprecision(4) << velocity_smoothness.getValue() << std::endl; - ss << "m_hsm ," << std::setw(9) << std::setprecision(4) << heading_change_smoothness.getValue() << std::endl; - ss << "m_path ," << std::setw(9) << std::setprecision(4) << path_linear_length.getValue() << std::endl; - ss << "m_chc ," << std::setw(9) << std::setprecision(4) << cumulative_heading_change.getValue() << std::endl; - ss << "m_osc ," << std::setw(9) << std::setprecision(4) << oscillations.getValue() << std::endl; - ss << "m_bwd ," << std::setw(9) << std::setprecision(4) << backward_movements.getValue() << std::endl; - ss << "m_inp ," 
<< std::setw(9) << std::setprecision(4) << inplace_rotations.getValue() << std::endl; - ss << "m_psi ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValue() << std::endl; - ss << "m_fsi ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValue() << std::endl; - ss << "m_dir ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValue() << std::endl; + ss << "s_rbt ," << std::setw(9) << std::setprecision(4) << samples_robot << std::endl; + ss << "s_ppl ," << std::setw(9) << std::setprecision(4) << samples_people << std::endl; + ss << "s_grp ," << std::setw(9) << std::setprecision(4) << samples_groups << std::endl; + ss << "m_goal ," << std::setw(9) << std::setprecision(4) << goal_reached.getValue() << std::endl; + // NOTE: legacy m_obs (before making m_obs inherit from MetricStatistics) was equal to the current m_obs_viol + ss << "m_obs ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValue() << std::endl; + ss << "m_obs_min ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValueMin() << std::endl; + ss << "m_obs_max ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValueMax() << std::endl; + ss << "m_obs_viol ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getViolations() << std::endl; + ss << "m_mef ," << std::setw(9) << std::setprecision(4) << motion_efficiency.getValue() << std::endl; + ss << "m_cef ," << std::setw(9) << std::setprecision(4) << computational_efficiency.getValue() << std::endl; + ss << "m_cre ," << std::setw(9) << std::setprecision(4) << computational_time_repeatability.getValue() << std::endl; + ss << "m_vsm ," << std::setw(9) << std::setprecision(4) << velocity_smoothness.getValue() << std::endl; + ss << "m_hsm ," << std::setw(9) << std::setprecision(4) << heading_change_smoothness.getValue() << std::endl; + ss << "m_path ," << std::setw(9) << std::setprecision(4) << path_linear_length.getValue() << std::endl; + 
ss << "m_chc ," << std::setw(9) << std::setprecision(4) << cumulative_heading_change.getValue() << std::endl; + ss << "m_osc ," << std::setw(9) << std::setprecision(4) << oscillations.getValue() << std::endl; + ss << "m_bwd ," << std::setw(9) << std::setprecision(4) << backward_movements.getValue() << std::endl; + ss << "m_inp ," << std::setw(9) << std::setprecision(4) << inplace_rotations.getValue() << std::endl; + ss << "m_psi ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValue() << std::endl; + ss << "m_psi_min ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValueMin() << std::endl; + ss << "m_psi_max ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValueMax() << std::endl; + ss << "m_psi_viol ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getViolations() << std::endl; + ss << "m_fsi ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValue() << std::endl; + ss << "m_fsi_min ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValueMin() << std::endl; + ss << "m_fsi_max ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValueMax() << std::endl; + ss << "m_fsi_viol ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getViolations() << std::endl; + ss << "m_dir ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValue() << std::endl; + ss << "m_dir_min ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValueMin() << std::endl; + ss << "m_dir_max ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValueMax() << std::endl; + ss << "m_dir_viol ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getViolations() << std::endl; + ss << "m_psd ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValue() << std::endl; + ss << "m_psd_min ," << std::setw(9) << 
std::setprecision(4) << passing_speed_discomfort.getValueMin() << std::endl; + ss << "m_psd_max ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValueMax() << std::endl; + ss << "m_psd_viol ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getViolations() << std::endl; + file_results << ss.str(); printf("Results CSV file saved at `%s`\r\n", filename.c_str()); } diff --git a/srpb_evaluation/test/test_metric_gaussian.cpp b/srpb_evaluation/test/test_metric_gaussian.cpp deleted file mode 100644 index e9994fb..0000000 --- a/srpb_evaluation/test/test_metric_gaussian.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include - -#include - -using namespace srpb::evaluation; - -TEST(TestMetricGaussian, gaussianStatistics) { - // durations and gaussians - std::vector>> timed_gaussians{ - {0.25, {1.0, 0.75, 0.51, 0.24, 0.51, 0.75, 1.0, 1.25}}, - {0.50, {1.0, 0.75, 0.51, 0.25, 0.51, 0.75, 1.0, 1.27}}, - {0.75, {1.0, 0.75, 0.51, 0.26, 0.51, 0.75, 1.0, 1.26}} - }; - // tuple with: min, max and normalized, and threshold violations - auto stats = MetricGaussian::calculateGaussianStatistics(timed_gaussians, 0.75, true); - ASSERT_EQ(std::get<0>(stats), 0.24); - ASSERT_EQ(std::get<1>(stats), 1.27); - double duration = 0.25 + 0.50 + 0.75; - ASSERT_EQ( - std::get<2>(stats), - 1.25 * (0.25 / duration) - + 1.27 * (0.50 / duration) - + 1.26 * (0.75 / duration) - ); - ASSERT_EQ(std::get<3>(stats), 9.0 / (3.0 * 8.0)); -} - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/srpb_evaluation/test/test_metric_statistics.cpp b/srpb_evaluation/test/test_metric_statistics.cpp new file mode 100644 index 0000000..6f13dc0 --- /dev/null +++ b/srpb_evaluation/test/test_metric_statistics.cpp @@ -0,0 +1,152 @@ +#include + +#include + +using namespace srpb::evaluation; + +TEST(TestMetricStatistics, statistics) { + // durations and, e.g., Gaussians + std::vector>> timed_gaussians{ + {0.25, {1.0, 0.75, 0.51, 
0.24, 0.51, 0.75, 1.0, 1.25}}, + {0.50, {1.0, 0.75, 0.51, 0.25, 0.51, 0.75, 1.0, 1.27}}, + {0.75, {1.0, 0.75, 0.51, 0.26, 0.51, 0.75, 1.0, 1.26}}, + // {1.00, {}} // empty "second" - container, will force metrics to be 0.0 + }; + // tuple with: min, max and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_gaussians, 0.75, true, true); + ASSERT_EQ(std::get<0>(stats), 0.24); + ASSERT_EQ(std::get<1>(stats), 1.27); + double duration = 0.25 + 0.50 + 0.75; + // max_method is set to true, thus max values are pointed out below + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 1.25 * (0.25 / duration) + + 1.27 * (0.50 / duration) + + 1.26 * (0.75 / duration) + ); + + // violations ocurred in each time step, i.e., sum up the subsequent durations and divide them by a total duration + // the metric is 1.0 here + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.25 + 0.50 + 0.75) / duration + ); +} + +TEST(TestMetricStatistics, statisticsObstacles1) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.24, {0.2}}, + {0.26, {0.6}}, + {0.25, {0.5}}, + {0.23, {0.3}} + }; + // tuple with: min, max and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.2); + ASSERT_EQ(std::get<1>(stats), 0.6); + double duration = 0.24 + 0.26 + 0.25 + 0.23; + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 0.2 * (0.24 / duration) + + 0.6 * (0.26 / duration) + + 0.5 * (0.25 / duration) + + 0.3 * (0.23 / duration) + ); + + // sum up only a duration when the threshold was violated (distances less than a threshold) + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.24 + 0.23) / duration + ); +} + +TEST(TestMetricStatistics, statisticsObstacles2) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.25, {0.51}}, + {0.25, {0.51}}, + {0.25, {0.51}}, + {0.25, {0.51}} + }; + // tuple with: min, max and normalized, and 
threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.51); + ASSERT_EQ(std::get<1>(stats), 0.51); + double duration = 0.25 + 0.25 + 0.25 + 0.25; + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + ); + + // violations did not occur + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + 0.0 / duration + ); +} + +TEST(TestMetricStatistics, statisticsObstacles3) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.22, {0.48}}, + {0.23, {0.49}}, + {0.24, {0.50}}, + }; + // tuple with: min, max and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.48); + ASSERT_EQ(std::get<1>(stats), 0.50); + double duration = 0.22 + 0.23 + 0.24; + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 0.48 * (0.22 / duration) + + 0.49 * (0.23 / duration) + + 0.50 * (0.24 / duration) + ); + + // 2 out of 3 samples are less than the threshold (they violate) + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.22 + 0.23) / duration + ); +} + +TEST(TestMetricStatistics, statisticsUnequal) { + // durations and values (e.g., Gaussians) + std::vector>> timed_data{ + {0.22, {0.48, 0.51, 0.68, 0.12}}, + {0.23, {0.49, 0.34, 0.54, 0.86, 0.76}}, + {0.24, {0.50, 0.20}}, + {0.38, {0.10, 0.12}}, + {0.32, {0.97}} + }; + // tuple with: min, max and normalized, and threshold violations + // violation occurs above the threshold + auto stats = MetricStatistics::calculateStatistics(timed_data, 0.5, true, true); + ASSERT_EQ(std::get<0>(stats), 0.10); + ASSERT_EQ(std::get<1>(stats), 0.97); + double duration = 0.22 + 0.23 + 0.24 + 0.38 + 0.32; + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 0.68 * (0.22 / duration) + + 0.86 * (0.23 / duration) + + 0.50 * (0.24 / duration) + + 0.12 * (0.38 / duration) + + 0.97 * (0.32 / 
duration) + ); + + // in 3 out of 5 sets the threshold was violated + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.22 + 0.23 + 0.32) / duration + ); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/srpb_evaluation/test/test_rewinder.cpp b/srpb_evaluation/test/test_rewinder.cpp index fbc0564..8cba62c 100644 --- a/srpb_evaluation/test/test_rewinder.cpp +++ b/srpb_evaluation/test/test_rewinder.cpp @@ -10,6 +10,7 @@ const logger::RobotData DUMMY_ROBOT( geometry_msgs::PoseWithCovarianceStamped(), geometry_msgs::PoseWithCovarianceStamped(), geometry_msgs::PoseStamped(), + geometry_msgs::TwistStamped(), 0.5, 0.1 ); @@ -26,7 +27,7 @@ people_msgs_utils::Person createPerson(const std::string& name) { "123" ); } -const people_msgs_utils::Group DUMMY_GROUP = people_msgs_utils::EMPTY_GROUP; +const people_msgs_utils::Group DUMMY_GROUP = people_msgs_utils::Group(); /* * Mocking references: diff --git a/srpb_logger/CMakeLists.txt b/srpb_logger/CMakeLists.txt index 1903760..682a492 100644 --- a/srpb_logger/CMakeLists.txt +++ b/srpb_logger/CMakeLists.txt @@ -5,6 +5,7 @@ find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils @@ -19,6 +20,7 @@ catkin_package( ${PROJECT_NAME}_lib CATKIN_DEPENDS roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils @@ -36,8 +38,10 @@ add_library(${PROJECT_NAME}_lib include/${PROJECT_NAME}/people_logger.h include/${PROJECT_NAME}/robot_data.h include/${PROJECT_NAME}/robot_logger.h + include/${PROJECT_NAME}/global_planner_logger.h src/people_logger.cpp src/robot_logger.cpp + src/global_planner_logger.cpp ) target_link_libraries(${PROJECT_NAME}_lib ${catkin_LIBRARIES} @@ -63,4 +67,14 @@ if (CATKIN_ENABLE_TESTING) if(TARGET test_people_logger) target_link_libraries(test_people_logger ${PROJECT_NAME}_lib) endif() + + catkin_add_gtest(test_global_planner_logger test/test_global_planner_logger.cpp) + if(TARGET 
test_global_planner_logger) + target_link_libraries(test_global_planner_logger ${PROJECT_NAME}_lib) + endif() + + catkin_add_gtest(test_robot_logger test/test_robot_logger.cpp) + if(TARGET test_robot_logger) + target_link_libraries(test_robot_logger ${PROJECT_NAME}_lib) + endif() endif() diff --git a/srpb_logger/include/srpb_logger/benchmark_logger.h b/srpb_logger/include/srpb_logger/benchmark_logger.h index f3a1933..98f4792 100644 --- a/srpb_logger/include/srpb_logger/benchmark_logger.h +++ b/srpb_logger/include/srpb_logger/benchmark_logger.h @@ -21,9 +21,9 @@ class BenchmarkLogger { public: virtual void init(ros::NodeHandle& nh) { //path to save the recorded data - nh.param("log_filename", log_filename_, std::string("log.txt")); + nh.param("srpb/log_filename", log_filename_, std::string("log.txt")); // ID of the frame that poses will be expressed in - nh.param("log_frame_id", target_frame_, target_frame_); + nh.param("srpb/log_frame_id", target_frame_, target_frame_); // append the timestamp to filename log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, BenchmarkLogger::timeToString()); } @@ -65,6 +65,10 @@ class BenchmarkLogger { * that wrong time source will be used and transform is not found (wall vs sim time difference). 
*/ geometry_msgs::PoseWithCovarianceStamped transformPose(const geometry_msgs::PoseWithCovarianceStamped& pose_in) { + // transforming might not be necessary + if (pose_in.header.frame_id == target_frame_) { + return pose_in; + } auto transform_stamped = getTransform(pose_in.header.frame_id); geometry_msgs::PoseWithCovarianceStamped pose_out; tf2::doTransform(pose_in, pose_out, transform_stamped); diff --git a/srpb_logger/include/srpb_logger/global_planner_data.h b/srpb_logger/include/srpb_logger/global_planner_data.h new file mode 100644 index 0000000..9c98861 --- /dev/null +++ b/srpb_logger/include/srpb_logger/global_planner_data.h @@ -0,0 +1,117 @@ +#pragma once + +#include +#include + +#include + +namespace srpb { +namespace logger { + +class GlobalPlannerData { +public: + GlobalPlannerData( + const geometry_msgs::PoseStamped& robot_pose, + const geometry_msgs::PoseStamped& goal_pose, + bool planning_success, + double planning_time, + const std::vector& plan + ): GlobalPlannerData(robot_pose.pose, goal_pose.pose, planning_success, planning_time, plan) {} + + GlobalPlannerData( + const geometry_msgs::Pose& robot_pose, + const geometry_msgs::Pose& goal_pose, + bool planning_success, + double planning_time, + const std::vector& plan + ): + pose_(robot_pose), + goal_(goal_pose), + planning_success_(planning_success), + planning_time_(planning_time), + plan_(plan) + {} + + inline geometry_msgs::Pose getPose() const { + return pose_; + } + + inline geometry_msgs::Point getPosition() const { + return pose_.position; + } + + inline geometry_msgs::Quaternion getOrientation() const { + return pose_.orientation; + } + + inline double getPositionX() const { + return pose_.position.x; + } + + inline double getPositionY() const { + return pose_.position.y; + } + + inline double getPositionZ() const { + return pose_.position.z; + } + + inline double getOrientationYaw() const { + return tf2::getYaw(pose_.orientation); + } + + inline geometry_msgs::Pose getGoal() const { + 
return goal_; + } + + inline double getGoalPositionX() const { + return goal_.position.x; + } + + inline double getGoalPositionY() const { + return goal_.position.y; + } + + inline double getGoalPositionZ() const { + return goal_.position.z; + } + + inline double getGoalOrientationYaw() const { + return tf2::getYaw(goal_.orientation); + } + + /// Whether the planning was successful (i.e. the planner did not fail and returned a non-empty plan) + inline bool getPlanningStatus() const { + return planning_success_; + } + + /// Returns duration of global plan computations + inline double getPlanningTime() const { + return planning_time_; + } + + /// Returns a copy of a container with the global path plan @ref plan_ + inline std::vector getPlan() const { + return plan_; + } + + /// Returns a const reference to a container with the global path plan @ref plan_ + inline const std::vector& getPlanCref() const { + return plan_; + } + + /// Number of poses the global path plan @ref plan_ is expected to contain + inline size_t getPlanSize() const { + return plan_.size(); + } + +protected: + bool planning_success_; + geometry_msgs::Pose pose_; + geometry_msgs::Pose goal_; + double planning_time_; + std::vector plan_; +}; + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/include/srpb_logger/global_planner_logger.h b/srpb_logger/include/srpb_logger/global_planner_logger.h new file mode 100644 index 0000000..848e7fb --- /dev/null +++ b/srpb_logger/include/srpb_logger/global_planner_logger.h @@ -0,0 +1,44 @@ +#pragma once + +#include "benchmark_logger.h" +#include "global_planner_data.h" + +namespace srpb { +namespace logger { + +/** + * @brief A logger class storing the information about the global planner + * + * The class for global planner data logging was separated from the logger of the general robot data (gathered with + * a frequency of the local planning) as it is usually gathered with a very different frequency. 
+ * + * All in all, this class allows avoiding the interpolation during an offline data analysis. + */ +class GlobalPlannerLogger: public BenchmarkLogger { +public: + /// For odometry, e.g., Z, Roll, Pitch (based on diff_drive controller) + static constexpr auto COVARIANCE_UNKNOWN = 1000000.0; + + GlobalPlannerLogger() = default; + + void init(ros::NodeHandle& nh); + + void start(); + + /// Performs writes to files that this class handles, most recent planner data is used + void update(double timestamp, const GlobalPlannerData& planner); + + void finish(); + + /// Converts a given global planing data into string description + static std::string plannerToString(const GlobalPlannerData& planner); + + /// Converts a given @ref str string description into a global planing data + static std::pair plannerFromString(const std::string& str); + +protected: + std::fstream log_file_; +}; + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/include/srpb_logger/robot_data.h b/srpb_logger/include/srpb_logger/robot_data.h index 0657fe8..23d5b55 100644 --- a/srpb_logger/include/srpb_logger/robot_data.h +++ b/srpb_logger/include/srpb_logger/robot_data.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -24,21 +25,31 @@ class RobotData { const geometry_msgs::PoseWithCovarianceStamped& robot_pose, const geometry_msgs::PoseWithCovarianceStamped& robot_vel, const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::TwistStamped& cmd_vel, double obstacle_distance, double exec_time - ): RobotData(robot_pose.pose, robot_vel.pose, goal_pose.pose, obstacle_distance, exec_time) {} + ): RobotData( + robot_pose.pose, + robot_vel.pose, + goal_pose.pose, + cmd_vel.twist, + obstacle_distance, + exec_time + ) {} /// Full constructor for the RobotLogger RobotData( const geometry_msgs::PoseWithCovariance& robot_pose, const geometry_msgs::PoseWithCovariance& robot_vel, const geometry_msgs::Pose& goal_pose, + const geometry_msgs::Twist& cmd_vel, double 
obstacle_distance, double exec_time ): pose_(robot_pose), vel_(robot_vel), goal_(goal_pose), + command_(cmd_vel), obstacle_distance_(obstacle_distance), local_plan_exec_time_(exec_time), partial_(false) @@ -47,17 +58,38 @@ class RobotData { /// Partial constructor for the `move_base` node RobotData( const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::TwistStamped& cmd_vel, double obstacle_distance, double exec_time - ): RobotData(goal_pose.pose, obstacle_distance, exec_time) {} + ): RobotData( + goal_pose.pose, + cmd_vel.twist, + obstacle_distance, + exec_time + ) {} + + /// Partial constructor for the `move_base` node + RobotData( + const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::Twist& cmd_vel, + double obstacle_distance, + double exec_time + ): RobotData( + goal_pose.pose, + cmd_vel, + obstacle_distance, + exec_time + ) {} /// Partial constructor for the `move_base` node RobotData( const geometry_msgs::Pose& goal_pose, + const geometry_msgs::Twist& cmd_vel, double obstacle_distance, double exec_time ): goal_(goal_pose), + command_(cmd_vel), obstacle_distance_(obstacle_distance), local_plan_exec_time_(exec_time), partial_(true) @@ -73,6 +105,10 @@ class RobotData { vel_ = pose_; } + inline geometry_msgs::PoseWithCovariance getPoseWithCovariance() const { + return pose_; + } + inline geometry_msgs::Pose getPose() const { return pose_.pose; } @@ -128,6 +164,10 @@ class RobotData { return pose_.covariance.at(COV_YAWYAW_INDEX); } + inline geometry_msgs::PoseWithCovariance getVelocityWithCovariance() const { + return vel_; + } + inline geometry_msgs::Pose getVelocity() const { return vel_.pose; } @@ -195,6 +235,34 @@ class RobotData { return tf2::getYaw(goal_.orientation); } + inline geometry_msgs::Twist getVelocityCommand() const { + return command_; + } + + inline double getVelocityCommandLinearX() const { + return command_.linear.x; + } + + inline double getVelocityCommandLinearY() const { + return command_.linear.y; + } + + 
inline double getVelocityCommandLinearZ() const { + return command_.linear.z; + } + + inline double getVelocityCommandAngularX() const { + return command_.angular.x; + } + + inline double getVelocityCommandAngularY() const { + return command_.angular.y; + } + + inline double getVelocityCommandAngularZ() const { + return command_.angular.z; + } + inline double getDistToObstacle() const { return obstacle_distance_; } @@ -213,6 +281,7 @@ class RobotData { geometry_msgs::PoseWithCovariance pose_; geometry_msgs::PoseWithCovariance vel_; geometry_msgs::Pose goal_; + geometry_msgs::Twist command_; double obstacle_distance_; double local_plan_exec_time_; diff --git a/srpb_logger/include/srpb_logger/robot_logger.h b/srpb_logger/include/srpb_logger/robot_logger.h index 9293332..fdb0824 100644 --- a/srpb_logger/include/srpb_logger/robot_logger.h +++ b/srpb_logger/include/srpb_logger/robot_logger.h @@ -4,10 +4,12 @@ #include "robot_data.h" #include +#include // helper functions #include +#include #include #include @@ -25,6 +27,16 @@ class RobotLogger: public BenchmarkLogger { void start(); + /** + * Should be called before @ref update when a significant timing offset is expected (see details) + * + * The timing offset is related to the the duration between the "timestamp" passed to the @ref update and + * the actual timestamp of the localization data that are updated asynchronously and stored as class members. + * + * Each "latch" should be renewed after every @ref update call. 
+ */ + void latch(); + /// Performs writes to files that this class handles, most recent robot data is used void update(double timestamp, const RobotData& robot); @@ -39,15 +51,26 @@ class RobotLogger: public BenchmarkLogger { protected: void localizationCB(const nav_msgs::OdometryConstPtr& msg); + void externalComputationTimesCB(const std_msgs::Float64MultiArrayConstPtr& msg); + std::fstream log_file_; std::mutex cb_mutex_; ros::Subscriber localization_sub_; + /// Subscriber that is valid once the given topic param is non-empty + ros::Subscriber external_comp_times_sub_; + /// When this flag is true, any incoming updates of robot pose and velocity will not be accepted + std::atomic latched_; /// Newest pose with covariance of the robot (expressed in coordinate system given by 'target_frame_') geometry_msgs::PoseWithCovariance robot_pose_; /// Newest velocity with covariance of the robot (expressed in local coordinate system) geometry_msgs::PoseWithCovariance robot_vel_; + + /// Data index of the multi-dimension array that a computation time is stored under + size_t external_comp_time_array_index_; + /// Stores the newest value of the computation time obtained via the @ref external_comp_times_sub_ subscriber + std::atomic external_comp_time_; }; } // namespace logger diff --git a/srpb_logger/package.xml b/srpb_logger/package.xml index 6d57afb..ae381ec 100644 --- a/srpb_logger/package.xml +++ b/srpb_logger/package.xml @@ -2,7 +2,7 @@ srpb_logger - 0.9.3 + 0.9.5 The srpb_logger package provides implementation of tools that logging and parsing of data necessary for evaluation of robot behaviour during navigation @@ -21,6 +21,7 @@ tf2_geometry_msgs roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils diff --git a/srpb_logger/src/global_planner_logger.cpp b/srpb_logger/src/global_planner_logger.cpp new file mode 100644 index 0000000..e2b6f5d --- /dev/null +++ b/srpb_logger/src/global_planner_logger.cpp @@ -0,0 +1,144 @@ +#include + +// helper functions 
+#include + +namespace srpb { +namespace logger { + +void GlobalPlannerLogger::init(ros::NodeHandle& nh) { + BenchmarkLogger::init(nh); + log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, "gplanner"); +} + +void GlobalPlannerLogger::start() { + if (!log_file_.is_open()) { + BenchmarkLogger::start(log_file_, log_filename_); + std::cout << "[ SRPB] Started the first log file for a global planner" << std::endl; + } else if (log_file_.is_open()) { + BenchmarkLogger::finish(log_file_); + std::cout << "[ SRPB] Finishing a global planner log file" << std::endl; + BenchmarkLogger::startNextPart(log_file_, log_filename_); + std::cout << "[ SRPB] Started next log file for a global planner" << std::endl; + } + + // increment the counter used for numbering goals + incrementLogPartCounter(); +} + +void GlobalPlannerLogger::update(double timestamp, const GlobalPlannerData& planner) { + if (!log_file_) { + throw std::runtime_error("File for GlobalPlannerLogger was not properly created!"); + } + + std::stringstream ss; + ss.setf(std::ios::fixed); + ss << std::setw(9) << std::setprecision(4) << timestamp << " "; + ss << GlobalPlannerLogger::plannerToString(planner) << std::endl; + log_file_ << ss.str(); +} + +void GlobalPlannerLogger::finish() { + BenchmarkLogger::finish(log_file_); +} + +std::string GlobalPlannerLogger::plannerToString(const GlobalPlannerData& planner) { + std::stringstream ss; + ss.setf(std::ios::fixed); + /* 0 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionX() << " "; + /* 1 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionY() << " "; + /* 2 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionZ() << " "; + /* 3 */ ss << std::setw(9) << std::setprecision(4) << planner.getOrientationYaw() << " "; + /* 4 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalPositionX() << " "; + /* 5 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalPositionY() << " "; + /* 6 */ ss << 
std::setw(9) << std::setprecision(4) << planner.getGoalPositionZ() << " "; + /* 7 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalOrientationYaw() << " "; + /* 8 */ ss << std::setw(4) << std::setprecision(1) << static_cast(planner.getPlanningStatus()) << " "; + /* 9 */ ss << std::setw(9) << std::setprecision(4) << planner.getPlanningTime() << " "; + /* 10 */ ss << std::setw(9) << std::setprecision(1) << planner.getPlanSize(); + + // dynamic size of the plan + ss << " / "; + const auto& plan = planner.getPlanCref(); + for (const auto& pose_stamped: plan) { + ss << " " << std::setw(9) << std::setprecision(4) << pose_stamped.pose.position.x; + ss << " " << std::setw(9) << std::setprecision(4) << pose_stamped.pose.position.y; + ss << " " << std::setw(9) << std::setprecision(4) << tf2::getYaw(pose_stamped.pose.orientation); + } + return ss.str(); +} + +std::pair GlobalPlannerLogger::plannerFromString(const std::string& str) { + // divide into 2 parts considering separator + size_t separator = str.find("/"); + auto vals = people_msgs_utils::parseString(str.substr(0, separator - 1), " "); + auto vals_plan_poses = people_msgs_utils::parseString(str.substr(separator + 1), " "); + + // static part of the entry has always the same length; a dynamic part must have 3 elements for each pose + bool plan_poses_in_triplets = vals_plan_poses.size() % 3 != 0; + if (vals.size() != 11 || plan_poses_in_triplets) { + std::cout << "\x1B[31mFound corrupted data of a global planner:\r\n\t" << str << "\x1B[0m" << std::endl; + // dummy planner data + auto dummy_planner = GlobalPlannerData( + geometry_msgs::Pose(), + geometry_msgs::Pose(), + false, + 0.0, + std::vector() + ); + return {false, dummy_planner}; + } + + geometry_msgs::Pose pose; + pose.position.x = vals.at(0); + pose.position.y = vals.at(1); + pose.position.z = vals.at(2); + tf2::Quaternion quat; + quat.setRPY(0.0, 0.0, vals.at(3)); + pose.orientation.x = quat.getX(); + pose.orientation.y = quat.getY(); + 
pose.orientation.z = quat.getZ(); + pose.orientation.w = quat.getW(); + + geometry_msgs::Pose goal; + goal.position.x = vals.at(4); + goal.position.y = vals.at(5); + goal.position.z = vals.at(6); + quat.setRPY(0.0, 0.0, vals.at(7)); + goal.orientation.x = quat.getX(); + goal.orientation.y = quat.getY(); + goal.orientation.z = quat.getZ(); + goal.orientation.w = quat.getW(); + + bool planning_status = static_cast(static_cast(vals.at(8))); + double planning_time = vals.at(9); + + double plan_size = vals.at(10); + std::vector plan; + // each pose is represented by a triplet + for ( + auto it = vals_plan_poses.cbegin(); + it != vals_plan_poses.cend(); + it = it + 3 + ) { + double x = *it; + double y = *(it+1); + double yaw = *(it+2); + geometry_msgs::PoseStamped pose; + pose.pose.position.x = x; + pose.pose.position.y = y; + + // NOTE: ctor of tf2::Quaternion using Euler angles is deprecated + tf2::Quaternion quat; + quat.setEuler(0.0, 0.0, yaw); + pose.pose.orientation = tf2::toMsg(quat); + + plan.push_back(pose); + } + + auto planner = GlobalPlannerData(pose, goal, planning_status, planning_time, plan); + return {true, planner}; +} + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/src/people_logger.cpp b/srpb_logger/src/people_logger.cpp index 96acd3c..4397cca 100644 --- a/srpb_logger/src/people_logger.cpp +++ b/srpb_logger/src/people_logger.cpp @@ -255,14 +255,14 @@ std::pair PeopleLogger::groupFromString(const st if (vals1.empty()) { // return a dummy group without any IDs of tracked people - return {false, people_msgs_utils::EMPTY_GROUP}; + return {false, people_msgs_utils::Group()}; } // 5 static entries and `group` must contain at least 2 IDs if (vals1.size() < 7) { std::cout << "\x1B[31mFound corrupted data of a group:\r\n\t" << str << "\x1B[0m" << std::endl; // return a dummy group - return {false, people_msgs_utils::EMPTY_GROUP}; + return {false, people_msgs_utils::Group()}; } std::string name = vals1.at(0); diff --git 
a/srpb_logger/src/robot_logger.cpp b/srpb_logger/src/robot_logger.cpp index 5209df3..5699c03 100644 --- a/srpb_logger/src/robot_logger.cpp +++ b/srpb_logger/src/robot_logger.cpp @@ -12,11 +12,30 @@ void RobotLogger::init(ros::NodeHandle& nh) { BenchmarkLogger::init(nh); log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, "robot"); + // by default, latching is disabled = async updates are allowed + latched_.store(false); localization_sub_ = nh.subscribe( "/odom", 1, boost::bind(&RobotLogger::localizationCB, this, _1) ); + + // obtain the name of the topic to retrieve the computation times of the external local planner (not a navstack + // plugin); if empty, computation times obtained via the @ref update call will be used (default) + external_comp_time_.store(NAN); + std::string computation_time_topic(""); + int computation_time_array_idx = -1; + nh.param("srpb/external_computation_times_topic", computation_time_topic, computation_time_topic); + nh.param("srpb/external_computation_times_array_index", computation_time_array_idx, computation_time_array_idx); + + if (!computation_time_topic.empty() && computation_time_array_idx >= 0) { + external_comp_time_array_index_ = static_cast(computation_time_array_idx); + external_comp_times_sub_ = nh.subscribe( + computation_time_topic, + 10, + boost::bind(&RobotLogger::externalComputationTimesCB, this, _1) + ); + } } void RobotLogger::start() { @@ -50,10 +69,25 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { robot_pose_, robot_vel_, robot.getGoal(), + robot.getVelocityCommand(), robot.getDistToObstacle(), robot.getLocalPlanningTime() ); } + // modify local planning time, if required and valid data was received + if (!std::isnan(external_comp_time_.load())) { + robot_data = RobotData( + robot_data.getPoseWithCovariance(), + robot_data.getVelocityWithCovariance(), + robot_data.getGoal(), + robot_data.getVelocityCommand(), + robot_data.getDistToObstacle(), + external_comp_time_.load() + ); + } 
+ + // (enable the asynchronous updates of pose and velocity + latched_.store(false); std::stringstream ss; ss.setf(std::ios::fixed); @@ -62,6 +96,10 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { log_file_ << ss.str(); } +void RobotLogger::latch() { + latched_.store(true); +} + void RobotLogger::finish() { BenchmarkLogger::finish(log_file_); } @@ -90,20 +128,24 @@ std::string RobotLogger::robotToString(const RobotData& robot) { /* 18 */ ss << std::setw(9) << std::setprecision(4) << robot.getGoalPositionX() << " "; /* 19 */ ss << std::setw(9) << std::setprecision(4) << robot.getGoalPositionY() << " "; /* 20 */ ss << std::setw(9) << std::setprecision(4) << robot.getGoalOrientationYaw() << " "; - /* 21 */ ss << std::setw(9) << std::setprecision(4) << robot.getDistToObstacle() << " "; - /* 22 */ ss << std::setw(9) << std::setprecision(4) << robot.getLocalPlanningTime(); + /* 21 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandLinearX() << " "; + /* 22 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandLinearY() << " "; + /* 23 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandAngularZ() << " "; + /* 24 */ ss << std::setw(9) << std::setprecision(4) << robot.getDistToObstacle() << " "; + /* 25 */ ss << std::setw(9) << std::setprecision(4) << robot.getLocalPlanningTime(); return ss.str(); } std::pair RobotLogger::robotFromString(const std::string& str) { auto vals = people_msgs_utils::parseString(str, " "); - if (vals.size() != 23) { + if (vals.size() != 26) { std::cout << "\x1B[31mFound corrupted data of a robot:\r\n\t" << str << "\x1B[0m" << std::endl; // dummy robot data auto dummy_robot = RobotData( geometry_msgs::PoseWithCovariance(), geometry_msgs::PoseWithCovariance(), geometry_msgs::Pose(), + geometry_msgs::Twist(), 0.0, 0.0 ); @@ -158,14 +200,24 @@ std::pair RobotLogger::robotFromString(const std::string& str) goal.orientation.z = quat.getZ(); goal.orientation.w = 
quat.getW(); - double obst_dist = vals.at(21); - double exec_time = vals.at(22); + geometry_msgs::Twist cmd_vel; + cmd_vel.linear.x = vals.at(21); + cmd_vel.linear.y = vals.at(22); + cmd_vel.angular.z = vals.at(23); - auto robot = RobotData(pose, vel, goal, obst_dist, exec_time); + double obst_dist = vals.at(24); + double exec_time = vals.at(25); + + auto robot = RobotData(pose, vel, goal, cmd_vel, obst_dist, exec_time); return {true, robot}; } void RobotLogger::localizationCB(const nav_msgs::OdometryConstPtr& msg) { + // check if any updates should be accepted + if (latched_.load()) { + return; + } + std::lock_guard l(cb_mutex_); // pose should be transformed to the logger's frame robot_pose_ = transformPose(msg->pose, msg->header.frame_id).pose; @@ -187,5 +239,22 @@ void RobotLogger::localizationCB(const nav_msgs::OdometryConstPtr& msg) { robot_vel_ = vel; } +void RobotLogger::externalComputationTimesCB(const std_msgs::Float64MultiArrayConstPtr& msg) { + size_t required_elems = external_comp_time_array_index_ + 1; + // verify that sufficient number of elements is available + if (msg->data.size() < required_elems) { + std::cout << + "\x1B[31m" << + "[ SRPB] RobotLogger did not receive a proper multi array at the " << + "`" << external_comp_times_sub_.getTopic().c_str() << "` topic. 
" << + "Got " << msg->data.size() << " data elements, whereas at least " << + required_elems << " are required" << + "\x1B[0m" << + std::endl; + return; + } + external_comp_time_.store(static_cast(msg->data.at(external_comp_time_array_index_))); +} + } // namespace srpb } // namespace logger diff --git a/srpb_logger/test/test_global_planner_logger.cpp b/srpb_logger/test/test_global_planner_logger.cpp new file mode 100644 index 0000000..e0a4603 --- /dev/null +++ b/srpb_logger/test/test_global_planner_logger.cpp @@ -0,0 +1,121 @@ +#include + +#include +#include + +using namespace srpb::logger; + +// Test cases +TEST(ConversionTest, planner) { + geometry_msgs::Pose pose; + pose.position.x = 0.123; + pose.position.y = 10.654; + pose.position.z = 12.321; + // only `yaw` is logged + tf2::Quaternion pose_quat; + pose_quat.setRPY(0.0, 0.0, 0.632); + pose.orientation.x = pose_quat.getX(); + pose.orientation.y = pose_quat.getY(); + pose.orientation.z = pose_quat.getZ(); + pose.orientation.w = pose_quat.getW(); + + geometry_msgs::Pose goal; + goal.position.x = 100.354; + goal.position.y = 40.123; + goal.position.z = 11.325; + // only `yaw` is logged + tf2::Quaternion goal_quat; + goal_quat.setRPY(0.0, 0.0, 1.5789); + goal.orientation.x = goal_quat.getX(); + goal.orientation.y = goal_quat.getY(); + goal.orientation.z = goal_quat.getZ(); + goal.orientation.w = goal_quat.getW(); + + bool planning_success = true; + double planning_time = 0.135; + size_t plan_size = 497; + // create a arbitrary path plan + std::vector plan; + plan.resize(plan_size); + double x_plan = -5.159; + double y_plan = 20.951; + double yaw_plan = 0.357; + for (auto& pose_stamped: plan) { + // modify vector element + pose_stamped.pose.position.x = x_plan; + pose_stamped.pose.position.y = y_plan; + // NOTE: ctor of tf2::Quaternion using Euler angles is deprecated + tf2::Quaternion quat; + quat.setEuler(0.0, 0.0, yaw_plan); + pose_stamped.pose.orientation = tf2::toMsg(quat); + // prep coords for the next 
iteration + x_plan += 0.142; + y_plan -= 0.249; + yaw_plan = angles::normalize_angle(yaw_plan + 0.029); + } + + GlobalPlannerData data_in(pose, goal, planning_success, planning_time, plan); + + // 1. evaluate if attributes were saved correctly + ASSERT_EQ(data_in.getPositionX(), pose.position.x); + ASSERT_EQ(data_in.getPositionY(), pose.position.y); + ASSERT_EQ(data_in.getPositionZ(), pose.position.z); + ASSERT_EQ(data_in.getOrientation().x, pose.orientation.x); + ASSERT_EQ(data_in.getOrientation().y, pose.orientation.y); + ASSERT_EQ(data_in.getOrientation().z, pose.orientation.z); + ASSERT_EQ(data_in.getOrientation().w, pose.orientation.w); + ASSERT_EQ(data_in.getGoalPositionX(), goal.position.x); + ASSERT_EQ(data_in.getGoalPositionY(), goal.position.y); + ASSERT_EQ(data_in.getGoalPositionZ(), goal.position.z); + ASSERT_DOUBLE_EQ(data_in.getGoalOrientationYaw(), tf2::getYaw(goal_quat)); + ASSERT_EQ(data_in.getPlanningStatus(), planning_success); + ASSERT_EQ(data_in.getPlanningTime(), planning_time); + ASSERT_EQ(data_in.getPlanSize(), plan_size); + ASSERT_EQ(data_in.getPlan(), plan); + + auto data_in_str = GlobalPlannerLogger::plannerToString(data_in); + auto conversion = GlobalPlannerLogger::plannerFromString(data_in_str); + + // conversion successful + ASSERT_TRUE(conversion.first); + GlobalPlannerData data_out = conversion.second; + + // 2. 
check if "out" attributes are equal to "in"'s + ASSERT_EQ(data_out.getPositionX(), data_in.getPositionX()); + ASSERT_EQ(data_out.getPositionY(), data_in.getPositionY()); + ASSERT_EQ(data_out.getPositionZ(), data_in.getPositionZ()); + ASSERT_EQ(data_out.getOrientation().x, data_in.getOrientation().x); + ASSERT_EQ(data_out.getOrientation().y, data_in.getOrientation().y); + ASSERT_EQ(data_out.getOrientation().z, data_in.getOrientation().z); + ASSERT_EQ(data_out.getOrientation().w, data_in.getOrientation().w); + ASSERT_EQ(data_out.getGoalPositionX(), data_in.getGoalPositionX()); + ASSERT_EQ(data_out.getGoalPositionY(), data_in.getGoalPositionY()); + ASSERT_EQ(data_out.getGoalPositionZ(), data_in.getGoalPositionZ()); + ASSERT_EQ(data_out.getGoalOrientationYaw(), data_in.getGoalOrientationYaw()); + ASSERT_EQ(data_out.getPlanningStatus(), data_in.getPlanningStatus()); + ASSERT_EQ(data_out.getPlanningTime(), data_in.getPlanningTime()); + ASSERT_EQ(data_out.getPlanSize(), data_in.getPlanSize()); + /* NOTE: cannot simply use: + * ASSERT_EQ(data_out.getPlan(), data_in.getPlan()); + * here, as it uses a serialized version (?) 
of the message structure; despite the same contents (with 1e-04 + * accuracy), there may be some whitespace differences + */ + auto out_plan = data_out.getPlanCref(); + auto in_plan = data_in.getPlanCref(); + for ( + auto out_it = out_plan.cbegin(), in_it = in_plan.cbegin(); + out_it != out_plan.cend() && in_it != in_plan.cend(); + out_it++, in_it++ + ) { + auto out_pose = *out_it; + auto in_pose = *in_it; + ASSERT_NEAR(out_pose.pose.position.x, in_pose.pose.position.x, 1e-04); + ASSERT_NEAR(out_pose.pose.position.y, in_pose.pose.position.y, 1e-04); + ASSERT_NEAR(tf2::getYaw(out_pose.pose.orientation), tf2::getYaw(in_pose.pose.orientation), 1e-04); + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/srpb_logger/test/test_robot_logger.cpp b/srpb_logger/test/test_robot_logger.cpp new file mode 100644 index 0000000..0fa2c95 --- /dev/null +++ b/srpb_logger/test/test_robot_logger.cpp @@ -0,0 +1,149 @@ +#include + +#include + +using namespace srpb::logger; + +// Test cases +TEST(ConversionTest, robot) { + geometry_msgs::PoseWithCovarianceStamped pose; + pose.pose.pose.position.x = 0.123; + pose.pose.pose.position.y = 10.654; + pose.pose.pose.position.z = 12.321; + tf2::Quaternion pose_quat; + pose_quat.setRPY(0.0, 0.0, 0.632); + pose.pose.pose.orientation.x = pose_quat.getX(); + pose.pose.pose.orientation.y = pose_quat.getY(); + pose.pose.pose.orientation.z = pose_quat.getZ(); + pose.pose.pose.orientation.w = pose_quat.getW(); + + geometry_msgs::PoseWithCovarianceStamped vel; + vel.pose.pose.position.x = 8.963; + vel.pose.pose.position.y = 6.852; + vel.pose.pose.position.z = 1.321; + tf2::Quaternion vel_quat; + vel_quat.setRPY(0.0, 0.0, 0.987); + vel.pose.pose.orientation.x = vel_quat.getX(); + vel.pose.pose.orientation.y = vel_quat.getY(); + vel.pose.pose.orientation.z = vel_quat.getZ(); + vel.pose.pose.orientation.w = vel_quat.getW(); + + geometry_msgs::PoseStamped goal; + goal.pose.position.x =
100.354; + goal.pose.position.y = 40.123; + goal.pose.position.z = 11.325; + tf2::Quaternion goal_quat; + goal_quat.setRPY(0.0, 0.0, 1.3572); + goal.pose.orientation.x = goal_quat.getX(); + goal.pose.orientation.y = goal_quat.getY(); + goal.pose.orientation.z = goal_quat.getZ(); + goal.pose.orientation.w = goal_quat.getW(); + + geometry_msgs::TwistStamped cmd; + cmd.twist.linear.x = 0.478; + cmd.twist.linear.y = 0.321; + cmd.twist.angular.z = 0.741; + + double obstacle_distance = 0.135; + double exec_time = 0.497; + + RobotData data_in( + pose, + vel, + goal, + cmd, + obstacle_distance, + exec_time + ); + + // 1. evaluate if attributes were saved correctly + ASSERT_EQ(data_in.getPositionX(), pose.pose.pose.position.x); + ASSERT_EQ(data_in.getPositionY(), pose.pose.pose.position.y); + ASSERT_EQ(data_in.getPositionZ(), pose.pose.pose.position.z); + ASSERT_EQ(data_in.getOrientation().x, pose.pose.pose.orientation.x); + ASSERT_EQ(data_in.getOrientation().y, pose.pose.pose.orientation.y); + ASSERT_EQ(data_in.getOrientation().z, pose.pose.pose.orientation.z); + ASSERT_EQ(data_in.getOrientation().w, pose.pose.pose.orientation.w); + ASSERT_EQ(data_in.getCovariancePoseXX(), pose.pose.covariance.at(RobotData::COV_XX_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseXY(), pose.pose.covariance.at(RobotData::COV_XY_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYX(), pose.pose.covariance.at(RobotData::COV_YX_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYY(), pose.pose.covariance.at(RobotData::COV_YY_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYawYaw(), pose.pose.covariance.at(RobotData::COV_YAWYAW_INDEX)); + + ASSERT_EQ(data_in.getVelocityX(), vel.pose.pose.position.x); + ASSERT_EQ(data_in.getVelocityY(), vel.pose.pose.position.y); + ASSERT_EQ(data_in.getVelocityZ(), vel.pose.pose.position.z); + ASSERT_EQ(data_in.getVelocityTheta(), tf2::getYaw(vel.pose.pose.orientation)); + ASSERT_EQ(data_in.getCovarianceVelocityXX(), vel.pose.covariance.at(RobotData::COV_XX_INDEX)); + 
ASSERT_EQ(data_in.getCovarianceVelocityXY(), vel.pose.covariance.at(RobotData::COV_XY_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityYX(), vel.pose.covariance.at(RobotData::COV_YX_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityYY(), vel.pose.covariance.at(RobotData::COV_YY_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityThTh(), vel.pose.covariance.at(RobotData::COV_YAWYAW_INDEX)); + + ASSERT_EQ(data_in.getGoalPositionX(), goal.pose.position.x); + ASSERT_EQ(data_in.getGoalPositionY(), goal.pose.position.y); + ASSERT_EQ(data_in.getGoalPositionZ(), goal.pose.position.z); + ASSERT_DOUBLE_EQ(data_in.getGoalOrientationYaw(), tf2::getYaw(goal_quat)); + + ASSERT_EQ(data_in.getVelocityCommandLinearX(), cmd.twist.linear.x); + ASSERT_EQ(data_in.getVelocityCommandLinearY(), cmd.twist.linear.y); + ASSERT_EQ(data_in.getVelocityCommandLinearZ(), cmd.twist.linear.z); + ASSERT_EQ(data_in.getVelocityCommandAngularX(), cmd.twist.angular.x); + ASSERT_EQ(data_in.getVelocityCommandAngularY(), cmd.twist.angular.y); + ASSERT_EQ(data_in.getVelocityCommandAngularZ(), cmd.twist.angular.z); + + ASSERT_EQ(data_in.getDistToObstacle(), obstacle_distance); + ASSERT_EQ(data_in.getLocalPlanningTime(), exec_time); + + auto data_in_str = RobotLogger::robotToString(data_in); + auto conversion = RobotLogger::robotFromString(data_in_str); + + // conversion successful + ASSERT_TRUE(conversion.first); + RobotData data_out = conversion.second; + + // 2. 
check if "out" attributes are equal to "in"'s + ASSERT_EQ(data_out.getPositionX(), data_in.getPositionX()); + ASSERT_EQ(data_out.getPositionY(), data_in.getPositionY()); + ASSERT_EQ(data_out.getPositionZ(), data_in.getPositionZ()); + ASSERT_EQ(data_out.getOrientation().x, data_in.getOrientation().x); + ASSERT_EQ(data_out.getOrientation().y, data_in.getOrientation().y); + ASSERT_EQ(data_out.getOrientation().z, data_in.getOrientation().z); + ASSERT_EQ(data_out.getOrientation().w, data_in.getOrientation().w); + ASSERT_EQ(data_out.getCovariancePoseXX(), data_in.getCovariancePoseXX()); + ASSERT_EQ(data_out.getCovariancePoseXY(), data_in.getCovariancePoseXY()); + ASSERT_EQ(data_out.getCovariancePoseYX(), data_in.getCovariancePoseYX()); + ASSERT_EQ(data_out.getCovariancePoseYY(), data_in.getCovariancePoseYY()); + ASSERT_EQ(data_out.getCovariancePoseYawYaw(), data_in.getCovariancePoseYawYaw()); + + ASSERT_EQ(data_out.getVelocityX(), data_in.getVelocityX()); + ASSERT_EQ(data_out.getVelocityY(), data_in.getVelocityY()); + ASSERT_EQ(data_out.getVelocityZ(), data_in.getVelocityZ()); + ASSERT_EQ(data_out.getVelocityTheta(), data_in.getVelocityTheta()); + ASSERT_EQ(data_out.getCovarianceVelocityXX(), data_in.getCovarianceVelocityXX()); + ASSERT_EQ(data_out.getCovarianceVelocityXY(), data_in.getCovarianceVelocityXY()); + ASSERT_EQ(data_out.getCovarianceVelocityYX(), data_in.getCovarianceVelocityYX()); + ASSERT_EQ(data_out.getCovarianceVelocityYY(), data_in.getCovarianceVelocityYY()); + ASSERT_EQ(data_out.getCovarianceVelocityThTh(), data_in.getCovarianceVelocityThTh()); + + ASSERT_EQ(data_out.getGoalPositionX(), data_in.getGoalPositionX()); + ASSERT_EQ(data_out.getGoalPositionY(), data_in.getGoalPositionY()); + // not logged + // ASSERT_EQ(data_out.getGoalPositionZ(), data_in.getGoalPositionZ()); + ASSERT_EQ(data_out.getGoalOrientationYaw(), data_in.getGoalOrientationYaw()); + + ASSERT_EQ(data_out.getVelocityCommandLinearX(), data_in.getVelocityCommandLinearX()); + 
ASSERT_EQ(data_out.getVelocityCommandLinearY(), data_in.getVelocityCommandLinearY()); + ASSERT_EQ(data_out.getVelocityCommandLinearZ(), data_in.getVelocityCommandLinearZ()); + ASSERT_EQ(data_out.getVelocityCommandAngularX(), data_in.getVelocityCommandAngularX()); + ASSERT_EQ(data_out.getVelocityCommandAngularY(), data_in.getVelocityCommandAngularY()); + ASSERT_EQ(data_out.getVelocityCommandAngularZ(), data_in.getVelocityCommandAngularZ()); + + ASSERT_EQ(data_out.getLocalPlanningTime(), data_in.getLocalPlanningTime()); + ASSERT_EQ(data_out.getDistToObstacle(), data_in.getDistToObstacle()); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +}