From 9103e32b4badffe4a5909135ae8ccfd916b807b3 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Thu, 10 Aug 2023 15:29:03 +0200 Subject: [PATCH 01/81] `evaluation` - parameterized scripts for managing and processing logs - Aim is to avoid copying each script to log directories. Instead, all logs now take parameters that define where are the logs etc. - Deleted `evaluate_newest` as its functionality would be the same as `evaluate_from_dir`'s after parameterization --- srpb_evaluation/scripts/copy_logs.sh | 43 +++++++++++++++----- srpb_evaluation/scripts/evaluate_all_dirs.sh | 37 +++++++++++++++-- srpb_evaluation/scripts/evaluate_from_dir.sh | 33 +++++++++++---- srpb_evaluation/scripts/evaluate_newest.sh | 23 ----------- 4 files changed, 89 insertions(+), 47 deletions(-) delete mode 100755 srpb_evaluation/scripts/evaluate_newest.sh diff --git a/srpb_evaluation/scripts/copy_logs.sh b/srpb_evaluation/scripts/copy_logs.sh index c67a004..c8c6bb5 100755 --- a/srpb_evaluation/scripts/copy_logs.sh +++ b/srpb_evaluation/scripts/copy_logs.sh @@ -1,24 +1,45 @@ #!/usr/bin/env bash # -# Copies 3 newest files from this directory to the directory given by argument. -# It is handy to put that script into the logs directory. +# Copies 3 newest log files (robot/people/groups) from the one directory to the directory given by argument. # -if [ "$#" -ne 1 ]; then - echo "What's the target directory?" +# Usage looks like: +# +# rosrun srpb_evaluation copy_logs.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. 
Script args:" + echo " (1) [required] path to the source directory with logs ('*_robot.txt', '*_people.txt', and '*_groups.txt')" + echo " (2) [required] path to the target directory with copied logs; relative paths will be considered as relative to the source directory" exit 0 fi -SCRIPT_DIR=$(realpath $(dirname $0)) -TARGET_DIR="$SCRIPT_DIR/$1" -LOGS_DIR=$SCRIPT_DIR -mkdir -p $TARGET_DIR +logs_dir="$1" +# trim trailing / (if exists) +logs_dir="${logs_dir%/}" + +target_dir="$2" +# trim trailing / (if exists) +target_dir="${target_dir%/}" + +if [[ ! $target_dir == /* ]]; then + echo "Relative path" + target_dir=$logs_dir/$target_dir +fi + +# create a new directory for the copied logs +mkdir -p $target_dir # https://stackoverflow.com/a/54910963 -files_to_copy=$(ls -t $LOGS_DIR/log_*.txt | head -3) -cp $files_to_copy $TARGET_DIR +newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) +newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) +newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) + +# added an escape sequence for newline (file paths are printed in separate lines) +files_to_copy="$newest_robot"$'\n'"$newest_people"$'\n'"$newest_groups" +cp $files_to_copy $target_dir echo "Copied:" echo "" echo "$files_to_copy" echo "" -echo "to $TARGET_DIR" +echo "to $target_dir" diff --git a/srpb_evaluation/scripts/evaluate_all_dirs.sh b/srpb_evaluation/scripts/evaluate_all_dirs.sh index 9f61a1a..39dae32 100755 --- a/srpb_evaluation/scripts/evaluate_all_dirs.sh +++ b/srpb_evaluation/scripts/evaluate_all_dirs.sh @@ -2,21 +2,50 @@ # # Runs srpb_evaluation node with the newest set of log files from each directory below the script's path # -# It expects that `evaluate_from_dir.sh` script is placed in the same directory +# It expects that `evaluate_from_dir.sh` script is available using `rosrun srpb_evaluation evaluate_from_dir.sh` # # Remember to source your ROS workspace before execution # -SCRIPT_DIR=$(realpath $(dirname $0)) +# This script can be executed 
using: +# +# rosrun srpb_evaluation evaluate_all_dirs.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. Script args:" + echo " (1) [required] full path to the main directory with grouped logs (3 logs in each separate directory)" + echo " (2) [optional] safety distance for m_obs metric, 0.55 by default" + exit 0 +fi + +# only the path to logs directory is given +if [ "$#" -eq 1 ]; then + logs_dir="$1" + # trim trailing / (if exists) + logs_dir="${logs_dir%/}" + safety_dist=0.55 + echo "Using logs main directory: '$logs_dir' and default safety distance of '$safety_dist' m" +fi + +if [ "$#" -eq 2 ]; then + logs_dir="$1" + # trim trailing / (if exists) + logs_dir="${logs_dir%/}" + safety_dist=$2 + echo "Using logs main directory: '$logs_dir' and custom safety distance of '$safety_dist' m" +fi + +# exclude main directory from the search below +logs_basename=$(basename $logs_dir) # - ignore directories starting with the underscore # - for each directory, run the `evaluate_from_dir` script, passing each found directory as an argument # Ref: # * https://stackoverflow.com/a/20292636 # * https://stackoverflow.com/a/15736463 -find . -maxdepth 1 -type d -not -path "./_*" \( ! -name . \) -exec bash -c "$SCRIPT_DIR/evaluate_from_dir.sh '{}'" \; +find $logs_dir -maxdepth 1 -type d -not -path "$logs_dir/_*" \( ! -name $logs_basename \) -exec bash -c "rosrun srpb_evaluation evaluate_from_dir.sh '{}'" \; # first part of command above to save number of evaluated dirs, ref: https://stackoverflow.com/a/15663664 -count=$(find . -maxdepth 1 -type d -not -path "./_*" \( ! -name . \) | wc -l) +count=$(find $logs_dir -maxdepth 1 -type d -not -path "$logs_dir/_*" \( ! 
-name $logs_basename \) | wc -l) echo echo "Evaluated $count directories" diff --git a/srpb_evaluation/scripts/evaluate_from_dir.sh b/srpb_evaluation/scripts/evaluate_from_dir.sh index 7a878da..e96291e 100755 --- a/srpb_evaluation/scripts/evaluate_from_dir.sh +++ b/srpb_evaluation/scripts/evaluate_from_dir.sh @@ -4,19 +4,34 @@ # # Remember to source your ROS workspace before execution # -if [ "$#" -ne 1 ]; then - echo "What's the target directory?" +# This script can be executed using: +# +# rosrun srpb_evaluation evaluate_from_dir.sh +# +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then + echo "Wrong usage. Script args:" + echo " (1) [required] full path to the directory with logs ('*_robot.txt', '*_people.txt', and '*_groups.txt')" + echo " (2) [optional] safety distance for m_obs metric, 0.55 by default" exit 0 fi -SCRIPT_DIR=$(realpath $(dirname $0)) -LOGS_DIR="$1" -SAFETY_DIST=0.55 +# only the path to logs directory is given +if [ "$#" -eq 1 ]; then + logs_dir="$1" + safety_dist=0.55 + echo "Using logs directory: '$logs_dir' and default safety distance of '$safety_dist' m" +fi + +if [ "$#" -eq 2 ]; then + logs_dir="$1" + safety_dist=$2 + echo "Using logs directory: '$logs_dir' and custom safety distance of '$safety_dist' m" +fi # https://stackoverflow.com/a/54910963 -newest_robot=$(ls -t $LOGS_DIR/log_*_robot.txt | head -1) -newest_people=$(ls -t $LOGS_DIR/log_*_people.txt | head -1) -newest_groups=$(ls -t $LOGS_DIR/log_*_groups.txt | head -1) +newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) +newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) +newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) echo "Evaluating:" echo "" @@ -25,4 +40,4 @@ echo "people data: $newest_people" echo "groups data: $newest_groups" echo "" -rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $SAFETY_DIST +rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $safety_dist diff --git 
a/srpb_evaluation/scripts/evaluate_newest.sh b/srpb_evaluation/scripts/evaluate_newest.sh deleted file mode 100755 index e672cd6..0000000 --- a/srpb_evaluation/scripts/evaluate_newest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# Runs srpb_evaluation node with the newest set of log files from the script's directory. -# -# Remember to source your ROS workspace before execution -# -SCRIPT_DIR=$(realpath $(dirname $0)) -LOGS_DIR=$SCRIPT_DIR -SAFETY_DIST=0.55 - -# https://stackoverflow.com/a/54910963 -newest_robot=$(ls -t $LOGS_DIR/log_*_robot.txt | head -1) -newest_people=$(ls -t $LOGS_DIR/log_*_people.txt | head -1) -newest_groups=$(ls -t $LOGS_DIR/log_*_groups.txt | head -1) - -echo "Evaluating:" -echo "" -echo "robot data: $newest_robot" -echo "people data: $newest_people" -echo "groups data: $newest_groups" -echo "" - -rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $SAFETY_DIST From 72cc224d3bc827f12c7745efb2a097b6f49ac6c5 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 25 Sep 2023 15:06:15 +0200 Subject: [PATCH 02/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index b26d7c2..7b47f27 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.3 + 0.9.4 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From 4785fe1d494a81e15aede500c0bf9e70ac621a5d Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 2 Oct 2023 12:42:06 +0200 Subject: [PATCH 03/81] `evaluation` - added `PassingSpeedDiscomfort` metric, adjusted scripts for the automated data processing [#26] --- .../metrics/passing_speed_discomfort.h | 161 ++++++++++++++++++ .../include/srpb_evaluation/utils.h | 4 +- .../scripts/create_excel_from_results.py | 6 +- 
.../create_latex_table_from_results.py | 3 +- srpb_evaluation/src/main.cpp | 14 +- srpb_evaluation/src/utils.cpp | 4 +- 6 files changed, 187 insertions(+), 5 deletions(-) create mode 100644 srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h new file mode 100644 index 0000000..eae473f --- /dev/null +++ b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h @@ -0,0 +1,161 @@ +#pragma once + +#include "srpb_evaluation/metric_gaussian.h" + +#include + +namespace srpb { +namespace evaluation { + +/** + * @brief Computes the score of the robot's distance and speed characteristics during the motion around the humans. + * + * This metric implements the findings from "The effect of robot speed on comfortable passing distances" + * by Neggers et al. (2022). A continuous comfort model was recreated based on the data published in this + * paper. Calculations are performed in a fully deterministic manner. + * Refer to the social_nav_utils::PassingSpeedComfort implementation for a detailed description of the underlying + * bivariate model that fits the published data best. + * + * @note The class inherits from the @ref MetricGaussian class but in fact @ref PassingSpeedDiscomfort is + * not a Gaussian metric. It just uses min/max and violations calculation methods from the base class. 
+ * + * @param robot_data + * @param people_data + * @param distance_min minimum distance between the center of the robot and a human at which (when robot speed + * is equal to @ref speed_max) the normalized discomfort is the highest (1.0); the human is treated as a point here + * @param speed_max the maximum speed of the robot; used for a discomfort normalization (see @ref distance_min too) + * @param discomfort_threshold discomfort values above this level will be treated as violations + * @param max_method whether to use the maximum discomfort in a given timestamp as an indicator (true); the average + * is used when set to false + */ +class PassingSpeedDiscomfort: public MetricGaussian { +public: + PassingSpeedDiscomfort( + const std::vector>& robot_data, + const std::vector>& people_data, + double distance_min = 0.275, + double speed_max = 0.55, + double discomfort_threshold = 0.4, + bool max_method = true + ): + MetricGaussian(robot_data, people_data), + distance_min_(distance_min), + robot_speed_max_(speed_max), + discomfort_threshold_(discomfort_threshold), + max_method_(max_method), + discomfort_min_(0.0), + discomfort_max_(0.0), + discomfort_total_(0.0), + violations_percentage_(0.0) + { + if (people_data.empty()) { + return; + } + compute(); + } + + /// Returns total percentage throughout the scenario + virtual double getValue() const override { + return discomfort_total_ * 100.0; + } + + /// Returns minimum percentage throughout the scenario + virtual double getValueMin() const override { + return discomfort_min_ * 100.0; + } + + /// Returns maximum percentage throughout the scenario + virtual double getValueMax() const override { + return discomfort_max_ * 100.0; + } + + /// Returns percentage of violations throughout the scenario (regarding threshold) + virtual double getViolations() const override { + return violations_percentage_ * 100.0; + } + + void printResults() const override { + printf( + "Passing speed discomfort = %.4f [%%] (min = %.4f [%%], 
max = %.4f [%%], violations %.4f [%%])\n", + discomfort_total_ * 100.0, + discomfort_min_ * 100.0, + discomfort_max_ * 100.0, + violations_percentage_ * 100.0 + ); + } + +protected: + // parameters + double distance_min_; + double robot_speed_max_; + double discomfort_threshold_; + bool max_method_; + + // results + double discomfort_min_; + double discomfort_max_; + double discomfort_total_; + double violations_percentage_; + + void compute() override { + // store durations and discomforts of the humans in terms of robot motions + std::vector>> timed_discomforts; + /// prepare a container for discomfort values in this `for` iteration + std::pair> timed_discomfort; + + rewinder_.setHandlerNextTimestamp( + [&]() { + // prepare a container for upcoming calculations related to passing speed discomforts + timed_discomfort = std::make_pair( + rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(), std::vector() + ); + } + ); + + // on event 'iterating through next person in the timestamp' - compute a human discomfort given the robot position + // and a speed + rewinder_.setHandlerNextPersonTimestamp( + [&]() { + // Euclidean distance between centers + double distance = std::sqrt( + std::pow(rewinder_.getRobotCurr().getPositionX() - rewinder_.getPersonCurr().getPositionX(), 2.0) + + std::pow(rewinder_.getRobotCurr().getPositionY() - rewinder_.getPersonCurr().getPositionY(), 2.0) + ); + double robot_speed = std::hypot( + rewinder_.getRobotCurr().getVelocityX(), rewinder_.getRobotCurr().getVelocityY() + ); + + // compute discomfort given the distance between the centers and robot speed + social_nav_utils::PassingSpeedComfort psd( + distance, + robot_speed, + distance_min_, + robot_speed_max_ + ); + + // store result for later aggregation + timed_discomfort.second.push_back(psd.getDiscomfortNormalized()); + } + ); + + // on event 'iterated through all people in the timestamp' + rewinder_.setHandlerAllPeopleTimestamp( + [&]() { + if (!timed_discomfort.second.empty()) { 
+ timed_discomforts.push_back(timed_discomfort); + } + } + ); + rewinder_.perform(); + + std::tie( + discomfort_min_, + discomfort_max_, + discomfort_total_, + violations_percentage_ + ) = MetricGaussian::calculateGaussianStatistics(timed_discomforts, discomfort_threshold_, max_method_); + } +}; + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/utils.h b/srpb_evaluation/include/srpb_evaluation/utils.h index 93666b0..ebcc14b 100644 --- a/srpb_evaluation/include/srpb_evaluation/utils.h +++ b/srpb_evaluation/include/srpb_evaluation/utils.h @@ -22,6 +22,7 @@ #include "srpb_evaluation/metrics/motion_efficiency.h" #include "srpb_evaluation/metrics/obstacle_safety.h" #include "srpb_evaluation/metrics/oscillations.h" +#include "srpb_evaluation/metrics/passing_speed_discomfort.h" #include "srpb_evaluation/metrics/path_linear_length.h" #include "srpb_evaluation/metrics/personal_space_instrusion.h" #include "srpb_evaluation/metrics/velocity_smoothness.h" @@ -61,7 +62,8 @@ void createResultsFile( const InplaceRotations& inplace_rotations, const PersonalSpaceIntrusion& personal_space_intrusion, const FormationSpaceIntrusion& formation_space_intrusion, - const HeadingDirectionDisturbance& heading_direction_disturbance + const HeadingDirectionDisturbance& heading_direction_disturbance, + const PassingSpeedDiscomfort& passing_speed_discomfort ); /** diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 3c72349..5aabc64 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -100,6 +100,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: m_psi_row = [] m_fsi_row = [] m_dir_row = [] + m_psd_row = [] planners_row.append('Planner') trials_row.append('Trial') @@ -122,6 +123,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: m_psi_row.append('m_psi') 
m_fsi_row.append('m_fsi') m_dir_row.append('m_dir') + m_psd_row.append('m_psd') # iterate over keys for planner_key in results_total: @@ -147,6 +149,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: m_psi_row.append(result['m_psi']) m_fsi_row.append(result['m_fsi']) m_dir_row.append(result['m_dir']) + m_psd_row.append(result['m_psd']) return [ planners_row, @@ -169,7 +172,8 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: m_irot_row, m_psi_row, m_fsi_row, - m_dir_row + m_dir_row, + m_psd_row ] def cell_coords_to_sheet_cell_id(row: int, col: int): diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 1887f38..f452123 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -88,7 +88,8 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> 'm_inp': r"$m_{\mathrm{iprot}}$ \\ $\left[ \% \right]$", 'm_psi': r"$m_{\mathrm{psi}}$ \\ $\left[ \% \right]$", 'm_fsi': r"$m_{\mathrm{fsi}}$ \\ $\left[ \% \right]$", - 'm_dir': r"$m_{\mathrm{dir}}$ \\ $\left[ \% \right]$" + 'm_dir': r"$m_{\mathrm{dir}}$ \\ $\left[ \% \right]$", + 'm_psd': r"$m_{\mathrm{psd}}$ \\ $\left[ \% \right]$" } # retrieve names of planners assuming that all scenarios results have the same planner entries; diff --git a/srpb_evaluation/src/main.cpp b/srpb_evaluation/src/main.cpp index 8e77f63..098a4b9 100644 --- a/srpb_evaluation/src/main.cpp +++ b/srpb_evaluation/src/main.cpp @@ -52,6 +52,8 @@ int main(int argc, char* argv[]) { double robot_max_speed = 0.55; // threshold of Gaussian value to detect significant disturbance caused by robot location or motion direction double heading_disturbance_threshold = 0.20; + // threshold of the discomfort value that defines a significant violation + double passing_speed_discomfort_threshold = 0.40; auto timed_robot_data = parseFile(file_robot, 
&RobotLogger::robotFromString); auto timed_people_data = parseFile(file_people, &PeopleLogger::personFromString); @@ -134,6 +136,15 @@ int main(int argc, char* argv[]) { ); heading_direction.printResults(); + PassingSpeedDiscomfort psd( + timed_robot_data, + timed_people_data, + robot_circumradius, + robot_max_speed, + passing_speed_discomfort_threshold + ); + psd.printResults(); + // save results file auto file_results = file_robot.substr(0, file_robot.find_last_of('_')) + "_results.txt"; createResultsFile( @@ -155,7 +166,8 @@ int main(int argc, char* argv[]) { inplace, psi, fsi, - heading_direction + heading_direction, + psd ); return 0; diff --git a/srpb_evaluation/src/utils.cpp b/srpb_evaluation/src/utils.cpp index 2a5b3b2..7f3b044 100644 --- a/srpb_evaluation/src/utils.cpp +++ b/srpb_evaluation/src/utils.cpp @@ -137,7 +137,8 @@ void createResultsFile( const InplaceRotations& inplace_rotations, const PersonalSpaceIntrusion& personal_space_intrusion, const FormationSpaceIntrusion& formation_space_intrusion, - const HeadingDirectionDisturbance& heading_direction_disturbance + const HeadingDirectionDisturbance& heading_direction_disturbance, + const PassingSpeedDiscomfort& passing_speed_discomfort ) { std::fstream file_results; file_results.open(filename, std::ios::out); @@ -166,6 +167,7 @@ void createResultsFile( ss << "m_psi ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValue() << std::endl; ss << "m_fsi ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValue() << std::endl; ss << "m_dir ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValue() << std::endl; + ss << "m_psd ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValue() << std::endl; file_results << ss.str(); printf("Results CSV file saved at `%s`\r\n", filename.c_str()); } From 87bfce3b9f2049a5419dd9f4acd48eef3d1d38ca Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 2 Oct 2023 12:57:43 
+0200 Subject: [PATCH 04/81] `evaluation` - explicit initialization of all member variables in `FormationSpaceIntrusion` --- .../srpb_evaluation/metrics/formation_space_instrusion.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h index cf56b30..419a86c 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h @@ -19,7 +19,11 @@ class FormationSpaceIntrusion: public MetricGaussian { ): MetricGaussian(robot_data, people_data, groups_data), group_space_threshold_(group_space_threshold), - max_method_(max_method) + max_method_(max_method), + intrusion_min_(0.0), + intrusion_max_(0.0), + intrusion_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty() || groups_data.empty()) { return; From f20c76f1f9fd0b500474bc0125ee5a10d4057e8f Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 2 Oct 2023 12:58:08 +0200 Subject: [PATCH 05/81] `evaluation` - explicit initialization of all member variables in `HeadingDirectionDisturbance` --- .../srpb_evaluation/metrics/heading_direction_disturbance.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h index 6918d06..ccc593a 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h @@ -26,7 +26,11 @@ class HeadingDirectionDisturbance: public MetricGaussian { person_fov_(person_fov), robot_circumradius_(robot_circumradius), robot_max_speed_(robot_max_speed), - max_method_(max_method) + max_method_(max_method), + 
disturbance_min_(0.0), + disturbance_max_(0.0), + disturbance_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty()) { return; From 8d712e798391a779993b0b249bf9696c31b0ebff Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 2 Oct 2023 12:58:27 +0200 Subject: [PATCH 06/81] `evaluation` - explicit initialization of all member variables in `PersonalSpaceIntrusion` --- .../srpb_evaluation/metrics/personal_space_instrusion.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h index 6db7b12..0c40e87 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h @@ -39,7 +39,11 @@ class PersonalSpaceIntrusion: public MetricGaussian { ): MetricGaussian(robot_data, people_data), personal_space_threshold_(personal_space_threshold), - max_method_(max_method) + max_method_(max_method), + intrusion_min_(0.0), + intrusion_max_(0.0), + intrusion_total_(0.0), + violations_percentage_(0.0) { if (people_data.empty()) { return; From 17bc89daf2c220c919b17aaae31f11b629416f27 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 16:29:56 +0200 Subject: [PATCH 07/81] `evaluation` - added an extensive usage note in `create_excel_from_results` script --- srpb_evaluation/scripts/create_excel_from_results.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 5aabc64..1b952b5 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -287,7 +287,14 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, # command line arguments if len(sys.argv) == 1: 
print(f'Usage: ') - print(f' {sys.argv[0]} <(OPTIONAL) names of planners, must be part of corresponding filenames>') + print(f'') + print(f' python3 {sys.argv[0]} <(OPTIONAL) space-separated planner names>') + print(f'') + print(f' 1. The main directory with logs should store the log files grouped into separate directories, ') + print(f' each containing a robot, people and group data.') + print(f' 2. In order to detect which planners to include into results sheet, each name of a planner ') + print(f' must be a part of log filenames.') + print(f'') exit() logs_dir = str(sys.argv[1]) From 4e24b6b31f0fb08df1974014acd1a8a0f26adedf Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 20:45:47 +0200 Subject: [PATCH 08/81] `evaluation` - added a note about the spreadsheet created with the `create_excel_from_results` script --- srpb_evaluation/scripts/create_excel_from_results.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 1b952b5..91a95ce 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -335,3 +335,10 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, wb.save(str(output_path)) print(f'Results saved in: {output_path}') +print("") +# When the sheet with the results is not opened and saved by the Excel or LibreOffice Calc, then reading a non-empty +# cell will probably return None +# Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ +# Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y +print(f'Consider opening the results file and saving it in Excel/LibreOffice Calc (without any modifications).') +print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') From f909a8e13e7af44cecf8d814031be4866814f3d8 Mon Sep 17 00:00:00 2001 From: 
Jarek Karwowski Date: Wed, 4 Oct 2023 20:54:40 +0200 Subject: [PATCH 09/81] `evaluation` - more robust reading of sheet values in `create_latex_table_from_results` script - added note about a possible cause of None value appearing in a non-empty cell --- .../create_latex_table_from_results.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index f452123..b58b1f8 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -54,8 +54,20 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: metric_names = [] metric_values = [] while not sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): - metric_names.append(str(get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids))) - metric_values.append(float(get_sheet_val(sheet=sheet, row=row_iter, col=col_iter))) + # obtain sheet values + metric_name = get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids) + metric_value = get_sheet_val(sheet=sheet, row=row_iter, col=col_iter) + # evaluate the correctness of read data + if metric_name == None or metric_value == None: + raise Exception( + f"The metric ID cell `{col_metric_ids}{row_iter}` contains `{metric_name}` " + f"and the metric value cell `{col_iter}{row_iter}` contains `{metric_value}`. " + f"Cannot proceed with such values. Check whether the cells in your spreadsheet are empty. " + f"If they aren't, try to open the sheet and simply save it using Excel or LibreOffice Calc." 
+ ) + # collect + metric_names.append(str(metric_name)) + metric_values.append(float(metric_value)) # try to proceed to the next metric row_iter = increment_row(row_iter) @@ -100,6 +112,12 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> # retrieve number of evaluated scenarios scenarios_num = len(results) + # evaluate whether valid data are available + if not len(planner_names) or not planners_num: + raise Exception( + f"Aborting further execution as no planners are found in the results. " + ) + tex = str("") tex += ("% !TeX spellcheck = en_GB" + "\r\n") From 3bbe883c48a27a0531be9e25bb26bd9eeac83dd4 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 21:01:32 +0200 Subject: [PATCH 10/81] `evaluation` - metrics included in the `create_latex_table_from_results` script made configurable [#17] --- .../create_latex_table_from_results.py | 47 +++++++++++++++---- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index b58b1f8..92991a2 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -84,9 +84,11 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: return data -def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> str: +# results: results of a specific scenarios aggregated into a single structure +# metrics: names (keys) of metrics to include in the LaTeX table +def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], metric_names: List[str]) -> str: # only keys from this map will be put into the LaTeX table; keys must match the ones used in the Excel sheet - metric_latex_map = { + METRIC_LATEX_MAP = { 'm_obs': r"$m_{\mathrm{obs}}$ \\ $\left[ \% \right]$", 'm_mef': r"$m_{\mathrm{mef}}$ \\ $\left[ \mathrm{s} \right]$", 'm_path': 
r"$m_{\mathrm{plin}}$ \\ $\left[ \mathrm{m} \right]$", @@ -103,6 +105,25 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> 'm_dir': r"$m_{\mathrm{dir}}$ \\ $\left[ \% \right]$", 'm_psd': r"$m_{\mathrm{psd}}$ \\ $\left[ \% \right]$" } + # select metrics from the predefined set, i.e., METRIC_LATEX_MAP + metrics_map = {} + # by default, all metrics are included + if not len(metric_names): + metrics_map = METRIC_LATEX_MAP + else: + for name in metric_names: + if not name in METRIC_LATEX_MAP.keys(): + raise Exception( + f"Could not find {name} in the SRPB metrics map. Available metric names are: {METRIC_LATEX_MAP.keys()}" + ) + metrics_map[name] = METRIC_LATEX_MAP[name] + + if not len(metrics_map): + raise Exception( + f"Aborting further execution as no metrics were selected to include in the LaTeX table. " + f"See the script's usage instruction." + ) + print(f"Selected `{len(metrics_map)}` metrics to include in the LaTeX table: `{metrics_map.keys()}`") # retrieve names of planners assuming that all scenarios results have the same planner entries; # choose from the first scenario @@ -174,7 +195,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> tex += (r" % =============================== entries" + "\r\n") # iterate over metrics - for metric_id in metric_latex_map.keys(): + for metric_id in metrics_map.keys(): tex += (r"" + "\r\n") tex += (r" \multirow" + "\r\n") # total number of scenarios @@ -183,7 +204,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> tex += (r" {" + "\r\n") tex += (r" \shortstack{" + "\r\n") # ID of the metric and its unit - tex += (r" " + str(metric_latex_map[metric_id]) + "\r\n") + tex += (r" " + str(metrics_map[metric_id]) + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") tex += (r" % =========" + "\r\n") @@ -249,17 +270,25 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> ########################### 
if __name__ == "__main__": # command line arguments - if len(sys.argv) != 3: + if len(sys.argv) == 1: print(f'Usage: ') - print(f' python3 {sys.argv[0]} ') + print( + f' python3 {sys.argv[0]} ' + f' ' + f' ' + ) print(f'') print(f'Example:') - print(f' python3 {sys.argv[0]} "{{"static": {{"sim": "path_static_sim", "real": "path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" ~/table.tex') + print(f' python3 {sys.argv[0]} "{{"static": {{"sim": "path_static_sim", "real": "path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" ~/table.tex m_obs m_chc') exit(0) # location of the output file output_path = sys.argv[2] + # metrics to include in the table + metric_names = sys.argv[3:] + # list of dicts {, } inputs = [] @@ -278,8 +307,8 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]]) -> scenario_results = load_data_from_excel(input_file['path']) results_total.append({'name': input_file['name'], 'results': scenario_results}) - # generates a string representing LaTeX command that can be - results_table = create_latex_table(results_total) + # generates a string representing LaTeX command that can be directly included and used in a LaTeX document + results_table = create_latex_table(results_total, metric_names) # save results to a file f = open(output_path, "w") From 628f9ab9fff951c19ef006387610849a8d9e945c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 21:33:21 +0200 Subject: [PATCH 11/81] `evaluation` - updated LaTeX dependencies for the table generated by `create_latex_table_from_results` --- srpb_evaluation/scripts/create_latex_table_from_results.py | 1 + 1 file changed, 1 insertion(+) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 92991a2..5935984 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ 
b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -147,6 +147,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me # r forces to parse as raw string, ref: https://stackoverflow.com/a/46011113 tex += (r"% Dependencies of the 'results' table" + "\r\n") + tex += (r"\usepackage{graphicx} % \rotatebox" + "\r\n") tex += (r"\usepackage{multirow}" + "\r\n") tex += (r"\usepackage{diagbox}" + "\r\n") tex += ("\r\n") From 0a8121196ad5bba41bd641fff93696a72b259c42 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 21:57:50 +0200 Subject: [PATCH 12/81] `evaluation` - created a shared file with the Excel table placement, propagated to scripts [#31] --- .../scripts/create_excel_from_results.py | 15 ++++++++++----- .../scripts/create_latex_table_from_results.py | 7 ++++--- srpb_evaluation/scripts/excel_sheet_defines.py | 6 ++++++ 3 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 srpb_evaluation/scripts/excel_sheet_defines.py diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 91a95ce..700bc68 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -8,6 +8,7 @@ ''' import csv +import excel_sheet_defines import glob import sys @@ -247,12 +248,12 @@ def increment_char(c: chr): def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, calc_fun='MEDIAN'): - row_start = 27 - col_header = str('A') + row_start = int(excel_sheet_defines.RESULT_INIT_ROW) + col_header = str(excel_sheet_defines.RESULT_INIT_COL) ws[col_header + str(row_start)] = 'Planner' ws[col_header + str(row_start + 1)] = 'Trials' row_metric_start = row_start + 1 - col_planner_start = str('B') + col_planner_start = str(increment_char(col_header)) col_planner = col_planner_start # retrieve metric keys from an arbitrary planner @@ -313,8 +314,12 @@ def calculate_sheet(wb: 
Workbook, planner_names: List[str], results_total: Dict, # documentation at https://openpyxl.readthedocs.io/en/stable/index.html wb = Workbook() -# grab the active worksheet -ws = wb.active +# grab the active worksheet (workbook always creates an arbitrary sheet) +ws_init = wb.active +# create a sheet with a custom name +ws = wb.create_sheet(title=excel_sheet_defines.SHEET_NAME) +# delete the sheet initially created +wb.remove_sheet(worksheet=ws_init) # Prepare rows of data and append them to the sheet rows = prepare_sheet_rows(results) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 5935984..c4ce54a 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -5,6 +5,7 @@ sudo apt install python3-openpyxl ''' +import excel_sheet_defines import json import sys @@ -21,10 +22,10 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: # NOTE: data_only allows to read values instead of formulas wb = load_workbook(filename = str(path), data_only=True) # access specific sheet - sheet = wb["Sheet"] + sheet = wb[excel_sheet_defines.SHEET_NAME] # upper left cell - cell that initiates results table - col_init = 'A' - row_init = '28' + col_init = excel_sheet_defines.RESULT_INIT_COL + row_init = excel_sheet_defines.RESULT_INIT_ROW col_metric_ids = col_init diff --git a/srpb_evaluation/scripts/excel_sheet_defines.py b/srpb_evaluation/scripts/excel_sheet_defines.py new file mode 100644 index 0000000..a0646a3 --- /dev/null +++ b/srpb_evaluation/scripts/excel_sheet_defines.py @@ -0,0 +1,6 @@ +# name of the sheet in the Excel file with the results +SHEET_NAME = "SRPB" + +# upper left cell - cell that initiates results table +RESULT_INIT_COL = 'A' +RESULT_INIT_ROW = '27' From d6fd7e92ea90ee96817e7c229b30192232a8b8ab Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 22:18:25 +0200 
Subject: [PATCH 13/81] `evaluation` - `create_latex_table_from_results`: remake of SRPB metrics map, putting a newline between metric and its unit dynamically decided --- .../create_latex_table_from_results.py | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index c4ce54a..e613d1c 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -90,21 +90,21 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], metric_names: List[str]) -> str: # only keys from this map will be put into the LaTeX table; keys must match the ones used in the Excel sheet METRIC_LATEX_MAP = { - 'm_obs': r"$m_{\mathrm{obs}}$ \\ $\left[ \% \right]$", - 'm_mef': r"$m_{\mathrm{mef}}$ \\ $\left[ \mathrm{s} \right]$", - 'm_path': r"$m_{\mathrm{plin}}$ \\ $\left[ \mathrm{m} \right]$", - 'm_chc': r"$m_{\mathrm{chc}}$ \\ $\left[ \mathrm{rad} \right]$", - 'm_cef': r"$m_{\mathrm{cef}}$ \\ $\left[ 10^{-3} \cdot \mathrm{s} \right]$", - 'm_cre': r"$m_{\mathrm{cre}}$ \\ $\left[ 10^{-3} \cdot \mathrm{s} \right]$", - 'm_vsm': r"$m_{\mathrm{vsm}}$ \\ $\left[ \mathrm{\frac{m}{s^2}} \right]$", - 'm_hsm': r"$m_{\mathrm{hsm}}$ \\ $\left[ \mathrm{\frac{rad}{s^2}} \right]$", - 'm_osc': r"$m_{\mathrm{osc}}$ \\ $\left[ \% \right]$", - 'm_bwd': r"$m_{\mathrm{bwd}}$ \\ $\left[ \% \right]$", - 'm_inp': r"$m_{\mathrm{iprot}}$ \\ $\left[ \% \right]$", - 'm_psi': r"$m_{\mathrm{psi}}$ \\ $\left[ \% \right]$", - 'm_fsi': r"$m_{\mathrm{fsi}}$ \\ $\left[ \% \right]$", - 'm_dir': r"$m_{\mathrm{dir}}$ \\ $\left[ \% \right]$", - 'm_psd': r"$m_{\mathrm{psd}}$ \\ $\left[ \% \right]$" + 'm_obs': {'name': r"$m_{\mathrm{obs}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_mef': {'name': r"$m_{\mathrm{mef}}$", 
'unit': r"$\left[ \mathrm{s} \right]$"}, + 'm_path': {'name': r"$m_{\mathrm{plin}}$", 'unit': r"$\left[ \mathrm{m} \right]$"}, + 'm_chc': {'name': r"$m_{\mathrm{chc}}$", 'unit': r"$\left[ \mathrm{rad} \right]$"}, + 'm_cef': {'name': r"$m_{\mathrm{cef}}$", 'unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$"}, + 'm_cre': {'name': r"$m_{\mathrm{cre}}$", 'unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$"}, + 'm_vsm': {'name': r"$m_{\mathrm{vsm}}$", 'unit': r"$\left[ \mathrm{\frac{m}{s^2}} \right]$"}, + 'm_hsm': {'name': r"$m_{\mathrm{hsm}}$", 'unit': r"$\left[ \mathrm{\frac{rad}{s^2}} \right]$"}, + 'm_osc': {'name': r"$m_{\mathrm{osc}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_bwd': {'name': r"$m_{\mathrm{bwd}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_inp': {'name': r"$m_{\mathrm{iprot}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_psi': {'name': r"$m_{\mathrm{psi}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_fsi': {'name': r"$m_{\mathrm{fsi}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_dir': {'name': r"$m_{\mathrm{dir}}$", 'unit': r"$\left[ \% \right]$"}, + 'm_psd': {'name': r"$m_{\mathrm{psd}}$", 'unit': r"$\left[ \% \right]$"} } # select metrics from the predefined set, i.e., METRIC_LATEX_MAP metrics_map = {} @@ -206,7 +206,12 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me tex += (r" {" + "\r\n") tex += (r" \shortstack{" + "\r\n") # ID of the metric and its unit - tex += (r" " + str(metrics_map[metric_id]) + "\r\n") + # whether to put the unit in a new line or not (when there are too few rows) + if scenarios_num > 2: + metric_name_and_unit = f"{metrics_map[metric_id]['name']} \\ {metrics_map[metric_id]['unit']}" + else: + metric_name_and_unit = f"{metrics_map[metric_id]['name']} {metrics_map[metric_id]['unit']}" + tex += (r" " + str(metric_name_and_unit) + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") tex += (r" % =========" + "\r\n") From 3a67cbf130c3810410e86256b8da4d461a181339 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski 
Date: Wed, 4 Oct 2023 22:21:12 +0200 Subject: [PATCH 14/81] `evaluation` - `create_excel_from_results`: simplified `prepare_sheet_rows` --- .../scripts/create_excel_from_results.py | 67 ++++++------------- 1 file changed, 22 insertions(+), 45 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 700bc68..5035461 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -80,51 +80,28 @@ def collect_results_planners(dir_path: str, planner_name: List[str]) -> Dict: def prepare_sheet_rows(results_total: Dict) -> List[List]: - planners_row = [] - trials_row = [] - file_row = [] - spls_robot_row = [] - spls_ppl_row = [] - spls_grp_row = [] - m_goal_row = [] - m_obs_row = [] - m_eff_row = [] - m_cef_row = [] - m_cre_row = [] - m_vsm_row = [] - m_hsm_row = [] - m_path_row = [] - m_chc_row = [] - m_osc_row = [] - m_bwd_row = [] - m_irot_row = [] - m_psi_row = [] - m_fsi_row = [] - m_dir_row = [] - m_psd_row = [] - - planners_row.append('Planner') - trials_row.append('Trial') - file_row.append('File') - spls_robot_row.append('Samples robot') - spls_ppl_row.append('Samples people') - spls_grp_row.append('Samples groups') - m_goal_row.append('m_goal') - m_obs_row.append('m_obs') - m_eff_row.append('m_eff') - m_cef_row.append('m_cef') - m_cre_row.append('m_cre') - m_vsm_row.append('m_vsm') - m_hsm_row.append('m_hsm') - m_path_row.append('m_path') - m_chc_row.append('m_chc') - m_osc_row.append('m_osc') - m_bwd_row.append('m_bwd') - m_irot_row.append('m_irot') - m_psi_row.append('m_psi') - m_fsi_row.append('m_fsi') - m_dir_row.append('m_dir') - m_psd_row.append('m_psd') + planners_row = ['Planner'] + trials_row = ['Trial'] + file_row = ['File'] + spls_robot_row = ['Samples robot'] + spls_ppl_row = ['Samples people'] + spls_grp_row = ['Samples groups'] + m_goal_row = ['m_goal'] + m_obs_row = ['m_obs'] + m_eff_row = ['m_eff'] + 
m_cef_row = ['m_cef'] + m_cre_row = ['m_cre'] + m_vsm_row = ['m_vsm'] + m_hsm_row = ['m_hsm'] + m_path_row = ['m_path'] + m_chc_row = ['m_chc'] + m_osc_row = ['m_osc'] + m_bwd_row = ['m_bwd'] + m_irot_row = ['m_irot'] + m_psi_row = ['m_psi'] + m_fsi_row = ['m_fsi'] + m_dir_row = ['m_dir'] + m_psd_row = ['m_psd'] # iterate over keys for planner_key in results_total: From 674ec4600e6c269182dbceec7eb835e9917ce5b4 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 4 Oct 2023 22:25:46 +0200 Subject: [PATCH 15/81] `evaluation` - `create_latex_table_from_results`: added logging of planners and scenarios numbers --- srpb_evaluation/scripts/create_latex_table_from_results.py | 1 + 1 file changed, 1 insertion(+) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index e613d1c..3092372 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -139,6 +139,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me raise Exception( f"Aborting further execution as no planners are found in the results. 
" ) + print(f"Selected {planners_num} planners with names `{planner_names}` for `{scenarios_num}` scenarios") tex = str("") From b58a649b9664716860fd37d66c257243e4c544c2 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Fri, 6 Oct 2023 00:27:24 +0200 Subject: [PATCH 16/81] `evaluation` - added a script for renaming directories matching the pattern --- .../scripts/rename_dirs_matching_pattern.sh | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100755 srpb_evaluation/scripts/rename_dirs_matching_pattern.sh diff --git a/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh new file mode 100755 index 0000000..84b912e --- /dev/null +++ b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# This script is handy when one wants to use `create_excel_from_results` but in the same directory one has planners +# named with the same "base name", e.g., `great_planner` and `great_planner_plus` +# +# Usage: +# +# ./rename_dirs_matching_pattern.sh ~/srpb_logs/ great_planner great_planner_plus great_planner_original +# +# This script was written by the ChatGPT + +# Check if all arguments are provided +if [ "$#" -ne 4 ]; then + echo "Usage: $0 " + exit 1 +fi + +main_dir="$1" +pattern_to_match="$2" +pattern_to_differentiate="$3" +pattern_to_replace="$4" + +cd "$main_dir" || exit 1 + +# Loop through directories matching the provided pattern +for dir in *"$pattern_to_match"*; do + # Check if the directory does not contain the differentiate pattern + if [[ $dir != *"$pattern_to_differentiate"* ]]; then + # Extract the base name without the pattern and append the replace pattern + new_dir=$(echo "$dir" | sed "s/$pattern_to_match/$pattern_to_replace/")"" + + # Rename the directory + mv "$dir" "$new_dir" + + # Print a message indicating the rename + echo "Renamed: $dir -> $new_dir" + fi +done From b842345fd192e1d6cbcc34603983053b7d8360d3 Mon Sep 17 00:00:00 2001 From: 
Jarek Karwowski Date: Fri, 6 Oct 2023 00:34:18 +0200 Subject: [PATCH 17/81] `evaluation` - `create_excel_from_results`: multiple adjustments to increase robustness when a corrupted or incomplete data is present, support for bigger datasets (column numbering) --- .../scripts/create_excel_from_results.py | 73 ++++++++++++++----- 1 file changed, 55 insertions(+), 18 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 5035461..47fd863 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -10,6 +10,7 @@ import csv import excel_sheet_defines import glob +import re import sys from string import ascii_uppercase @@ -25,8 +26,17 @@ def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=3): dirnames = dirnames_underscore dirnames.extend(dirnames_dash) if len(dirnames) < min_logs: - exit(f'Too few data entries for {planner_name} planner. Got: \r\n{dirnames}') - return dirnames + exit(f'Too few data entries for {planner_name} planner. 
Got {len(dirnames)}/{min_logs}: \r\n{dirnames}') + + # got rid of the dirs that are marked to be ignored + dirnames_valid = [] + for dirname in dirnames: + if re.search('ignore', dirname, re.IGNORECASE): + print(f'Ignoring a directory `{dirname}` from including to the overall `{planner_name}` planner results') + continue + dirnames_valid.append(dirname) + + return dirnames_valid def get_log_result_files(log_dirs: str): @@ -34,9 +44,11 @@ def get_log_result_files(log_dirs: str): for log_dir in log_dirs: results_file = glob.glob(log_dir + '/' + '*results.txt*') if len(results_file) == 0: - exit(f'Lack of results file at {log_dir}') + print(f'Lack of results file at `{log_dir}` dir, skipping') + continue elif len(results_file) > 1: - exit(f'Multiple results files at {log_dir}') + print(f'Multiple results files at `{log_dir}` dir, skipping') + continue result_files.append(results_file[0]) return result_files @@ -53,10 +65,17 @@ def read_results_file(results_file: str): for row in csv_result: key = str(row[0]).lstrip().rstrip() value_str = str(row[1]) - if value_str.find('.') != -1: - value = float(value_str) - else: - value = int(value_str) + try: + if 'nan' in value_str: + value = None + print(f"NaN value detected for the key `{key}` in the results file `{results_file}`") + elif value_str.find('.') != -1: + value = float(value_str) + else: + value = int(value_str) + except ValueError: + print(f"Cannot convert a value `{value_str}` of a key `{key}` to a numeric, skipping `{results_file}`") + return {} dict[key] = value return dict @@ -168,14 +187,21 @@ def cell_coords_to_sheet_cell_id(row: int, col: int): found_col = True break it_col = it_col + 1 - # repeat once again if required (doubling letters) + # repeat once again if required (doubling letters) - columns A - ZZ cover a sufficient number of cases if not found_col: - for c in ascii_uppercase: - if it_col == col_w_offset: - cell_col = 'A' + str(c) - found_col = True + # first letter of the cell address + for first 
in ascii_uppercase: + # second letter of the cell address + for second in ascii_uppercase: + # print(f'[cell_coords_to_sheet_cell_id] row {row}, col {col}, cell row {cell_row} | it col {it_col}, ADDR `{first}{second}`') + if it_col == col_w_offset: + cell_col = str(first) + str(second) + found_col = True + break + it_col = it_col + 1 + # break the outer loop if possible + if found_col: break - it_col = it_col + 1 if not found_col: exit(f"Could not find a valid column ID for ({row}, {col}) configuration") @@ -205,9 +231,17 @@ def get_sheet_datacell(planner: str, trial: int, result_key: str, results_total: # counting rows from the start - exemplary key to iterate through metrics metric_counter = 0 - results_example = results_total[planner][0] + try: + results_example = results_total[planner][0] + except IndexError: + print( + f'The planner `{planner}` does not seem to contain any keys with metric names. ' + f'Got `{results_total[planner]}` while searching for the metric `{result_key}` for the trial `{trial}`' + ) + return None + if not result_key in results_example.keys(): - exit(f'Cannot proceed as results do not contain that key: {result_key}. Available keys: {results_example.keys()}') + exit(f'Cannot proceed as results do not contain that key: `{result_key}`. 
Available keys: `{results_example.keys()}`') found = False for key in results_example: @@ -251,6 +285,8 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, planner_trial_last_id = planner_trials - 1 cell_begin = get_sheet_datacell(planner, 0, metric, results) cell_end = get_sheet_datacell(planner, planner_trial_last_id, metric, results) + if cell_begin == None or cell_end == None: + continue # fill spreadsheet ws[col_header + str(row_metric_start + m_index)] = metric ws[col_planner + str(row_start)] = planner @@ -307,7 +343,7 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, calculate_sheet(ws, planners, results, 'MEDIAN') # Prepare name of the output file -output_filename = 'results' +output_filename = 'results' + '_' + Path(logs_dir).name for planner in planners: output_filename = output_filename + '_' + planner output_filename = output_filename.rstrip('_') + '.xlsx' @@ -322,5 +358,6 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, # cell will probably return None # Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ # Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y -print(f'Consider opening the results file and saving it in Excel/LibreOffice Calc (without any modifications).') +print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') +print(f'This is a necessary step when one wants to use the script that creates a LaTeX table from a spreadsheet') From f94709cb322de83fd19f8c4907ca6057b8807bec Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Tue, 21 Nov 2023 02:07:05 +0100 Subject: [PATCH 18/81] `evaluation` - scripts now handle copying and evaluation of logs divided into parts but related to the same benchmark session --- 
srpb_evaluation/scripts/copy_logs.sh | 73 ++++++++++++++++---- srpb_evaluation/scripts/evaluate_from_dir.sh | 7 +- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/srpb_evaluation/scripts/copy_logs.sh b/srpb_evaluation/scripts/copy_logs.sh index c8c6bb5..b1bc642 100755 --- a/srpb_evaluation/scripts/copy_logs.sh +++ b/srpb_evaluation/scripts/copy_logs.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copies 3 newest log files (robot/people/groups) from the one directory to the directory given by argument. +# Copies newest log files (robot/people/groups) from the one directory to the directory given by argument. # # Usage looks like: # @@ -26,20 +26,69 @@ if [[ ! $target_dir == /* ]]; then target_dir=$logs_dir/$target_dir fi -# create a new directory for the copied logs -mkdir -p $target_dir - +# Find the newest files matching the pattern # https://stackoverflow.com/a/54910963 newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) -# added an escape sequence for newline (file paths are printed in separate lines) -files_to_copy="$newest_robot"$'\n'"$newest_people"$'\n'"$newest_groups" -cp $files_to_copy $target_dir +# Extract the timestamp from the selected file (note that they may differ slightly) +timestamp_robot=$(echo "$newest_robot" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_people=$(echo "$newest_people" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_groups=$(echo "$newest_groups" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') + +# Indicates whether logs for a certain session consists of multiple parts +max_part='1' + +# Find all files with the same timestamp +while IFS= read -r -d '' file; do + # Extract filename without path + filename=$(basename "$file") + # Extract part number from filename (no "part" suffix means "1") + part=$(echo "$filename" | grep -oP '_part\K\d+' || echo '1') + # Add file 
to respective part number group + part_files[$part]+="$file " + + # Find the biggest "part" number + curr_part_num=$(expr "${part}" + 0) + max_part_num=$(expr "${max_part}" + 0) + if [ "${curr_part_num}" -gt "${max_part_num}" ]; then + max_part="${curr_part_num}" + fi +done < <(find "$logs_dir" -maxdepth 1 -type f \ + \( \ + -name "log_*_${timestamp_robot}_*.txt" \ + -o -name "log_*_${timestamp_people}_*.txt" \ + -o -name "log_*_${timestamp_groups}_*.txt" \ + \) \ + -print0 \ +) + +# If the logs are not divided into "parts", we'll don't need to create separate directories in the target dir +if [ "${max_part}" -eq "1" ]; then + # the logging session consisted from only 1 part + mkdir -p $target_dir + cp ${part_files[${max_part}]} $target_dir + + # files but each in a new line + files_nl=$(echo "${part_files[${max_part}]}" | tr ' ' '\n') + echo "Copied:" + echo "$files_nl" + echo "to $(realpath $target_dir)" + exit 0 +fi + +# Found multiple parts of logs related to the same session +# Iterate from 1 to the value in max_part +for ((i = 1; i <= max_part; i++)); do + target_dir_part=$target_dir/part$i + mkdir -p $target_dir_part + cp ${part_files[$i]} $target_dir_part -echo "Copied:" -echo "" -echo "$files_to_copy" -echo "" -echo "to $target_dir" + # files but each in a new line + files_nl=$(echo "${part_files[$i]}" | tr ' ' '\n') + echo "Copied:" + echo "$files_nl" + echo "to $(realpath $target_dir_part)" +done +exit 0 diff --git a/srpb_evaluation/scripts/evaluate_from_dir.sh b/srpb_evaluation/scripts/evaluate_from_dir.sh index e96291e..b429763 100755 --- a/srpb_evaluation/scripts/evaluate_from_dir.sh +++ b/srpb_evaluation/scripts/evaluate_from_dir.sh @@ -29,9 +29,10 @@ if [ "$#" -eq 2 ]; then fi # https://stackoverflow.com/a/54910963 -newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) -newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) -newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) +# exclude results file(s) +newest_robot=$(ls -t 
$logs_dir/log_*_robot*.txt | grep -v "results" | head -1) +newest_people=$(ls -t $logs_dir/log_*_people*.txt | grep -v "results" | head -1) +newest_groups=$(ls -t $logs_dir/log_*_groups*.txt | grep -v "results" | head -1) echo "Evaluating:" echo "" From 95dfe0088c7c7e90494f597dc2d75d327b2666bd Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 2 Oct 2023 13:07:56 +0200 Subject: [PATCH 19/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index 7b47f27..6e5f634 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.4 + 0.9.5 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From d62a39a1f17ca8c95f13e4a77afc1a785ad380fb Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 20 Nov 2023 21:46:50 +0100 Subject: [PATCH 20/81] `logger` - added `latch` method in `RobotLogger` to synchronize better the robot and people data - with a longer computation times there might be a slight timing offset between, e.g. 
robot and people data --- srpb_logger/include/srpb_logger/robot_logger.h | 13 +++++++++++++ srpb_logger/src/robot_logger.cpp | 14 ++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/srpb_logger/include/srpb_logger/robot_logger.h b/srpb_logger/include/srpb_logger/robot_logger.h index 9293332..22df311 100644 --- a/srpb_logger/include/srpb_logger/robot_logger.h +++ b/srpb_logger/include/srpb_logger/robot_logger.h @@ -8,6 +8,7 @@ // helper functions #include +#include #include #include @@ -25,6 +26,16 @@ class RobotLogger: public BenchmarkLogger { void start(); + /** + * Should be called before @ref update when a significant timing offset is expected (see details) + * + * The timing offset is related to the the duration between the "timestamp" passed to the @ref update and + * the actual timestamp of the localization data that are updated asynchronously and stored as class members. + * + * Each "latch" should be renewed after every @ref update call. + */ + void latch(); + /// Performs writes to files that this class handles, most recent robot data is used void update(double timestamp, const RobotData& robot); @@ -44,6 +55,8 @@ class RobotLogger: public BenchmarkLogger { std::mutex cb_mutex_; ros::Subscriber localization_sub_; + /// When this flag is true, any incoming updates of robot pose and velocity will not be accepted + std::atomic latched_; /// Newest pose with covariance of the robot (expressed in coordinate system given by 'target_frame_') geometry_msgs::PoseWithCovariance robot_pose_; /// Newest velocity with covariance of the robot (expressed in local coordinate system) diff --git a/srpb_logger/src/robot_logger.cpp b/srpb_logger/src/robot_logger.cpp index 5209df3..b0e9c40 100644 --- a/srpb_logger/src/robot_logger.cpp +++ b/srpb_logger/src/robot_logger.cpp @@ -12,6 +12,8 @@ void RobotLogger::init(ros::NodeHandle& nh) { BenchmarkLogger::init(nh); log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, "robot"); + // by default, latching is 
disabled = async updates are allowed + latched_.store(false); localization_sub_ = nh.subscribe( "/odom", 1, @@ -55,6 +57,9 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { ); } + // (enable the asynchronous updates of pose and velocity + latched_.store(false); + std::stringstream ss; ss.setf(std::ios::fixed); ss << std::setw(9) << std::setprecision(4) << timestamp << " "; @@ -62,6 +67,10 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { log_file_ << ss.str(); } +void RobotLogger::latch() { + latched_.store(true); +} + void RobotLogger::finish() { BenchmarkLogger::finish(log_file_); } @@ -166,6 +175,11 @@ std::pair RobotLogger::robotFromString(const std::string& str) } void RobotLogger::localizationCB(const nav_msgs::OdometryConstPtr& msg) { + // check if any updates should be accepted + if (latched_.load()) { + return; + } + std::lock_guard l(cb_mutex_); // pose should be transformed to the logger's frame robot_pose_ = transformPose(msg->pose, msg->header.frame_id).pose; From 4a0cb9c07a6a3fef9d592af7825a92801315ffe7 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 20 Nov 2023 19:58:35 +0100 Subject: [PATCH 21/81] `logger` & `evaluation` - added logging of the global path planner features, updated automation scripts [#25] - added `GlobalPlannerLogger` and `GlobalPlannerData` classes - added unit tests evaluating the conversion to/from `GlobalPlannerData` from/to `string` - updated `evaluation` scripts --- srpb_evaluation/scripts/copy_logs.sh | 7 +- srpb_evaluation/scripts/evaluate_from_dir.sh | 2 + srpb_logger/CMakeLists.txt | 7 ++ .../include/srpb_logger/global_planner_data.h | 105 +++++++++++++++++ .../srpb_logger/global_planner_logger.h | 44 +++++++ srpb_logger/src/global_planner_logger.cpp | 107 ++++++++++++++++++ .../test/test_global_planner_logger.cpp | 82 ++++++++++++++ 7 files changed, 352 insertions(+), 2 deletions(-) create mode 100644 srpb_logger/include/srpb_logger/global_planner_data.h create mode 
100644 srpb_logger/include/srpb_logger/global_planner_logger.h create mode 100644 srpb_logger/src/global_planner_logger.cpp create mode 100644 srpb_logger/test/test_global_planner_logger.cpp diff --git a/srpb_evaluation/scripts/copy_logs.sh b/srpb_evaluation/scripts/copy_logs.sh index b1bc642..d51efbd 100755 --- a/srpb_evaluation/scripts/copy_logs.sh +++ b/srpb_evaluation/scripts/copy_logs.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copies newest log files (robot/people/groups) from the one directory to the directory given by argument. +# Copies newest log files (robot/people/groups/gplanner) from the one directory to the directory given by argument. # # Usage looks like: # @@ -8,7 +8,7 @@ # if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then echo "Wrong usage. Script args:" - echo " (1) [required] path to the source directory with logs ('*_robot.txt', '*_people.txt', and '*_groups.txt')" + echo " (1) [required] path to the source directory with logs ('*_robot.txt', '*_people.txt', '*_groups.txt', and '*_gplanner.txt')" echo " (2) [required] path to the target directory with copied logs; relative paths will be considered as relative to the source directory" exit 0 fi @@ -31,11 +31,13 @@ fi newest_robot=$(ls -t $logs_dir/log_*_robot.txt | head -1) newest_people=$(ls -t $logs_dir/log_*_people.txt | head -1) newest_groups=$(ls -t $logs_dir/log_*_groups.txt | head -1) +newest_gplanner=$(ls -t $logs_dir/log_*_gplanner.txt | head -1) # Extract the timestamp from the selected file (note that they may differ slightly) timestamp_robot=$(echo "$newest_robot" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') timestamp_people=$(echo "$newest_people" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') timestamp_groups=$(echo "$newest_groups" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') +timestamp_gplanner=$(echo "$newest_gplanner" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') # Indicates whether logs for a certain session consists of multiple parts max_part='1' @@ -60,6 +62,7 @@ 
done < <(find "$logs_dir" -maxdepth 1 -type f \ -name "log_*_${timestamp_robot}_*.txt" \ -o -name "log_*_${timestamp_people}_*.txt" \ -o -name "log_*_${timestamp_groups}_*.txt" \ + -o -name "log_*_${timestamp_gplanner}_*.txt" \ \) \ -print0 \ ) diff --git a/srpb_evaluation/scripts/evaluate_from_dir.sh b/srpb_evaluation/scripts/evaluate_from_dir.sh index b429763..42c857c 100755 --- a/srpb_evaluation/scripts/evaluate_from_dir.sh +++ b/srpb_evaluation/scripts/evaluate_from_dir.sh @@ -33,12 +33,14 @@ fi newest_robot=$(ls -t $logs_dir/log_*_robot*.txt | grep -v "results" | head -1) newest_people=$(ls -t $logs_dir/log_*_people*.txt | grep -v "results" | head -1) newest_groups=$(ls -t $logs_dir/log_*_groups*.txt | grep -v "results" | head -1) +newest_gplanner=$(ls -t $logs_dir/log_*_gplanner*.txt | grep -v "results" | head -1) echo "Evaluating:" echo "" echo "robot data: $newest_robot" echo "people data: $newest_people" echo "groups data: $newest_groups" +echo "global planner data: $newest_gplanner" echo "" rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $safety_dist diff --git a/srpb_logger/CMakeLists.txt b/srpb_logger/CMakeLists.txt index 1903760..fb050dc 100644 --- a/srpb_logger/CMakeLists.txt +++ b/srpb_logger/CMakeLists.txt @@ -36,8 +36,10 @@ add_library(${PROJECT_NAME}_lib include/${PROJECT_NAME}/people_logger.h include/${PROJECT_NAME}/robot_data.h include/${PROJECT_NAME}/robot_logger.h + include/${PROJECT_NAME}/global_planner_logger.h src/people_logger.cpp src/robot_logger.cpp + src/global_planner_logger.cpp ) target_link_libraries(${PROJECT_NAME}_lib ${catkin_LIBRARIES} @@ -63,4 +65,9 @@ if (CATKIN_ENABLE_TESTING) if(TARGET test_people_logger) target_link_libraries(test_people_logger ${PROJECT_NAME}_lib) endif() + + catkin_add_gtest(test_global_planner_logger test/test_global_planner_logger.cpp) + if(TARGET test_global_planner_logger) + target_link_libraries(test_global_planner_logger ${PROJECT_NAME}_lib) + endif() endif() diff 
--git a/srpb_logger/include/srpb_logger/global_planner_data.h b/srpb_logger/include/srpb_logger/global_planner_data.h new file mode 100644 index 0000000..d8ad1e7 --- /dev/null +++ b/srpb_logger/include/srpb_logger/global_planner_data.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include + +namespace srpb { +namespace logger { + +class GlobalPlannerData { +public: + GlobalPlannerData( + const geometry_msgs::PoseStamped& robot_pose, + const geometry_msgs::PoseStamped& goal_pose, + bool planning_success, + double planning_time, + size_t plan_size + ): GlobalPlannerData(robot_pose.pose, goal_pose.pose, planning_success, planning_time, plan_size) {} + + GlobalPlannerData( + const geometry_msgs::Pose& robot_pose, + const geometry_msgs::Pose& goal_pose, + bool planning_success, + double planning_time, + size_t plan_size + ): + pose_(robot_pose), + goal_(goal_pose), + planning_success_(planning_success), + planning_time_(planning_time), + plan_size_(plan_size) + {} + + inline geometry_msgs::Pose getPose() const { + return pose_; + } + + inline geometry_msgs::Point getPosition() const { + return pose_.position; + } + + inline geometry_msgs::Quaternion getOrientation() const { + return pose_.orientation; + } + + inline double getPositionX() const { + return pose_.position.x; + } + + inline double getPositionY() const { + return pose_.position.y; + } + + inline double getPositionZ() const { + return pose_.position.z; + } + + inline double getOrientationYaw() const { + return tf2::getYaw(pose_.orientation); + } + + inline geometry_msgs::Pose getGoal() const { + return goal_; + } + + inline double getGoalPositionX() const { + return goal_.position.x; + } + + inline double getGoalPositionY() const { + return goal_.position.y; + } + + inline double getGoalPositionZ() const { + return goal_.position.z; + } + + inline double getGoalOrientationYaw() const { + return tf2::getYaw(goal_.orientation); + } + + /// Whether the planning was successful (i.e. 
the planner did not fail and returned a non-empty plan) + inline bool getPlanningStatus() const { + return planning_success_; + } + + /// Returns duration of global plan computations + inline double getPlanningTime() const { + return planning_time_; + } + + /// Number of poses the global path plan consists of + inline size_t getPlanSize() const { + return plan_size_; + } + +protected: + bool planning_success_; + geometry_msgs::Pose pose_; + geometry_msgs::Pose goal_; + double planning_time_; + size_t plan_size_; +}; + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/include/srpb_logger/global_planner_logger.h b/srpb_logger/include/srpb_logger/global_planner_logger.h new file mode 100644 index 0000000..848e7fb --- /dev/null +++ b/srpb_logger/include/srpb_logger/global_planner_logger.h @@ -0,0 +1,44 @@ +#pragma once + +#include "benchmark_logger.h" +#include "global_planner_data.h" + +namespace srpb { +namespace logger { + +/** + * @brief A logger class storing the information about the global planner + * + * The class for global planner data logging was separated from the logger of the general robot data (gathered with + * a frequency of the local planning) as it is usually gathered with a very different frequency. + * + * All in all, this class allows avoiding the interpolation during an offline data analysis. 
+ */ +class GlobalPlannerLogger: public BenchmarkLogger { +public: + /// For odometry, e.g., Z, Roll, Pitch (based on diff_drive controller) + static constexpr auto COVARIANCE_UNKNOWN = 1000000.0; + + GlobalPlannerLogger() = default; + + void init(ros::NodeHandle& nh); + + void start(); + + /// Performs writes to files that this class handles, most recent planner data is used + void update(double timestamp, const GlobalPlannerData& planner); + + void finish(); + + /// Converts a given global planing data into string description + static std::string plannerToString(const GlobalPlannerData& planner); + + /// Converts a given @ref str string description into a global planing data + static std::pair plannerFromString(const std::string& str); + +protected: + std::fstream log_file_; +}; + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/src/global_planner_logger.cpp b/srpb_logger/src/global_planner_logger.cpp new file mode 100644 index 0000000..878d8a8 --- /dev/null +++ b/srpb_logger/src/global_planner_logger.cpp @@ -0,0 +1,107 @@ +#include + +// helper functions +#include + +namespace srpb { +namespace logger { + +void GlobalPlannerLogger::init(ros::NodeHandle& nh) { + BenchmarkLogger::init(nh); + log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, "gplanner"); +} + +void GlobalPlannerLogger::start() { + if (!log_file_.is_open()) { + BenchmarkLogger::start(log_file_, log_filename_); + std::cout << "[ SRPB] Started the first log file for a global planner" << std::endl; + } else if (log_file_.is_open()) { + BenchmarkLogger::finish(log_file_); + std::cout << "[ SRPB] Finishing a global planner log file" << std::endl; + BenchmarkLogger::startNextPart(log_file_, log_filename_); + std::cout << "[ SRPB] Started next log file for a global planner" << std::endl; + } + + // increment the counter used for numbering goals + incrementLogPartCounter(); +} + +void GlobalPlannerLogger::update(double timestamp, const GlobalPlannerData& planner) { + if 
(!log_file_) { + throw std::runtime_error("File for GlobalPlannerLogger was not properly created!"); + } + + std::stringstream ss; + ss.setf(std::ios::fixed); + ss << std::setw(9) << std::setprecision(4) << timestamp << " "; + ss << GlobalPlannerLogger::plannerToString(planner) << std::endl; + log_file_ << ss.str(); +} + +void GlobalPlannerLogger::finish() { + BenchmarkLogger::finish(log_file_); +} + +std::string GlobalPlannerLogger::plannerToString(const GlobalPlannerData& planner) { + std::stringstream ss; + ss.setf(std::ios::fixed); + /* 0 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionX() << " "; + /* 1 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionY() << " "; + /* 2 */ ss << std::setw(9) << std::setprecision(4) << planner.getPositionZ() << " "; + /* 3 */ ss << std::setw(9) << std::setprecision(4) << planner.getOrientationYaw() << " "; + /* 4 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalPositionX() << " "; + /* 5 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalPositionY() << " "; + /* 6 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalPositionZ() << " "; + /* 7 */ ss << std::setw(9) << std::setprecision(4) << planner.getGoalOrientationYaw() << " "; + /* 8 */ ss << std::setw(4) << std::setprecision(1) << static_cast(planner.getPlanningStatus()) << " "; + /* 9 */ ss << std::setw(9) << std::setprecision(4) << planner.getPlanningTime() << " "; + /* 10 */ ss << std::setw(9) << std::setprecision(1) << planner.getPlanSize(); + return ss.str(); +} + +std::pair GlobalPlannerLogger::plannerFromString(const std::string& str) { + auto vals = people_msgs_utils::parseString(str, " "); + if (vals.size() != 11) { + std::cout << "\x1B[31mFound corrupted data of a global planner:\r\n\t" << str << "\x1B[0m" << std::endl; + // dummy planner data + auto dummy_planner = GlobalPlannerData( + geometry_msgs::Pose(), + geometry_msgs::Pose(), + false, + 0.0, + 0 + ); + return {false, 
dummy_planner}; + } + + geometry_msgs::Pose pose; + pose.position.x = vals.at(0); + pose.position.y = vals.at(1); + pose.position.z = vals.at(2); + tf2::Quaternion quat; + quat.setRPY(0.0, 0.0, vals.at(3)); + pose.orientation.x = quat.getX(); + pose.orientation.y = quat.getY(); + pose.orientation.z = quat.getZ(); + pose.orientation.w = quat.getW(); + + geometry_msgs::Pose goal; + goal.position.x = vals.at(4); + goal.position.y = vals.at(5); + goal.position.z = vals.at(6); + quat.setRPY(0.0, 0.0, vals.at(7)); + goal.orientation.x = quat.getX(); + goal.orientation.y = quat.getY(); + goal.orientation.z = quat.getZ(); + goal.orientation.w = quat.getW(); + + bool planning_status = static_cast(static_cast(vals.at(8))); + double planning_time = vals.at(9); + double plan_size = vals.at(10); + + auto planner = GlobalPlannerData(pose, goal, planning_status, planning_time, plan_size); + return {true, planner}; +} + +} // namespace logger +} // namespace srpb diff --git a/srpb_logger/test/test_global_planner_logger.cpp b/srpb_logger/test/test_global_planner_logger.cpp new file mode 100644 index 0000000..9dbb151 --- /dev/null +++ b/srpb_logger/test/test_global_planner_logger.cpp @@ -0,0 +1,82 @@ +#include + +#include + +using namespace srpb::logger; + +// Test cases +TEST(ConversionTest, planner) { + geometry_msgs::Pose pose; + pose.position.x = 0.123; + pose.position.y = 10.654; + pose.position.z = 12.321; + // only `yaw` is logged + tf2::Quaternion pose_quat; + pose_quat.setRPY(0.0, 0.0, 0.632); + pose.orientation.x = pose_quat.getX(); + pose.orientation.y = pose_quat.getY(); + pose.orientation.z = pose_quat.getZ(); + pose.orientation.w = pose_quat.getW(); + + geometry_msgs::Pose goal; + goal.position.x = 100.354; + goal.position.y = 40.123; + goal.position.z = 11.325; + // only `yaw` is logged + tf2::Quaternion goal_quat; + goal_quat.setRPY(0.0, 0.0, 1.5789); + goal.orientation.x = goal_quat.getX(); + goal.orientation.y = goal_quat.getY(); + goal.orientation.z = 
goal_quat.getZ(); + goal.orientation.w = goal_quat.getW(); + + bool planning_success = true; + double planning_time = 0.135; + size_t plan_size = 497; + + GlobalPlannerData data_in(pose, goal, planning_success, planning_time, plan_size); + + // 1. evaluate if attributes were saved correctly + ASSERT_EQ(data_in.getPositionX(), pose.position.x); + ASSERT_EQ(data_in.getPositionY(), pose.position.y); + ASSERT_EQ(data_in.getPositionZ(), pose.position.z); + ASSERT_EQ(data_in.getOrientation().x, pose.orientation.x); + ASSERT_EQ(data_in.getOrientation().y, pose.orientation.y); + ASSERT_EQ(data_in.getOrientation().z, pose.orientation.z); + ASSERT_EQ(data_in.getOrientation().w, pose.orientation.w); + ASSERT_EQ(data_in.getGoalPositionX(), goal.position.x); + ASSERT_EQ(data_in.getGoalPositionY(), goal.position.y); + ASSERT_EQ(data_in.getGoalPositionZ(), goal.position.z); + ASSERT_DOUBLE_EQ(data_in.getGoalOrientationYaw(), tf2::getYaw(goal_quat)); + ASSERT_EQ(data_in.getPlanningStatus(), planning_success); + ASSERT_EQ(data_in.getPlanningTime(), planning_time); + ASSERT_EQ(data_in.getPlanSize(), plan_size); + + auto data_in_str = GlobalPlannerLogger::plannerToString(data_in); + auto conversion = GlobalPlannerLogger::plannerFromString(data_in_str); + + // conversion successful + ASSERT_TRUE(conversion.first); + GlobalPlannerData data_out = conversion.second; + + // 2. 
check if "out" attributes are equal to "in"'s + ASSERT_EQ(data_out.getPositionX(), data_in.getPositionX()); + ASSERT_EQ(data_out.getPositionY(), data_in.getPositionY()); + ASSERT_EQ(data_out.getPositionZ(), data_in.getPositionZ()); + ASSERT_EQ(data_out.getOrientation().x, data_in.getOrientation().x); + ASSERT_EQ(data_out.getOrientation().y, data_in.getOrientation().y); + ASSERT_EQ(data_out.getOrientation().z, data_in.getOrientation().z); + ASSERT_EQ(data_out.getOrientation().w, data_in.getOrientation().w); + ASSERT_EQ(data_out.getGoalPositionX(), data_in.getGoalPositionX()); + ASSERT_EQ(data_out.getGoalPositionY(), data_in.getGoalPositionY()); + ASSERT_EQ(data_out.getGoalPositionZ(), data_in.getGoalPositionZ()); + ASSERT_EQ(data_out.getGoalOrientationYaw(), data_in.getGoalOrientationYaw()); + ASSERT_EQ(data_out.getPlanningStatus(), data_in.getPlanningStatus()); + ASSERT_EQ(data_out.getPlanningTime(), data_in.getPlanningTime()); + ASSERT_EQ(data_out.getPlanSize(), data_in.getPlanSize()); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} From ac5ea9d18a74bf910d8b76f926d4a29c9def7b8a Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Tue, 21 Nov 2023 02:14:31 +0100 Subject: [PATCH 22/81] `evaluation` - global path planner logs loaded into the main executable [#25] - adjusted `evaluate_from_dir` script invoking the executable --- srpb_evaluation/scripts/evaluate_from_dir.sh | 2 +- srpb_evaluation/src/main.cpp | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/srpb_evaluation/scripts/evaluate_from_dir.sh b/srpb_evaluation/scripts/evaluate_from_dir.sh index 42c857c..a9f41b8 100755 --- a/srpb_evaluation/scripts/evaluate_from_dir.sh +++ b/srpb_evaluation/scripts/evaluate_from_dir.sh @@ -43,4 +43,4 @@ echo "groups data: $newest_groups" echo "global planner data: $newest_gplanner" echo "" -rosrun srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $safety_dist +rosrun 
srpb_evaluation srpb_evaluation $newest_robot $newest_people $newest_groups $newest_gplanner $safety_dist diff --git a/srpb_evaluation/src/main.cpp b/srpb_evaluation/src/main.cpp index 098a4b9..156d739 100644 --- a/srpb_evaluation/src/main.cpp +++ b/srpb_evaluation/src/main.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "srpb_evaluation/utils.h" @@ -8,12 +9,13 @@ using namespace srpb::logger; using namespace srpb::evaluation; int main(int argc, char* argv[]) { - if (argc != 5) { + if (argc != 6) { printf( "Please input\r\n" "\t* the path to the log file of the robot\r\n" "\t* the path to the log file of the people\r\n" "\t* the path to the log file of the people groups\r\n" + "\t* the path to the log file of the global planner\r\n" "\t* and value of the safety distance [m].\r\n" ); return 1; @@ -22,7 +24,8 @@ int main(int argc, char* argv[]) { auto file_robot = std::string(argv[1]); auto file_people = std::string(argv[2]); auto file_groups = std::string(argv[3]); - auto safety_distance = std::atof(argv[4]); + auto file_gplanner = std::string(argv[4]); + auto safety_distance = std::atof(argv[5]); // goal reached values double goal_tolerance_xy = 0.2; @@ -58,6 +61,7 @@ int main(int argc, char* argv[]) { auto timed_robot_data = parseFile(file_robot, &RobotLogger::robotFromString); auto timed_people_data = parseFile(file_people, &PeopleLogger::personFromString); auto timed_groups_data = parseFile(file_groups, &PeopleLogger::groupFromString); + auto timed_gplanner_data = parseFile(file_gplanner, &GlobalPlannerLogger::plannerFromString); // since Person and Group are logged in separation, so by default Group does not contain members, only their IDs timed_groups_data = fillGroupsWithMembers(timed_groups_data, timed_people_data); From 5ae7b0ac922b53ab71a1848f3b87482fcc8249d4 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 6 Dec 2023 16:12:49 +0100 Subject: [PATCH 23/81] `evaluation` - fixed typo in `create_excel_from_results` (metric name) --- 
srpb_evaluation/scripts/create_excel_from_results.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 47fd863..77a742b 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -107,7 +107,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: spls_grp_row = ['Samples groups'] m_goal_row = ['m_goal'] m_obs_row = ['m_obs'] - m_eff_row = ['m_eff'] + m_mef_row = ['m_mef'] m_cef_row = ['m_cef'] m_cre_row = ['m_cre'] m_vsm_row = ['m_vsm'] @@ -133,7 +133,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: spls_grp_row.append(result['s_grp']) m_goal_row.append(result['m_goal']) m_obs_row.append(result['m_obs']) - m_eff_row.append(result['m_mef']) + m_mef_row.append(result['m_mef']) m_cef_row.append(result['m_cef']) m_cre_row.append(result['m_cre']) m_vsm_row.append(result['m_vsm']) @@ -157,7 +157,7 @@ def prepare_sheet_rows(results_total: Dict) -> List[List]: spls_grp_row, m_goal_row, m_obs_row, - m_eff_row, + m_mef_row, m_cef_row, m_cre_row, m_vsm_row, From 1728b746876b0c2950b3a79b4b6a1456309d4cc8 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 6 Dec 2023 16:20:55 +0100 Subject: [PATCH 24/81] `evaluation` - script for renaming directories made more robust - leaves the already renamed directories untouched - assigns the ID regarding the list of existing, already renamed directories --- .../scripts/rename_dirs_matching_pattern.sh | 57 ++++++++++++++++--- 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh index 84b912e..34b6a62 100755 --- a/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh +++ b/srpb_evaluation/scripts/rename_dirs_matching_pattern.sh @@ -22,17 +22,58 @@ pattern_to_replace="$4" cd "$main_dir" || 
exit 1 +# Initialize the highest_number +highest_number="0" + # Loop through directories matching the provided pattern for dir in *"$pattern_to_match"*; do # Check if the directory does not contain the differentiate pattern if [[ $dir != *"$pattern_to_differentiate"* ]]; then - # Extract the base name without the pattern and append the replace pattern - new_dir=$(echo "$dir" | sed "s/$pattern_to_match/$pattern_to_replace/")"" - - # Rename the directory - mv "$dir" "$new_dir" - - # Print a message indicating the rename - echo "Renamed: $dir -> $new_dir" + # Extract the number part using sed + number=$(echo "$dir" | sed "s/.*${pattern_to_replace}\([0-9]\+\).*/\1/") + + # Check if the extracted string is a number + if [[ "$number" =~ ^[0-9]+$ ]]; then + # Compare the numbers + if [ "$number" -gt "$highest_number" ]; then + highest_number="$number" + fi + fi + fi +done + +# Loop through directories matching the provided pattern +for dir in *"$pattern_to_match"*; do + # Check if the directory matching the pattern exists + if [ ! 
-d "$dir" ]; then + continue + fi + # Check if the directory does not contain the differentiate pattern + if [[ $dir != *"$pattern_to_differentiate"* ]]; then + # Check if the directory does not contain the replace pattern + if [[ $dir != *"$pattern_to_replace"* ]]; then + # Extract the base name without the pattern and append the replace pattern + new_dir=$(echo "$dir" | sed "s/$pattern_to_match/$pattern_to_replace/")"" + + # trim the number from a directory named with "pattern_to_match" and replace with the "highest" + # from the previous loop + number_match=$(echo "$dir" | sed "s/.*${pattern_to_match}\([0-9]*\).*/\1/") + + # interpret as non-octal, ref: https://unix.stackexchange.com/a/406445 + highest_number=$(echo $((10#$highest_number))) + # increment + ((highest_number++)) + # adds a leading zero for a digit + number_replace=$(printf "%02d" $highest_number) + + new_dir_num=$(echo "$new_dir" | sed "s/$number_match/$number_replace/")"" + + # Rename the directory + mv "$dir" "$new_dir_num" + + # Print a message indicating the rename + echo "Renamed: $dir -> $new_dir_num" + fi fi done +exit 0 From eaa6d004189882b8d8bc3c640185f7cd1d55676b Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 6 Dec 2023 16:21:29 +0100 Subject: [PATCH 25/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index 6e5f634..18e2865 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.5 + 0.9.6 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From 381d670b8e14df7f9157f6b3b7b5a799697106a7 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 12:58:16 +0100 Subject: [PATCH 26/81] `logger` - `srpb` params put into a namespace --- srpb_logger/include/srpb_logger/benchmark_logger.h | 4 ++-- 1 file changed, 
2 insertions(+), 2 deletions(-) diff --git a/srpb_logger/include/srpb_logger/benchmark_logger.h b/srpb_logger/include/srpb_logger/benchmark_logger.h index f3a1933..d7d0b85 100644 --- a/srpb_logger/include/srpb_logger/benchmark_logger.h +++ b/srpb_logger/include/srpb_logger/benchmark_logger.h @@ -21,9 +21,9 @@ class BenchmarkLogger { public: virtual void init(ros::NodeHandle& nh) { //path to save the recorded data - nh.param("log_filename", log_filename_, std::string("log.txt")); + nh.param("srpb/log_filename", log_filename_, std::string("log.txt")); // ID of the frame that poses will be expressed in - nh.param("log_frame_id", target_frame_, target_frame_); + nh.param("srpb/log_frame_id", target_frame_, target_frame_); // append the timestamp to filename log_filename_ = BenchmarkLogger::appendToFilename(log_filename_, BenchmarkLogger::timeToString()); } From 9f7202b84555813d6574ae959cc3b25d5e131187 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 15:33:01 +0100 Subject: [PATCH 27/81] `logger` - `RobotData` class extended with the `getPoseWithCovariance` and `getVelocityWithCovariance` --- srpb_logger/include/srpb_logger/robot_data.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/srpb_logger/include/srpb_logger/robot_data.h b/srpb_logger/include/srpb_logger/robot_data.h index 0657fe8..e4b0b9f 100644 --- a/srpb_logger/include/srpb_logger/robot_data.h +++ b/srpb_logger/include/srpb_logger/robot_data.h @@ -73,6 +73,10 @@ class RobotData { vel_ = pose_; } + inline geometry_msgs::PoseWithCovariance getPoseWithCovariance() const { + return pose_; + } + inline geometry_msgs::Pose getPose() const { return pose_.pose; } @@ -128,6 +132,10 @@ class RobotData { return pose_.covariance.at(COV_YAWYAW_INDEX); } + inline geometry_msgs::PoseWithCovariance getVelocityWithCovariance() const { + return vel_; + } + inline geometry_msgs::Pose getVelocity() const { return vel_.pose; } From f26074ca608dcc82820bd01e05b8b9582d317d40 Mon Sep 17 00:00:00 2001 
From: Jarek Karwowski Date: Wed, 27 Dec 2023 15:40:45 +0100 Subject: [PATCH 28/81] `logger` - `RobotLogger` extended with a feature of receiving computation times from external planners via the ROS topic (Float64MultiArray) - this is not necessary with local planners that act as navigation stack plugins (i.e., are not external in terms of `srpb_move_base`) - see https://github.com/rayvburn/external_local_planner for reference --- srpb_logger/CMakeLists.txt | 2 + .../include/srpb_logger/robot_logger.h | 10 +++++ srpb_logger/package.xml | 1 + srpb_logger/src/robot_logger.cpp | 44 +++++++++++++++++++ 4 files changed, 57 insertions(+) diff --git a/srpb_logger/CMakeLists.txt b/srpb_logger/CMakeLists.txt index fb050dc..82266cd 100644 --- a/srpb_logger/CMakeLists.txt +++ b/srpb_logger/CMakeLists.txt @@ -5,6 +5,7 @@ find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils @@ -19,6 +20,7 @@ catkin_package( ${PROJECT_NAME}_lib CATKIN_DEPENDS roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils diff --git a/srpb_logger/include/srpb_logger/robot_logger.h b/srpb_logger/include/srpb_logger/robot_logger.h index 22df311..fdb0824 100644 --- a/srpb_logger/include/srpb_logger/robot_logger.h +++ b/srpb_logger/include/srpb_logger/robot_logger.h @@ -4,6 +4,7 @@ #include "robot_data.h" #include +#include // helper functions #include @@ -50,10 +51,14 @@ class RobotLogger: public BenchmarkLogger { protected: void localizationCB(const nav_msgs::OdometryConstPtr& msg); + void externalComputationTimesCB(const std_msgs::Float64MultiArrayConstPtr& msg); + std::fstream log_file_; std::mutex cb_mutex_; ros::Subscriber localization_sub_; + /// Subscriber that is valid once the given topic param is non-empty + ros::Subscriber external_comp_times_sub_; /// When this flag is true, any incoming updates of robot pose and velocity will not be accepted std::atomic latched_; @@ -61,6 +66,11 @@ class RobotLogger: public BenchmarkLogger { 
geometry_msgs::PoseWithCovariance robot_pose_; /// Newest velocity with covariance of the robot (expressed in local coordinate system) geometry_msgs::PoseWithCovariance robot_vel_; + + /// Data index of the multi-dimension array that a computation time is stored under + size_t external_comp_time_array_index_; + /// Stores the newest value of the computation time obtained via the @ref external_comp_times_sub_ subscriber + std::atomic external_comp_time_; }; } // namespace logger diff --git a/srpb_logger/package.xml b/srpb_logger/package.xml index 6d57afb..d3e9f7d 100644 --- a/srpb_logger/package.xml +++ b/srpb_logger/package.xml @@ -21,6 +21,7 @@ tf2_geometry_msgs roscpp + std_msgs geometry_msgs people_msgs people_msgs_utils diff --git a/srpb_logger/src/robot_logger.cpp b/srpb_logger/src/robot_logger.cpp index b0e9c40..5b33f9b 100644 --- a/srpb_logger/src/robot_logger.cpp +++ b/srpb_logger/src/robot_logger.cpp @@ -19,6 +19,23 @@ void RobotLogger::init(ros::NodeHandle& nh) { 1, boost::bind(&RobotLogger::localizationCB, this, _1) ); + + // obtain the name of the topic to retrieve the computation times of the external local planner (not a navstack + // plugin); if empty, computation times obtained via the @ref update call will be used (default) + external_comp_time_.store(NAN); + std::string computation_time_topic(""); + int computation_time_array_idx = -1; + nh.param("srpb/external_computation_times_topic", computation_time_topic, computation_time_topic); + nh.param("srpb/external_computation_times_array_index", computation_time_array_idx, computation_time_array_idx); + + if (!computation_time_topic.empty() && computation_time_array_idx >= 0) { + external_comp_time_array_index_ = static_cast(computation_time_array_idx); + external_comp_times_sub_ = nh.subscribe( + computation_time_topic, + 10, + boost::bind(&RobotLogger::externalComputationTimesCB, this, _1) + ); + } } void RobotLogger::start() { @@ -56,6 +73,16 @@ void RobotLogger::update(double timestamp, const 
RobotData& robot) { robot.getLocalPlanningTime() ); } + // modify local planning time, if required and valid data was received + if (!std::isnan(external_comp_time_.load())) { + robot_data = RobotData( + robot_data.getPoseWithCovariance(), + robot_data.getVelocityWithCovariance(), + robot_data.getGoal(), + robot_data.getDistToObstacle(), + external_comp_time_.load() + ); + } // (enable the asynchronous updates of pose and velocity latched_.store(false); @@ -201,5 +228,22 @@ void RobotLogger::localizationCB(const nav_msgs::OdometryConstPtr& msg) { robot_vel_ = vel; } +void RobotLogger::externalComputationTimesCB(const std_msgs::Float64MultiArrayConstPtr& msg) { + size_t required_elems = external_comp_time_array_index_ + 1; + // verify that sufficient number of elements is available + if (msg->data.size() < required_elems) { + std::cout << + "\x1B[31m" << + "[ SRPB] RobotLogger did not receive a proper multi array at the " << + "`" << external_comp_times_sub_.getTopic().c_str() << "` topic. 
" << + "Got " << msg->data.size() << " data elements, whereas at least " << + required_elems << " are required" << + "\x1B[0m" << + std::endl; + return; + } + external_comp_time_.store(static_cast(msg->data.at(external_comp_time_array_index_))); +} + } // namespace srpb } // namespace logger From c3f205ad52da5fa1ec719ab4dcb28b9e06aacc77 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 15:41:04 +0100 Subject: [PATCH 29/81] `logger` - version bumped --- srpb_logger/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_logger/package.xml b/srpb_logger/package.xml index d3e9f7d..df0c33c 100644 --- a/srpb_logger/package.xml +++ b/srpb_logger/package.xml @@ -2,7 +2,7 @@ srpb_logger - 0.9.3 + 0.9.4 The srpb_logger package provides implementation of tools that logging and parsing of data necessary for evaluation of robot behaviour during navigation From 3512e9afa07455456f224df4272989e17aad74fc Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 16:32:55 +0100 Subject: [PATCH 30/81] `logger` - `RobotLogger` collects raw velocity commands of the local planner --- srpb_evaluation/test/test_rewinder.cpp | 1 + srpb_logger/include/srpb_logger/robot_data.h | 65 +++++++++++++++++++- srpb_logger/src/robot_logger.cpp | 23 +++++-- 3 files changed, 81 insertions(+), 8 deletions(-) diff --git a/srpb_evaluation/test/test_rewinder.cpp b/srpb_evaluation/test/test_rewinder.cpp index fbc0564..6325aea 100644 --- a/srpb_evaluation/test/test_rewinder.cpp +++ b/srpb_evaluation/test/test_rewinder.cpp @@ -10,6 +10,7 @@ const logger::RobotData DUMMY_ROBOT( geometry_msgs::PoseWithCovarianceStamped(), geometry_msgs::PoseWithCovarianceStamped(), geometry_msgs::PoseStamped(), + geometry_msgs::TwistStamped(), 0.5, 0.1 ); diff --git a/srpb_logger/include/srpb_logger/robot_data.h b/srpb_logger/include/srpb_logger/robot_data.h index e4b0b9f..23d5b55 100644 --- a/srpb_logger/include/srpb_logger/robot_data.h +++ 
b/srpb_logger/include/srpb_logger/robot_data.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -24,21 +25,31 @@ class RobotData { const geometry_msgs::PoseWithCovarianceStamped& robot_pose, const geometry_msgs::PoseWithCovarianceStamped& robot_vel, const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::TwistStamped& cmd_vel, double obstacle_distance, double exec_time - ): RobotData(robot_pose.pose, robot_vel.pose, goal_pose.pose, obstacle_distance, exec_time) {} + ): RobotData( + robot_pose.pose, + robot_vel.pose, + goal_pose.pose, + cmd_vel.twist, + obstacle_distance, + exec_time + ) {} /// Full constructor for the RobotLogger RobotData( const geometry_msgs::PoseWithCovariance& robot_pose, const geometry_msgs::PoseWithCovariance& robot_vel, const geometry_msgs::Pose& goal_pose, + const geometry_msgs::Twist& cmd_vel, double obstacle_distance, double exec_time ): pose_(robot_pose), vel_(robot_vel), goal_(goal_pose), + command_(cmd_vel), obstacle_distance_(obstacle_distance), local_plan_exec_time_(exec_time), partial_(false) @@ -47,17 +58,38 @@ class RobotData { /// Partial constructor for the `move_base` node RobotData( const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::TwistStamped& cmd_vel, double obstacle_distance, double exec_time - ): RobotData(goal_pose.pose, obstacle_distance, exec_time) {} + ): RobotData( + goal_pose.pose, + cmd_vel.twist, + obstacle_distance, + exec_time + ) {} + + /// Partial constructor for the `move_base` node + RobotData( + const geometry_msgs::PoseStamped& goal_pose, + const geometry_msgs::Twist& cmd_vel, + double obstacle_distance, + double exec_time + ): RobotData( + goal_pose.pose, + cmd_vel, + obstacle_distance, + exec_time + ) {} /// Partial constructor for the `move_base` node RobotData( const geometry_msgs::Pose& goal_pose, + const geometry_msgs::Twist& cmd_vel, double obstacle_distance, double exec_time ): goal_(goal_pose), + command_(cmd_vel), obstacle_distance_(obstacle_distance), 
local_plan_exec_time_(exec_time), partial_(true) @@ -203,6 +235,34 @@ class RobotData { return tf2::getYaw(goal_.orientation); } + inline geometry_msgs::Twist getVelocityCommand() const { + return command_; + } + + inline double getVelocityCommandLinearX() const { + return command_.linear.x; + } + + inline double getVelocityCommandLinearY() const { + return command_.linear.y; + } + + inline double getVelocityCommandLinearZ() const { + return command_.linear.z; + } + + inline double getVelocityCommandAngularX() const { + return command_.angular.x; + } + + inline double getVelocityCommandAngularY() const { + return command_.angular.y; + } + + inline double getVelocityCommandAngularZ() const { + return command_.angular.z; + } + inline double getDistToObstacle() const { return obstacle_distance_; } @@ -221,6 +281,7 @@ class RobotData { geometry_msgs::PoseWithCovariance pose_; geometry_msgs::PoseWithCovariance vel_; geometry_msgs::Pose goal_; + geometry_msgs::Twist command_; double obstacle_distance_; double local_plan_exec_time_; diff --git a/srpb_logger/src/robot_logger.cpp b/srpb_logger/src/robot_logger.cpp index 5b33f9b..5699c03 100644 --- a/srpb_logger/src/robot_logger.cpp +++ b/srpb_logger/src/robot_logger.cpp @@ -69,6 +69,7 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { robot_pose_, robot_vel_, robot.getGoal(), + robot.getVelocityCommand(), robot.getDistToObstacle(), robot.getLocalPlanningTime() ); @@ -79,6 +80,7 @@ void RobotLogger::update(double timestamp, const RobotData& robot) { robot_data.getPoseWithCovariance(), robot_data.getVelocityWithCovariance(), robot_data.getGoal(), + robot_data.getVelocityCommand(), robot_data.getDistToObstacle(), external_comp_time_.load() ); @@ -126,20 +128,24 @@ std::string RobotLogger::robotToString(const RobotData& robot) { /* 18 */ ss << std::setw(9) << std::setprecision(4) << robot.getGoalPositionX() << " "; /* 19 */ ss << std::setw(9) << std::setprecision(4) << robot.getGoalPositionY() << " "; /* 20 
*/ ss << std::setw(9) << std::setprecision(4) << robot.getGoalOrientationYaw() << " "; - /* 21 */ ss << std::setw(9) << std::setprecision(4) << robot.getDistToObstacle() << " "; - /* 22 */ ss << std::setw(9) << std::setprecision(4) << robot.getLocalPlanningTime(); + /* 21 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandLinearX() << " "; + /* 22 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandLinearY() << " "; + /* 23 */ ss << std::setw(9) << std::setprecision(4) << robot.getVelocityCommandAngularZ() << " "; + /* 24 */ ss << std::setw(9) << std::setprecision(4) << robot.getDistToObstacle() << " "; + /* 25 */ ss << std::setw(9) << std::setprecision(4) << robot.getLocalPlanningTime(); return ss.str(); } std::pair RobotLogger::robotFromString(const std::string& str) { auto vals = people_msgs_utils::parseString(str, " "); - if (vals.size() != 23) { + if (vals.size() != 26) { std::cout << "\x1B[31mFound corrupted data of a robot:\r\n\t" << str << "\x1B[0m" << std::endl; // dummy robot data auto dummy_robot = RobotData( geometry_msgs::PoseWithCovariance(), geometry_msgs::PoseWithCovariance(), geometry_msgs::Pose(), + geometry_msgs::Twist(), 0.0, 0.0 ); @@ -194,10 +200,15 @@ std::pair RobotLogger::robotFromString(const std::string& str) goal.orientation.z = quat.getZ(); goal.orientation.w = quat.getW(); - double obst_dist = vals.at(21); - double exec_time = vals.at(22); + geometry_msgs::Twist cmd_vel; + cmd_vel.linear.x = vals.at(21); + cmd_vel.linear.y = vals.at(22); + cmd_vel.angular.z = vals.at(23); - auto robot = RobotData(pose, vel, goal, obst_dist, exec_time); + double obst_dist = vals.at(24); + double exec_time = vals.at(25); + + auto robot = RobotData(pose, vel, goal, cmd_vel, obst_dist, exec_time); return {true, robot}; } From 871fa88545d18c5b36a9e0bed00dc0b9e738a215 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 16:35:12 +0100 Subject: [PATCH 31/81] `logger` - added a unit test for 
`RobotLogger` conversions --- srpb_logger/CMakeLists.txt | 5 + srpb_logger/test/test_robot_logger.cpp | 149 +++++++++++++++++++++++++ 2 files changed, 154 insertions(+) create mode 100644 srpb_logger/test/test_robot_logger.cpp diff --git a/srpb_logger/CMakeLists.txt b/srpb_logger/CMakeLists.txt index 82266cd..682a492 100644 --- a/srpb_logger/CMakeLists.txt +++ b/srpb_logger/CMakeLists.txt @@ -72,4 +72,9 @@ if (CATKIN_ENABLE_TESTING) if(TARGET test_global_planner_logger) target_link_libraries(test_global_planner_logger ${PROJECT_NAME}_lib) endif() + + catkin_add_gtest(test_robot_logger test/test_robot_logger.cpp) + if(TARGET test_robot_logger) + target_link_libraries(test_robot_logger ${PROJECT_NAME}_lib) + endif() endif() diff --git a/srpb_logger/test/test_robot_logger.cpp b/srpb_logger/test/test_robot_logger.cpp new file mode 100644 index 0000000..0fa2c95 --- /dev/null +++ b/srpb_logger/test/test_robot_logger.cpp @@ -0,0 +1,149 @@ +#include + +#include + +using namespace srpb::logger; + +// Test cases +TEST(ConversionTest, robot) { + geometry_msgs::PoseWithCovarianceStamped pose; + pose.pose.pose.position.x = 0.123; + pose.pose.pose.position.y = 10.654; + pose.pose.pose.position.z = 12.321; + tf2::Quaternion pose_quat; + pose_quat.setRPY(0.0, 0.0, 0.632); + pose.pose.pose.orientation.x = pose_quat.getX(); + pose.pose.pose.orientation.y = pose_quat.getY(); + pose.pose.pose.orientation.z = pose_quat.getZ(); + pose.pose.pose.orientation.w = pose_quat.getW(); + + geometry_msgs::PoseWithCovarianceStamped vel; + vel.pose.pose.position.x = 8.963; + vel.pose.pose.position.y = 6.852; + vel.pose.pose.position.z = 1.321; + tf2::Quaternion vel_quat; + vel_quat.setRPY(0.0, 0.0, 0.987); + vel.pose.pose.orientation.x = vel_quat.getX(); + vel.pose.pose.orientation.y = vel_quat.getY(); + vel.pose.pose.orientation.z = vel_quat.getZ(); + vel.pose.pose.orientation.w = vel_quat.getW(); + + geometry_msgs::PoseStamped goal; + goal.pose.position.x = 100.354; + goal.pose.position.y = 
40.123; + goal.pose.position.z = 11.325; + tf2::Quaternion goal_quat; + goal_quat.setRPY(0.0, 0.0, 1.3572); + goal.pose.orientation.x = goal_quat.getX(); + goal.pose.orientation.y = goal_quat.getY(); + goal.pose.orientation.z = goal_quat.getZ(); + goal.pose.orientation.w = goal_quat.getW(); + + geometry_msgs::TwistStamped cmd; + cmd.twist.linear.x = 0.478; + cmd.twist.linear.y = 0.321; + cmd.twist.angular.z = 0.741; + + double obstacle_distance = 0.135; + double exec_time = 0.497; + + RobotData data_in( + pose, + vel, + goal, + cmd, + obstacle_distance, + exec_time + ); + + // 1. evaluate if attributes were saved correctly + ASSERT_EQ(data_in.getPositionX(), pose.pose.pose.position.x); + ASSERT_EQ(data_in.getPositionY(), pose.pose.pose.position.y); + ASSERT_EQ(data_in.getPositionZ(), pose.pose.pose.position.z); + ASSERT_EQ(data_in.getOrientation().x, pose.pose.pose.orientation.x); + ASSERT_EQ(data_in.getOrientation().y, pose.pose.pose.orientation.y); + ASSERT_EQ(data_in.getOrientation().z, pose.pose.pose.orientation.z); + ASSERT_EQ(data_in.getOrientation().w, pose.pose.pose.orientation.w); + ASSERT_EQ(data_in.getCovariancePoseXX(), pose.pose.covariance.at(RobotData::COV_XX_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseXY(), pose.pose.covariance.at(RobotData::COV_XY_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYX(), pose.pose.covariance.at(RobotData::COV_YX_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYY(), pose.pose.covariance.at(RobotData::COV_YY_INDEX)); + ASSERT_EQ(data_in.getCovariancePoseYawYaw(), pose.pose.covariance.at(RobotData::COV_YAWYAW_INDEX)); + + ASSERT_EQ(data_in.getVelocityX(), vel.pose.pose.position.x); + ASSERT_EQ(data_in.getVelocityY(), vel.pose.pose.position.y); + ASSERT_EQ(data_in.getVelocityZ(), vel.pose.pose.position.z); + ASSERT_EQ(data_in.getVelocityTheta(), tf2::getYaw(vel.pose.pose.orientation)); + ASSERT_EQ(data_in.getCovarianceVelocityXX(), vel.pose.covariance.at(RobotData::COV_XX_INDEX)); + 
ASSERT_EQ(data_in.getCovarianceVelocityXY(), vel.pose.covariance.at(RobotData::COV_XY_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityYX(), vel.pose.covariance.at(RobotData::COV_YX_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityYY(), vel.pose.covariance.at(RobotData::COV_YY_INDEX)); + ASSERT_EQ(data_in.getCovarianceVelocityThTh(), vel.pose.covariance.at(RobotData::COV_YAWYAW_INDEX)); + + ASSERT_EQ(data_in.getGoalPositionX(), goal.pose.position.x); + ASSERT_EQ(data_in.getGoalPositionY(), goal.pose.position.y); + ASSERT_EQ(data_in.getGoalPositionZ(), goal.pose.position.z); + ASSERT_DOUBLE_EQ(data_in.getGoalOrientationYaw(), tf2::getYaw(goal_quat)); + + ASSERT_EQ(data_in.getVelocityCommandLinearX(), cmd.twist.linear.x); + ASSERT_EQ(data_in.getVelocityCommandLinearY(), cmd.twist.linear.y); + ASSERT_EQ(data_in.getVelocityCommandLinearZ(), cmd.twist.linear.z); + ASSERT_EQ(data_in.getVelocityCommandAngularX(), cmd.twist.angular.x); + ASSERT_EQ(data_in.getVelocityCommandAngularY(), cmd.twist.angular.y); + ASSERT_EQ(data_in.getVelocityCommandAngularZ(), cmd.twist.angular.z); + + ASSERT_EQ(data_in.getDistToObstacle(), obstacle_distance); + ASSERT_EQ(data_in.getLocalPlanningTime(), exec_time); + + auto data_in_str = RobotLogger::robotToString(data_in); + auto conversion = RobotLogger::robotFromString(data_in_str); + + // conversion successful + ASSERT_TRUE(conversion.first); + RobotData data_out = conversion.second; + + // 2. 
check if "out" attributes are equal to "in"'s + ASSERT_EQ(data_out.getPositionX(), data_in.getPositionX()); + ASSERT_EQ(data_out.getPositionY(), data_in.getPositionY()); + ASSERT_EQ(data_out.getPositionZ(), data_in.getPositionZ()); + ASSERT_EQ(data_out.getOrientation().x, data_in.getOrientation().x); + ASSERT_EQ(data_out.getOrientation().y, data_in.getOrientation().y); + ASSERT_EQ(data_out.getOrientation().z, data_in.getOrientation().z); + ASSERT_EQ(data_out.getOrientation().w, data_in.getOrientation().w); + ASSERT_EQ(data_out.getCovariancePoseXX(), data_in.getCovariancePoseXX()); + ASSERT_EQ(data_out.getCovariancePoseXY(), data_in.getCovariancePoseXY()); + ASSERT_EQ(data_out.getCovariancePoseYX(), data_in.getCovariancePoseYX()); + ASSERT_EQ(data_out.getCovariancePoseYY(), data_in.getCovariancePoseYY()); + ASSERT_EQ(data_out.getCovariancePoseYawYaw(), data_in.getCovariancePoseYawYaw()); + + ASSERT_EQ(data_out.getVelocityX(), data_in.getVelocityX()); + ASSERT_EQ(data_out.getVelocityY(), data_in.getVelocityY()); + ASSERT_EQ(data_out.getVelocityZ(), data_in.getVelocityZ()); + ASSERT_EQ(data_out.getVelocityTheta(), data_in.getVelocityTheta()); + ASSERT_EQ(data_out.getCovarianceVelocityXX(), data_in.getCovarianceVelocityXX()); + ASSERT_EQ(data_out.getCovarianceVelocityXY(), data_in.getCovarianceVelocityXY()); + ASSERT_EQ(data_out.getCovarianceVelocityYX(), data_in.getCovarianceVelocityYX()); + ASSERT_EQ(data_out.getCovarianceVelocityYY(), data_in.getCovarianceVelocityYY()); + ASSERT_EQ(data_out.getCovarianceVelocityThTh(), data_in.getCovarianceVelocityThTh()); + + ASSERT_EQ(data_out.getGoalPositionX(), data_in.getGoalPositionX()); + ASSERT_EQ(data_out.getGoalPositionY(), data_in.getGoalPositionY()); + // not logged + // ASSERT_EQ(data_out.getGoalPositionZ(), data_in.getGoalPositionZ()); + ASSERT_EQ(data_out.getGoalOrientationYaw(), data_in.getGoalOrientationYaw()); + + ASSERT_EQ(data_out.getVelocityCommandLinearX(), data_in.getVelocityCommandLinearX()); + 
ASSERT_EQ(data_out.getVelocityCommandLinearY(), data_in.getVelocityCommandLinearY()); + ASSERT_EQ(data_out.getVelocityCommandLinearZ(), data_in.getVelocityCommandLinearZ()); + ASSERT_EQ(data_out.getVelocityCommandAngularX(), data_in.getVelocityCommandAngularX()); + ASSERT_EQ(data_out.getVelocityCommandAngularY(), data_in.getVelocityCommandAngularY()); + ASSERT_EQ(data_out.getVelocityCommandAngularZ(), data_in.getVelocityCommandAngularZ()); + + ASSERT_EQ(data_out.getLocalPlanningTime(), data_in.getLocalPlanningTime()); + ASSERT_EQ(data_out.getDistToObstacle(), data_in.getDistToObstacle()); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} From 8f63b728716c2d591a4a991886d20793c63e0111 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 16:35:27 +0100 Subject: [PATCH 32/81] `logger` - version bumped --- srpb_logger/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_logger/package.xml b/srpb_logger/package.xml index df0c33c..ae381ec 100644 --- a/srpb_logger/package.xml +++ b/srpb_logger/package.xml @@ -2,7 +2,7 @@ srpb_logger - 0.9.4 + 0.9.5 The srpb_logger package provides implementation of tools that logging and parsing of data necessary for evaluation of robot behaviour during navigation From 85549be1e2bc11c16fe171591c194a889dae2d54 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 21:24:34 +0100 Subject: [PATCH 33/81] `evaluation` - extended the list of metrics saved in the `results` file, updated tooling scripts (metric names loaded from the CSV instead of hard-coded list) --- .../scripts/create_excel_from_results.py | 87 +++++-------------- .../scripts/excel_sheet_defines.py | 2 +- srpb_evaluation/src/utils.cpp | 51 +++++++---- 3 files changed, 55 insertions(+), 85 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 77a742b..c385037 100644 --- 
a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -101,77 +101,34 @@ def collect_results_planners(dir_path: str, planner_name: List[str]) -> Dict: def prepare_sheet_rows(results_total: Dict) -> List[List]: planners_row = ['Planner'] trials_row = ['Trial'] - file_row = ['File'] - spls_robot_row = ['Samples robot'] - spls_ppl_row = ['Samples people'] - spls_grp_row = ['Samples groups'] - m_goal_row = ['m_goal'] - m_obs_row = ['m_obs'] - m_mef_row = ['m_mef'] - m_cef_row = ['m_cef'] - m_cre_row = ['m_cre'] - m_vsm_row = ['m_vsm'] - m_hsm_row = ['m_hsm'] - m_path_row = ['m_path'] - m_chc_row = ['m_chc'] - m_osc_row = ['m_osc'] - m_bwd_row = ['m_bwd'] - m_irot_row = ['m_irot'] - m_psi_row = ['m_psi'] - m_fsi_row = ['m_fsi'] - m_dir_row = ['m_dir'] - m_psd_row = ['m_psd'] + + # Dict[str, List] + rows_dict = {} # iterate over keys for planner_key in results_total: for i, result in enumerate(results_total[planner_key]): planners_row.append(planner_key) trials_row.append(i + 1) - file_row.append(result['file']) - spls_robot_row.append(result['s_rbt']) - spls_ppl_row.append(result['s_ppl']) - spls_grp_row.append(result['s_grp']) - m_goal_row.append(result['m_goal']) - m_obs_row.append(result['m_obs']) - m_mef_row.append(result['m_mef']) - m_cef_row.append(result['m_cef']) - m_cre_row.append(result['m_cre']) - m_vsm_row.append(result['m_vsm']) - m_hsm_row.append(result['m_hsm']) - m_path_row.append(result['m_path']) - m_chc_row.append(result['m_chc']) - m_osc_row.append(result['m_osc']) - m_bwd_row.append(result['m_bwd']) - m_irot_row.append(result['m_inp']) - m_psi_row.append(result['m_psi']) - m_fsi_row.append(result['m_fsi']) - m_dir_row.append(result['m_dir']) - m_psd_row.append(result['m_psd']) - - return [ - planners_row, - trials_row, - file_row, - spls_robot_row, - spls_ppl_row, - spls_grp_row, - m_goal_row, - m_obs_row, - m_mef_row, - m_cef_row, - m_cre_row, - m_vsm_row, - m_hsm_row, - m_path_row, - 
m_chc_row, - m_osc_row, - m_bwd_row, - m_irot_row, - m_psi_row, - m_fsi_row, - m_dir_row, - m_psd_row - ] + # `result` is a dict with: + # - keys indicating individual metric names for the planner `planner_key` + # - values - corresponding values + for i, metric_key in enumerate(result.keys()): + if not metric_key in rows_dict.keys(): + # start with metric name, append the result in the first iteration (sheet's row) + rows_dict[str(metric_key)] = [metric_key, result[metric_key]] + else: + # continue with appending to an initialized list + rows_dict[str(metric_key)].append(result[metric_key]) + + # 2 special rows are prepared separately + metrics_rows = [planners_row, trials_row] + # metric-related rows are dicts - key is the metric ID, value is the list with the metric ID and values for + # subsequent trials; the loop below extracts the lists + for row_key in rows_dict.keys(): + metrics_rows.append(rows_dict[row_key]) + return metrics_rows + def cell_coords_to_sheet_cell_id(row: int, col: int): row_w_offset = row + 1 + 2 diff --git a/srpb_evaluation/scripts/excel_sheet_defines.py b/srpb_evaluation/scripts/excel_sheet_defines.py index a0646a3..2a85c81 100644 --- a/srpb_evaluation/scripts/excel_sheet_defines.py +++ b/srpb_evaluation/scripts/excel_sheet_defines.py @@ -3,4 +3,4 @@ # upper left cell - cell that initiates results table RESULT_INIT_COL = 'A' -RESULT_INIT_ROW = '27' +RESULT_INIT_ROW = '50' diff --git a/srpb_evaluation/src/utils.cpp b/srpb_evaluation/src/utils.cpp index 7f3b044..d02ae50 100644 --- a/srpb_evaluation/src/utils.cpp +++ b/srpb_evaluation/src/utils.cpp @@ -149,25 +149,38 @@ void createResultsFile( std::stringstream ss; ss.setf(std::ios::fixed); - ss << "s_rbt ," << std::setw(9) << std::setprecision(4) << samples_robot << std::endl; - ss << "s_ppl ," << std::setw(9) << std::setprecision(4) << samples_people << std::endl; - ss << "s_grp ," << std::setw(9) << std::setprecision(4) << samples_groups << std::endl; - ss << "m_goal ," << std::setw(9) << 
std::setprecision(4) << goal_reached.getValue() << std::endl; - ss << "m_obs ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValue() << std::endl; - ss << "m_mef ," << std::setw(9) << std::setprecision(4) << motion_efficiency.getValue() << std::endl; - ss << "m_cef ," << std::setw(9) << std::setprecision(4) << computational_efficiency.getValue() << std::endl; - ss << "m_cre ," << std::setw(9) << std::setprecision(4) << computational_time_repeatability.getValue() << std::endl; - ss << "m_vsm ," << std::setw(9) << std::setprecision(4) << velocity_smoothness.getValue() << std::endl; - ss << "m_hsm ," << std::setw(9) << std::setprecision(4) << heading_change_smoothness.getValue() << std::endl; - ss << "m_path ," << std::setw(9) << std::setprecision(4) << path_linear_length.getValue() << std::endl; - ss << "m_chc ," << std::setw(9) << std::setprecision(4) << cumulative_heading_change.getValue() << std::endl; - ss << "m_osc ," << std::setw(9) << std::setprecision(4) << oscillations.getValue() << std::endl; - ss << "m_bwd ," << std::setw(9) << std::setprecision(4) << backward_movements.getValue() << std::endl; - ss << "m_inp ," << std::setw(9) << std::setprecision(4) << inplace_rotations.getValue() << std::endl; - ss << "m_psi ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValue() << std::endl; - ss << "m_fsi ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValue() << std::endl; - ss << "m_dir ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValue() << std::endl; - ss << "m_psd ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValue() << std::endl; + ss << "s_rbt ," << std::setw(9) << std::setprecision(4) << samples_robot << std::endl; + ss << "s_ppl ," << std::setw(9) << std::setprecision(4) << samples_people << std::endl; + ss << "s_grp ," << std::setw(9) << std::setprecision(4) << samples_groups << std::endl; + ss << "m_goal ," << std::setw(9) << 
std::setprecision(4) << goal_reached.getValue() << std::endl; + ss << "m_obs ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValue() << std::endl; + ss << "m_mef ," << std::setw(9) << std::setprecision(4) << motion_efficiency.getValue() << std::endl; + ss << "m_cef ," << std::setw(9) << std::setprecision(4) << computational_efficiency.getValue() << std::endl; + ss << "m_cre ," << std::setw(9) << std::setprecision(4) << computational_time_repeatability.getValue() << std::endl; + ss << "m_vsm ," << std::setw(9) << std::setprecision(4) << velocity_smoothness.getValue() << std::endl; + ss << "m_hsm ," << std::setw(9) << std::setprecision(4) << heading_change_smoothness.getValue() << std::endl; + ss << "m_path ," << std::setw(9) << std::setprecision(4) << path_linear_length.getValue() << std::endl; + ss << "m_chc ," << std::setw(9) << std::setprecision(4) << cumulative_heading_change.getValue() << std::endl; + ss << "m_osc ," << std::setw(9) << std::setprecision(4) << oscillations.getValue() << std::endl; + ss << "m_bwd ," << std::setw(9) << std::setprecision(4) << backward_movements.getValue() << std::endl; + ss << "m_inp ," << std::setw(9) << std::setprecision(4) << inplace_rotations.getValue() << std::endl; + ss << "m_psi ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValue() << std::endl; + ss << "m_psi_min ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValueMin() << std::endl; + ss << "m_psi_max ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getValueMax() << std::endl; + ss << "m_psi_viol ," << std::setw(9) << std::setprecision(4) << personal_space_intrusion.getViolations() << std::endl; + ss << "m_fsi ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValue() << std::endl; + ss << "m_fsi_min ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getValueMin() << std::endl; + ss << "m_fsi_max ," << std::setw(9) << 
std::setprecision(4) << formation_space_intrusion.getValueMax() << std::endl; + ss << "m_fsi_viol ," << std::setw(9) << std::setprecision(4) << formation_space_intrusion.getViolations() << std::endl; + ss << "m_dir ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValue() << std::endl; + ss << "m_dir_min ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValueMin() << std::endl; + ss << "m_dir_max ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getValueMax() << std::endl; + ss << "m_dir_viol ," << std::setw(9) << std::setprecision(4) << heading_direction_disturbance.getViolations() << std::endl; + ss << "m_psd ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValue() << std::endl; + ss << "m_psd_min ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValueMin() << std::endl; + ss << "m_psd_max ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getValueMax() << std::endl; + ss << "m_psd_viol ," << std::setw(9) << std::setprecision(4) << passing_speed_discomfort.getViolations() << std::endl; + file_results << ss.str(); printf("Results CSV file saved at `%s`\r\n", filename.c_str()); } From 0776834fe1822143eef3d10b8685dc9003408a5c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 27 Dec 2023 21:24:53 +0100 Subject: [PATCH 34/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index 18e2865..25c15df 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.6 + 0.9.7 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From 7d02ca69928d8792dae529e2e1ff68ce1613efc1 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Thu, 11 Jan 2024 12:12:10 +0100 Subject: 
[PATCH 35/81] `logger` - optimized `transformPose` for the cases where transforming is not necessary --- srpb_logger/include/srpb_logger/benchmark_logger.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/srpb_logger/include/srpb_logger/benchmark_logger.h b/srpb_logger/include/srpb_logger/benchmark_logger.h index d7d0b85..98f4792 100644 --- a/srpb_logger/include/srpb_logger/benchmark_logger.h +++ b/srpb_logger/include/srpb_logger/benchmark_logger.h @@ -65,6 +65,10 @@ class BenchmarkLogger { * that wrong time source will be used and transform is not found (wall vs sim time difference). */ geometry_msgs::PoseWithCovarianceStamped transformPose(const geometry_msgs::PoseWithCovarianceStamped& pose_in) { + // transforming might not be necessary + if (pose_in.header.frame_id == target_frame_) { + return pose_in; + } auto transform_stamped = getTransform(pose_in.header.frame_id); geometry_msgs::PoseWithCovarianceStamped pose_out; tf2::doTransform(pose_in, pose_out, transform_stamped); From f1d805eaade32b021b8e299f1daa49b01a5c2d9e Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Thu, 11 Jan 2024 12:15:31 +0100 Subject: [PATCH 36/81] `logger` - whole planned path is saved by the `GlobalPlannerLogger` --- .../include/srpb_logger/global_planner_data.h | 26 ++++++++--- srpb_logger/src/global_planner_logger.cpp | 45 +++++++++++++++++-- .../test/test_global_planner_logger.cpp | 41 ++++++++++++++++- 3 files changed, 100 insertions(+), 12 deletions(-) diff --git a/srpb_logger/include/srpb_logger/global_planner_data.h b/srpb_logger/include/srpb_logger/global_planner_data.h index d8ad1e7..9c98861 100644 --- a/srpb_logger/include/srpb_logger/global_planner_data.h +++ b/srpb_logger/include/srpb_logger/global_planner_data.h @@ -3,6 +3,8 @@ #include #include +#include + namespace srpb { namespace logger { @@ -13,21 +15,21 @@ class GlobalPlannerData { const geometry_msgs::PoseStamped& goal_pose, bool planning_success, double planning_time, - size_t plan_size - ): 
GlobalPlannerData(robot_pose.pose, goal_pose.pose, planning_success, planning_time, plan_size) {} + const std::vector& plan + ): GlobalPlannerData(robot_pose.pose, goal_pose.pose, planning_success, planning_time, plan) {} GlobalPlannerData( const geometry_msgs::Pose& robot_pose, const geometry_msgs::Pose& goal_pose, bool planning_success, double planning_time, - size_t plan_size + const std::vector& plan ): pose_(robot_pose), goal_(goal_pose), planning_success_(planning_success), planning_time_(planning_time), - plan_size_(plan_size) + plan_(plan) {} inline geometry_msgs::Pose getPose() const { @@ -88,9 +90,19 @@ class GlobalPlannerData { return planning_time_; } - /// Number of poses the global path plan consists of + /// Returns a copy of a container with the global path plan @ref plan_ + inline std::vector getPlan() const { + return plan_; + } + + /// Returns a const reference to a container with the global path plan @ref plan_ + inline const std::vector& getPlanCref() const { + return plan_; + } + + /// Number of poses the global path plan @ref plan_ is expected to contain inline size_t getPlanSize() const { - return plan_size_; + return plan_.size(); } protected: @@ -98,7 +110,7 @@ class GlobalPlannerData { geometry_msgs::Pose pose_; geometry_msgs::Pose goal_; double planning_time_; - size_t plan_size_; + std::vector plan_; }; } // namespace logger diff --git a/srpb_logger/src/global_planner_logger.cpp b/srpb_logger/src/global_planner_logger.cpp index 878d8a8..e2b6f5d 100644 --- a/srpb_logger/src/global_planner_logger.cpp +++ b/srpb_logger/src/global_planner_logger.cpp @@ -56,12 +56,27 @@ std::string GlobalPlannerLogger::plannerToString(const GlobalPlannerData& planne /* 8 */ ss << std::setw(4) << std::setprecision(1) << static_cast(planner.getPlanningStatus()) << " "; /* 9 */ ss << std::setw(9) << std::setprecision(4) << planner.getPlanningTime() << " "; /* 10 */ ss << std::setw(9) << std::setprecision(1) << planner.getPlanSize(); + + // dynamic size of the 
plan + ss << " / "; + const auto& plan = planner.getPlanCref(); + for (const auto& pose_stamped: plan) { + ss << " " << std::setw(9) << std::setprecision(4) << pose_stamped.pose.position.x; + ss << " " << std::setw(9) << std::setprecision(4) << pose_stamped.pose.position.y; + ss << " " << std::setw(9) << std::setprecision(4) << tf2::getYaw(pose_stamped.pose.orientation); + } return ss.str(); } std::pair GlobalPlannerLogger::plannerFromString(const std::string& str) { - auto vals = people_msgs_utils::parseString(str, " "); - if (vals.size() != 11) { + // divide into 2 parts considering separator + size_t separator = str.find("/"); + auto vals = people_msgs_utils::parseString(str.substr(0, separator - 1), " "); + auto vals_plan_poses = people_msgs_utils::parseString(str.substr(separator + 1), " "); + + // static part of the entry has always the same length; a dynamic part must have 3 elements for each pose + bool plan_poses_in_triplets = vals_plan_poses.size() % 3 != 0; + if (vals.size() != 11 || plan_poses_in_triplets) { std::cout << "\x1B[31mFound corrupted data of a global planner:\r\n\t" << str << "\x1B[0m" << std::endl; // dummy planner data auto dummy_planner = GlobalPlannerData( @@ -69,7 +84,7 @@ std::pair GlobalPlannerLogger::plannerFromString(const geometry_msgs::Pose(), false, 0.0, - 0 + std::vector() ); return {false, dummy_planner}; } @@ -97,9 +112,31 @@ std::pair GlobalPlannerLogger::plannerFromString(const bool planning_status = static_cast(static_cast(vals.at(8))); double planning_time = vals.at(9); + double plan_size = vals.at(10); + std::vector plan; + // each pose is represented by a triplet + for ( + auto it = vals_plan_poses.cbegin(); + it != vals_plan_poses.cend(); + it = it + 3 + ) { + double x = *it; + double y = *(it+1); + double yaw = *(it+2); + geometry_msgs::PoseStamped pose; + pose.pose.position.x = x; + pose.pose.position.y = y; + + // NOTE: ctor of tf2::Quaternion using Euler angles is deprecated + tf2::Quaternion quat; + 
quat.setEuler(0.0, 0.0, yaw); + pose.pose.orientation = tf2::toMsg(quat); + + plan.push_back(pose); + } - auto planner = GlobalPlannerData(pose, goal, planning_status, planning_time, plan_size); + auto planner = GlobalPlannerData(pose, goal, planning_status, planning_time, plan); return {true, planner}; } diff --git a/srpb_logger/test/test_global_planner_logger.cpp b/srpb_logger/test/test_global_planner_logger.cpp index 9dbb151..e0a4603 100644 --- a/srpb_logger/test/test_global_planner_logger.cpp +++ b/srpb_logger/test/test_global_planner_logger.cpp @@ -1,6 +1,7 @@ #include #include +#include using namespace srpb::logger; @@ -33,8 +34,27 @@ TEST(ConversionTest, planner) { bool planning_success = true; double planning_time = 0.135; size_t plan_size = 497; + // create a arbitrary path plan + std::vector plan; + plan.resize(plan_size); + double x_plan = -5.159; + double y_plan = 20.951; + double yaw_plan = 0.357; + for (auto& pose_stamped: plan) { + // modify vector element + pose_stamped.pose.position.x = x_plan; + pose_stamped.pose.position.y = y_plan; + // NOTE: ctor of tf2::Quaternion using Euler angles is deprecated + tf2::Quaternion quat; + quat.setEuler(0.0, 0.0, yaw_plan); + pose_stamped.pose.orientation = tf2::toMsg(quat); + // prep coords for the next iteration + x_plan += 0.142; + y_plan -= 0.249; + yaw_plan = angles::normalize_angle(yaw_plan + 0.029); + } - GlobalPlannerData data_in(pose, goal, planning_success, planning_time, plan_size); + GlobalPlannerData data_in(pose, goal, planning_success, planning_time, plan); // 1. 
evaluate if attributes were saved correctly ASSERT_EQ(data_in.getPositionX(), pose.position.x); @@ -51,6 +71,7 @@ TEST(ConversionTest, planner) { ASSERT_EQ(data_in.getPlanningStatus(), planning_success); ASSERT_EQ(data_in.getPlanningTime(), planning_time); ASSERT_EQ(data_in.getPlanSize(), plan_size); + ASSERT_EQ(data_in.getPlan(), plan); auto data_in_str = GlobalPlannerLogger::plannerToString(data_in); auto conversion = GlobalPlannerLogger::plannerFromString(data_in_str); @@ -74,6 +95,24 @@ TEST(ConversionTest, planner) { ASSERT_EQ(data_out.getPlanningStatus(), data_in.getPlanningStatus()); ASSERT_EQ(data_out.getPlanningTime(), data_in.getPlanningTime()); ASSERT_EQ(data_out.getPlanSize(), data_in.getPlanSize()); + /* NOTE: cannot simply use: + * ASSERT_EQ(data_out.getPlan(), data_in.getPlan()); + * here, as it uses a serialized version (?) of the message structure; despite the same contents (with 1e-04 + * accuracy), there may be some whitespace differences + */ + auto out_plan = data_out.getPlanCref(); + auto in_plan = data_in.getPlanCref(); + for ( + auto out_it = out_plan.cbegin(), in_it = in_plan.cbegin(); + out_it != out_plan.cend() || in_it != in_plan.cend(); + out_it++, in_it++ + ) { + auto out_pose = *out_it; + auto in_pose = *in_it; + ASSERT_NEAR(out_pose.pose.position.x, in_pose.pose.position.x, 1e-04); + ASSERT_NEAR(out_pose.pose.position.y, in_pose.pose.position.y, 1e-04); + ASSERT_NEAR(tf2::getYaw(out_pose.pose.orientation), tf2::getYaw(in_pose.pose.orientation), 1e-04); + } } int main(int argc, char** argv) { From b461d455abe8c4ad306f020ac6ed1c7f420e521c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 22 Jan 2024 23:12:09 +0100 Subject: [PATCH 37/81] `evaluation` - renamed `excel_sheet_defines` to `excel_sheet_utils` --- srpb_evaluation/scripts/create_excel_from_results.py | 8 ++++---- .../scripts/create_latex_table_from_results.py | 8 ++++---- .../{excel_sheet_defines.py => excel_sheet_utils.py} | 0 3 files changed, 8 insertions(+), 8 
deletions(-) rename srpb_evaluation/scripts/{excel_sheet_defines.py => excel_sheet_utils.py} (100%) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index c385037..9bae3aa 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -8,7 +8,7 @@ ''' import csv -import excel_sheet_defines +import excel_sheet_utils import glob import re import sys @@ -216,8 +216,8 @@ def increment_char(c: chr): def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, calc_fun='MEDIAN'): - row_start = int(excel_sheet_defines.RESULT_INIT_ROW) - col_header = str(excel_sheet_defines.RESULT_INIT_COL) + row_start = int(excel_sheet_utils.RESULT_INIT_ROW) + col_header = str(excel_sheet_utils.RESULT_INIT_COL) ws[col_header + str(row_start)] = 'Planner' ws[col_header + str(row_start + 1)] = 'Trials' row_metric_start = row_start + 1 @@ -287,7 +287,7 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, # grab the active worksheet (workbook always creates an arbitrary sheet) ws_init = wb.active # create a sheet with a custom name -ws = wb.create_sheet(title=excel_sheet_defines.SHEET_NAME) +ws = wb.create_sheet(title=excel_sheet_utils.SHEET_NAME) # delete the sheet initially created wb.remove_sheet(worksheet=ws_init) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 3092372..50dbad5 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -5,7 +5,7 @@ sudo apt install python3-openpyxl ''' -import excel_sheet_defines +import excel_sheet_utils import json import sys @@ -22,10 +22,10 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: # NOTE: data_only allows to read values instead of formulas wb = load_workbook(filename 
= str(path), data_only=True) # access specific sheet - sheet = wb[excel_sheet_defines.SHEET_NAME] + sheet = wb[excel_sheet_utils.SHEET_NAME] # upper left cell - cell that initiates results table - col_init = excel_sheet_defines.RESULT_INIT_COL - row_init = excel_sheet_defines.RESULT_INIT_ROW + col_init = excel_sheet_utils.RESULT_INIT_COL + row_init = excel_sheet_utils.RESULT_INIT_ROW col_metric_ids = col_init diff --git a/srpb_evaluation/scripts/excel_sheet_defines.py b/srpb_evaluation/scripts/excel_sheet_utils.py similarity index 100% rename from srpb_evaluation/scripts/excel_sheet_defines.py rename to srpb_evaluation/scripts/excel_sheet_utils.py From e7265a4114a7aa8d42d99923b416e98fd71378b8 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 22 Jan 2024 23:18:35 +0100 Subject: [PATCH 38/81] `evaluation` - `load_data_from_excel` moved to `excel_sheet_utils` --- .../create_latex_table_from_results.py | 74 +---------------- srpb_evaluation/scripts/excel_sheet_utils.py | 79 +++++++++++++++++++ 2 files changed, 81 insertions(+), 72 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 50dbad5..80ed920 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -5,86 +5,16 @@ sudo apt install python3-openpyxl ''' -import excel_sheet_utils +from excel_sheet_utils import load_data_from_excel + import json import sys -from openpyxl import load_workbook from pathlib import Path - from typing import List from typing import Dict -def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: - # reading from sheets follows the convention introduced in `create_excel_from_results.calculate_sheet` script - - # NOTE: data_only allows to read values instead of formulas - wb = load_workbook(filename = str(path), data_only=True) - # access specific sheet - sheet = 
wb[excel_sheet_utils.SHEET_NAME] - # upper left cell - cell that initiates results table - col_init = excel_sheet_utils.RESULT_INIT_COL - row_init = excel_sheet_utils.RESULT_INIT_ROW - - col_metric_ids = col_init - - # lambda that creates col-row tuple to access cell in a sheet - make_sheet_cell = lambda row, col : str(col + row) - # lambda that works for chars in A-Z range - increment_char = lambda c : chr(ord(c) + 1) - # lambda that increments integer stored as string - increment_row = lambda row : str(int(row) + 1) - # lambda that retrieves value of sheet's cell - get_sheet_val = lambda sheet, row, col : sheet[make_sheet_cell(col=col, row=row)].value - # lambda that checks if certain cell in the sheet is empty - sheet_cell_empty = lambda sheet, row, col : get_sheet_val(sheet=sheet, row=row, col=col) == None - - # local database with planners' metrics - data = {} - - # to iterate over planners - skip first, avoiding 'Planner' - col_iter = increment_char(col_init) - # to iterate over metrics - row_iter = increment_row(row_init) - # iterate over planners (names) - while not sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): - # save planner name for later use in database - planner_name = str(get_sheet_val(sheet=sheet, row=row_init, col=col_iter)) - # got a valid planner name - iterate over saved metrics - metric_names = [] - metric_values = [] - while not sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): - # obtain sheet values - metric_name = get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids) - metric_value = get_sheet_val(sheet=sheet, row=row_iter, col=col_iter) - # evaluate the correctness of read data - if metric_name == None or metric_value == None: - raise Exception( - f"The metric ID cell `{col_metric_ids}{row_iter}` contains `{metric_name}` " - f"and the metric value cell `{col_iter}{row_iter}` contains `{metric_value}`. " - f"Cannot proceed with such values. Check whether the cells in your spreadsheet are empty. 
" - f"If they aren't, try to open the sheet and simply save it using Excel or LibreOffice Calc." - ) - # collect - metric_names.append(str(metric_name)) - metric_values.append(float(metric_value)) - # try to proceed to the next metric - row_iter = increment_row(row_iter) - - # finished collecting metrics for a given planner - metrics = dict(zip(metric_names, metric_values)) - # append to overall results - result = {planner_name: metrics} - data.update(result) - - # try to proceed to the next planner - col_iter = increment_char(col_iter) - # reset metrics row 'pointer' - row_iter = increment_row(row_init) - - return data - # results: results of a specific scenarios aggregated into a single structure # metrics: names (keys) of metrics to include in the LaTeX table def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], metric_names: List[str]) -> str: diff --git a/srpb_evaluation/scripts/excel_sheet_utils.py b/srpb_evaluation/scripts/excel_sheet_utils.py index 2a85c81..e8088a3 100644 --- a/srpb_evaluation/scripts/excel_sheet_utils.py +++ b/srpb_evaluation/scripts/excel_sheet_utils.py @@ -1,6 +1,85 @@ +from openpyxl import load_workbook +from pathlib import Path +from typing import Dict + # name of the sheet in the Excel file with the results SHEET_NAME = "SRPB" # upper left cell - cell that initiates results table RESULT_INIT_COL = 'A' RESULT_INIT_ROW = '50' + + +def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: + """ + Reads the spreadsheet following the convention introduced in `create_excel_from_results.calculate_sheet` script + """ + + # NOTE: data_only allows to read values instead of formulas + wb = load_workbook(filename = str(path), data_only=True) + # access specific sheet + sheet = wb[SHEET_NAME] + # upper left cell - cell that initiates results table + col_init = RESULT_INIT_COL + row_init = RESULT_INIT_ROW + + col_metric_ids = col_init + + # lambda that creates col-row tuple to access cell in a sheet + 
make_sheet_cell = lambda row, col : str(col + row) + # lambda that works for chars in A-Z range + increment_char = lambda c : chr(ord(c) + 1) + # lambda that increments integer stored as string + increment_row = lambda row : str(int(row) + 1) + # lambda that retrieves value of sheet's cell + get_sheet_val = lambda sheet, row, col : sheet[make_sheet_cell(col=col, row=row)].value + # lambda that checks if certain cell in the sheet is empty + sheet_cell_empty = lambda sheet, row, col : get_sheet_val(sheet=sheet, row=row, col=col) == None + + # local database with planners' metrics + data = {} + + # to iterate over planners - skip first, avoiding 'Planner' + col_iter = increment_char(col_init) + # to iterate over metrics + row_iter = increment_row(row_init) + # iterate over planners (names) + while not sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): + # save planner name for later use in database + planner_name = str(get_sheet_val(sheet=sheet, row=row_init, col=col_iter)) + # print(f"Checking sheet at {make_sheet_cell(col=col_iter, row=row_init)} for planner {planner_name}") + # got a valid planner name - iterate over saved metrics + metric_names = [] + metric_values = [] + while not sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): + # obtain sheet values + metric_name = get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids) + metric_value = get_sheet_val(sheet=sheet, row=row_iter, col=col_iter) + # evaluate the correctness of read data + if metric_name == None or metric_value == None: + raise Exception( + f"The metric ID cell `{col_metric_ids}{row_iter}` contains `{metric_name}` " + f"and the metric value cell `{col_iter}{row_iter}` contains `{metric_value}`. " + f"Cannot proceed with such values. Check whether the cells in your spreadsheet are empty. " + f"If they aren't, try to open the sheet at '{path}' and simply save it using Excel " + f"or LibreOffice Calc." 
+ ) + # collect + metric_names.append(str(metric_name)) + metric_values.append(float(metric_value)) + # print(f"Checking metric ID {metric_names[-1]}, val {metric_values[-1]}") + # try to proceed to the next metric + row_iter = increment_row(row_iter) + + # finished collecting metrics for a given planner + metrics = dict(zip(metric_names, metric_values)) + # append to overall results + result = {planner_name: metrics} + data.update(result) + + # try to proceed to the next planner + col_iter = increment_char(col_iter) + # reset metrics row 'pointer' + row_iter = increment_row(row_init) + + return data From 3caabd0d86d70557230aa3d954a28e87abb1dbda Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 00:46:14 +0100 Subject: [PATCH 39/81] `evaluation` - `MetricGaussian` renamed to `MetricStatistics`, adjusted its `calculateStatistics` method API to handle diverse metrics - adjusted classes inheriting from the `MetricStatistics` - adjusted unit tests --- srpb_evaluation/CMakeLists.txt | 10 +-- .../include/srpb_evaluation/metric_gaussian.h | 62 ------------- .../srpb_evaluation/metric_statistics.h | 63 +++++++++++++ .../metrics/formation_space_instrusion.h | 9 +- .../metrics/heading_direction_disturbance.h | 9 +- .../metrics/passing_speed_discomfort.h | 12 ++- .../metrics/personal_space_instrusion.h | 9 +- srpb_evaluation/src/metric_gaussian.cpp | 83 ----------------- srpb_evaluation/src/metric_statistics.cpp | 88 +++++++++++++++++++ ...aussian.cpp => test_metric_statistics.cpp} | 8 +- 10 files changed, 180 insertions(+), 173 deletions(-) delete mode 100644 srpb_evaluation/include/srpb_evaluation/metric_gaussian.h create mode 100644 srpb_evaluation/include/srpb_evaluation/metric_statistics.h delete mode 100644 srpb_evaluation/src/metric_gaussian.cpp create mode 100644 srpb_evaluation/src/metric_statistics.cpp rename srpb_evaluation/test/{test_metric_gaussian.cpp => test_metric_statistics.cpp} (79%) diff --git a/srpb_evaluation/CMakeLists.txt 
b/srpb_evaluation/CMakeLists.txt index 0e109e3..8821c87 100644 --- a/srpb_evaluation/CMakeLists.txt +++ b/srpb_evaluation/CMakeLists.txt @@ -43,7 +43,7 @@ include_directories( add_library(${PROJECT_NAME}_lib # libs include/${PROJECT_NAME}/metric.h - include/${PROJECT_NAME}/metric_gaussian.h + include/${PROJECT_NAME}/metric_statistics.h include/${PROJECT_NAME}/rewinder.h include/${PROJECT_NAME}/utils.h # specific metrics @@ -63,7 +63,7 @@ add_library(${PROJECT_NAME}_lib include/${PROJECT_NAME}/metrics/personal_space_instrusion.h include/${PROJECT_NAME}/metrics/velocity_smoothness.h # sources - src/metric_gaussian.cpp + src/metric_statistics.cpp src/rewinder.cpp src/utils.cpp ) @@ -95,8 +95,8 @@ if (CATKIN_ENABLE_TESTING) if(TARGET test_utils) target_link_libraries(test_utils ${PROJECT_NAME}_lib) endif() - catkin_add_gtest(test_metric_gaussian test/test_metric_gaussian.cpp) - if(TARGET test_metric_gaussian) - target_link_libraries(test_metric_gaussian ${PROJECT_NAME}_lib) + catkin_add_gtest(test_metric_statistics test/test_metric_statistics.cpp) + if(TARGET test_metric_statistics) + target_link_libraries(test_metric_statistics ${PROJECT_NAME}_lib) endif() endif() diff --git a/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h b/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h deleted file mode 100644 index f04727c..0000000 --- a/srpb_evaluation/include/srpb_evaluation/metric_gaussian.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include "srpb_evaluation/metric.h" - -// helper functions for calculation of Gaussian distribution value -#include - -namespace srpb { -namespace evaluation { - -class MetricGaussian: public Metric { -public: - MetricGaussian( - const std::vector>& robot_data - ): Metric(robot_data) {} - - MetricGaussian( - const std::vector>& robot_data, - const std::vector>& people_data - ): Metric(robot_data, people_data) {} - - MetricGaussian( - const std::vector>& robot_data, - const std::vector>& people_data, - const std::vector>& 
groups_data - ): Metric(robot_data, people_data, groups_data) {} - - /// Returns minimum value of Gaussian throughout the scenario - virtual double getValueMin() const = 0; - - /// Returns maximum value of Gaussian throughout the scenario - virtual double getValueMax() const = 0; - - /// Returns percentage of violations of Gaussian throughout the scenario (considering the threshold value) - virtual double getViolations() const = 0; - - /// Prints results - virtual void printResults() const = 0; - - /** - * @brief Computes min, max, normalized metrics related to gaussian statistics and counts number of space violations - * - * @param timed_gaussians vector of pairs with, first, timestamp, and second, values of gaussians - * @param violation_threshold Gaussian values bigger than that will be considered as violation of personal space - * @param max_method set to true (default) to use max element from Gaussians to normalize metrics; - * false means averaging over all Gaussian occurrences in a current time step - * - * @return std::tuple tuple with scores: min, max and normalized to execution - * time and percentage of violation of, e.g., personal space violations (according to given threshold and gaussians) - */ - static std::tuple calculateGaussianStatistics( - std::vector>> timed_gaussians, - double violation_threshold, - bool max_method = true - ); - -protected: - virtual void compute() = 0; -}; - -} // namespace evaluation -} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/metric_statistics.h b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h new file mode 100644 index 0000000..10492c2 --- /dev/null +++ b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h @@ -0,0 +1,63 @@ +#pragma once + +#include "srpb_evaluation/metric.h" + +namespace srpb { +namespace evaluation { + +class MetricStatistics: public Metric { +public: + MetricStatistics( + const std::vector>& robot_data + ): Metric(robot_data) {} + + MetricStatistics( + 
const std::vector>& robot_data, + const std::vector>& people_data + ): Metric(robot_data, people_data) {} + + MetricStatistics( + const std::vector>& robot_data, + const std::vector>& people_data, + const std::vector>& groups_data + ): Metric(robot_data, people_data, groups_data) {} + + /// Returns minimum value obtained throughout the scenario + virtual double getValueMin() const = 0; + + /// Returns maximum value obtained throughout the scenario + virtual double getValueMax() const = 0; + + /// Returns the percentage of violations obtained throughout the scenario (considering the threshold value) + virtual double getViolations() const = 0; + + /// Prints results + virtual void printResults() const = 0; + + /** + * @brief Computes min, max, normalized metrics and counts the number of violations (above the certain threshold) + * + * @param timed_values vector of pairs with, first, a timestamp, and second, a container with values + * @param violation_threshold values bigger than that will be considered as violations + * @param violation_above_threshold selects whether the violation is counted when the value is bigger than + * the threshold (true, default) or when the violation is less than the threshold (false). + * This is specific to a certain metric (whether computes "cost" or "reward"). 
+ * @param max_method set to true (default) so the max element is used to normalize metrics; + * false means averaging over all values/occurrences in a current time step + * + * @return std::tuple tuple with scores: min, max and normalized to execution + * time and percentage of violation of, e.g., personal space violations (according to given threshold and values) + */ + static std::tuple calculateStatistics( + std::vector>> timed_values, + double violation_threshold, + bool violation_above_threshold = true, + bool max_method = true + ); + +protected: + virtual void compute() = 0; +}; + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h index 419a86c..eb4d2ff 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/formation_space_instrusion.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -8,7 +8,7 @@ namespace srpb { namespace evaluation { /// Similar to personal space intrusion but related to the group space -class FormationSpaceIntrusion: public MetricGaussian { +class FormationSpaceIntrusion: public MetricStatistics { public: FormationSpaceIntrusion( const std::vector>& robot_data, @@ -17,7 +17,7 @@ class FormationSpaceIntrusion: public MetricGaussian { double group_space_threshold, bool max_method = true ): - MetricGaussian(robot_data, people_data, groups_data), + MetricStatistics(robot_data, people_data, groups_data), group_space_threshold_(group_space_threshold), max_method_(max_method), intrusion_min_(0.0), @@ -122,12 +122,13 @@ class FormationSpaceIntrusion: public MetricGaussian { rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( 
intrusion_min_, intrusion_max_, intrusion_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_gaussians, group_space_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_gaussians, group_space_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h index ccc593a..2fac921 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -8,7 +8,7 @@ namespace srpb { namespace evaluation { /// Related to velocity and direction of the robot movement towards person -class HeadingDirectionDisturbance: public MetricGaussian { +class HeadingDirectionDisturbance: public MetricStatistics { public: HeadingDirectionDisturbance( const std::vector>& robot_data, @@ -20,7 +20,7 @@ class HeadingDirectionDisturbance: public MetricGaussian { double robot_max_speed = social_nav_utils::HeadingDirectionDisturbance::MAX_SPEED_DEFAULT, bool max_method = true ): - MetricGaussian(robot_data, people_data), + MetricStatistics(robot_data, people_data), disturbance_threshold_(disturbance_threshold), person_occupancy_radius_(person_occupancy_radius), person_fov_(person_fov), @@ -131,12 +131,13 @@ class HeadingDirectionDisturbance: public MetricGaussian { ); rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( disturbance_min_, disturbance_max_, disturbance_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_disturbances, disturbance_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_disturbances, 
disturbance_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h index eae473f..17745d6 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/passing_speed_discomfort.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -16,9 +16,6 @@ namespace evaluation { * Refer to the social_nav_utils::PassingSpeedComfort implementation for a detailed description of the underlying * bivariate model that fits the published data best. * - * @note The class inherits from the @ref MetricGaussian class but in fact @ref PassingSpeedDiscomfort is - * not a Gaussian metric. It just uses min/max and violations calculation methods from the base class. - * * @param robot_data * @param people_data * @param distance_min minimum distance between the center of the robot and a human at which (when robot speed @@ -28,7 +25,7 @@ namespace evaluation { * @param max_method whether to use the maximum discomfort in a given timestamp as an indicator (true); the average * is used when set to false */ -class PassingSpeedDiscomfort: public MetricGaussian { +class PassingSpeedDiscomfort: public MetricStatistics { public: PassingSpeedDiscomfort( const std::vector>& robot_data, @@ -38,7 +35,7 @@ class PassingSpeedDiscomfort: public MetricGaussian { double discomfort_threshold = 0.4, bool max_method = true ): - MetricGaussian(robot_data, people_data), + MetricStatistics(robot_data, people_data), distance_min_(distance_min), robot_speed_max_(speed_max), discomfort_threshold_(discomfort_threshold), @@ -148,12 +145,13 @@ class PassingSpeedDiscomfort: public MetricGaussian { ); rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true 
std::tie( discomfort_min_, discomfort_max_, discomfort_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_discomforts, discomfort_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_discomforts, discomfort_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h index 0c40e87..eac0858 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/personal_space_instrusion.h @@ -1,6 +1,6 @@ #pragma once -#include "srpb_evaluation/metric_gaussian.h" +#include "srpb_evaluation/metric_statistics.h" #include @@ -29,7 +29,7 @@ namespace evaluation { * “To Approach Humans?”: A Unified Framework for Approaching Pose Prediction and Socially Aware Robot Navigation * They called it `Social Individual Index`. Their method lacks normalization in terms of path duration. 
*/ -class PersonalSpaceIntrusion: public MetricGaussian { +class PersonalSpaceIntrusion: public MetricStatistics { public: PersonalSpaceIntrusion( const std::vector>& robot_data, @@ -37,7 +37,7 @@ class PersonalSpaceIntrusion: public MetricGaussian { double personal_space_threshold, bool max_method = true ): - MetricGaussian(robot_data, people_data), + MetricStatistics(robot_data, people_data), personal_space_threshold_(personal_space_threshold), max_method_(max_method), intrusion_min_(0.0), @@ -146,12 +146,13 @@ class PersonalSpaceIntrusion: public MetricGaussian { ); rewinder_.perform(); + // Gaussian is computed as a "cost"; hence, "violation_above_threshold" is hard-coded to true std::tie( intrusion_min_, intrusion_max_, intrusion_total_, violations_percentage_ - ) = MetricGaussian::calculateGaussianStatistics(timed_gaussians, personal_space_threshold_, max_method_); + ) = MetricStatistics::calculateStatistics(timed_gaussians, personal_space_threshold_, true, max_method_); } }; diff --git a/srpb_evaluation/src/metric_gaussian.cpp b/srpb_evaluation/src/metric_gaussian.cpp deleted file mode 100644 index 969c7ef..0000000 --- a/srpb_evaluation/src/metric_gaussian.cpp +++ /dev/null @@ -1,83 +0,0 @@ -#include "srpb_evaluation/metric_gaussian.h" - -#include -#include -#include - -#include - -namespace srpb { -namespace evaluation { - -std::tuple MetricGaussian::calculateGaussianStatistics( - std::vector>> timed_gaussians, - double violation_threshold, - bool max_method -) { - // find actual duration - double duration = 0.0; - for (const auto& tg: timed_gaussians) { - duration += tg.first; - } - - // find number of personal space / f-formation space violations - unsigned int gaussian_violations = 0; - unsigned int gaussians_total = 0; - - // find gaussians and recompute according to recognized people (max/sum) - double metrics = 0.0; - double min_elem = std::numeric_limits::max(); - double max_elem = std::numeric_limits::min(); - - // rollout gaussians and compute 
score (metrics) - for (const auto& tg: timed_gaussians) { // tg in fact might be const but ac - double dt = tg.first; - if (tg.second.empty()) { - std::cout << "Gaussian(s) is empty for at least 1 sample. Metrics will be 0.0" << std::endl; - return std::make_tuple(0.0, 0.0, 0.0, 0); - } - - // count timestamps when violations ocurred - gaussian_violations += std::count_if( - tg.second.cbegin(), - tg.second.cend(), - [&](double g) { - // count total numbers to find percentage - gaussians_total++; - return g > violation_threshold; - } - ); - - // overall min and max computation - double local_min_elem = *std::min_element(tg.second.cbegin(), tg.second.cend()); - if (local_min_elem < min_elem) { - min_elem = local_min_elem; - } - - double local_max_elem = *std::max_element(tg.second.cbegin(), tg.second.cend()); - if (local_max_elem > max_elem) { - max_elem = local_max_elem; - } - - // check for selected method of normalization - double metrics_elem = 0.0; - if (max_method) { - // max method used here for normalization - metrics_elem = local_max_elem; - } else { - // average used for normalization - metrics_elem = std::accumulate(tg.second.cbegin(), tg.second.cend(), 0.0) / static_cast(tg.second.size()); - } - - // Gaussians must be referenced to timestamps when, e.g., people were actually detected - metrics += (metrics_elem * (dt / duration)); - } - - // find percentage of time when personal space / f-formation space etc. 
thresholds were violated - double gaussian_violations_percentage = gaussian_violations / static_cast(gaussians_total); - - return std::make_tuple(min_elem, max_elem, metrics, gaussian_violations_percentage); -} - -} // namespace evaluation -} // namespace srpb diff --git a/srpb_evaluation/src/metric_statistics.cpp b/srpb_evaluation/src/metric_statistics.cpp new file mode 100644 index 0000000..06340d0 --- /dev/null +++ b/srpb_evaluation/src/metric_statistics.cpp @@ -0,0 +1,88 @@ +#include "srpb_evaluation/metric_statistics.h" + +#include +#include +#include + +#include + +namespace srpb { +namespace evaluation { + +std::tuple MetricStatistics::calculateStatistics( + std::vector>> timed_values, + double violation_threshold, + bool violation_above_threshold, + bool max_method +) { + // find the actual duration + double duration = 0.0; + for (const auto& tg: timed_values) { + duration += tg.first; + } + + // find the number of violations of, e.g., personal space / f-formation's O-space + unsigned int threshold_violations = 0; + unsigned int values_total = 0; + + // find the values and recompute according to, e.g., recognized people/groups (max/sum) + double metrics = 0.0; + double min_elem = std::numeric_limits::max(); + double max_elem = std::numeric_limits::min(); + + // rollout values to compute the score (metrics) + for (const auto& tvalue: timed_values) { + double dt = tvalue.first; + if (tvalue.second.empty()) { + std::cout << "The value container is empty for at least 1 sample. The metrics will be 0.0" << std::endl; + return std::make_tuple(0.0, 0.0, 0.0, 0); + } + + // count timestamps when violations ocurred + threshold_violations += std::count_if( + tvalue.second.cbegin(), + tvalue.second.cend(), + [&](double g) { + // count total numbers to find percentage + values_total++; + return violation_above_threshold ? 
(g > violation_threshold) : (g < violation_threshold); + } + ); + + // overall min and max computation + double local_min_elem = *std::min_element(tvalue.second.cbegin(), tvalue.second.cend()); + if (local_min_elem < min_elem) { + min_elem = local_min_elem; + } + + double local_max_elem = *std::max_element(tvalue.second.cbegin(), tvalue.second.cend()); + if (local_max_elem > max_elem) { + max_elem = local_max_elem; + } + + // check for selected method of normalization + double metrics_elem = 0.0; + if (max_method) { + // max method used here for normalization + metrics_elem = local_max_elem; + } else { + // average used for normalization + metrics_elem = std::accumulate( + tvalue.second.cbegin(), + tvalue.second.cend(), + 0.0 + ) / static_cast(tvalue.second.size()); + } + + // values must be referenced to timestamps when, e.g., people were actually detected + metrics += (metrics_elem * (dt / duration)); + } + + // find the percentage of time when thresholds of, e.g., personal space / f-formation's O-space etc. 
were violated + double threshold_violations_percentage = threshold_violations / static_cast(values_total); + + return std::make_tuple(min_elem, max_elem, metrics, threshold_violations_percentage); +} + +} // namespace evaluation +} // namespace srpb diff --git a/srpb_evaluation/test/test_metric_gaussian.cpp b/srpb_evaluation/test/test_metric_statistics.cpp similarity index 79% rename from srpb_evaluation/test/test_metric_gaussian.cpp rename to srpb_evaluation/test/test_metric_statistics.cpp index e9994fb..8d23db5 100644 --- a/srpb_evaluation/test/test_metric_gaussian.cpp +++ b/srpb_evaluation/test/test_metric_statistics.cpp @@ -1,18 +1,18 @@ #include -#include +#include using namespace srpb::evaluation; -TEST(TestMetricGaussian, gaussianStatistics) { - // durations and gaussians +TEST(TestMetricStatistics, statistics) { + // durations and, e.g., Gaussians std::vector>> timed_gaussians{ {0.25, {1.0, 0.75, 0.51, 0.24, 0.51, 0.75, 1.0, 1.25}}, {0.50, {1.0, 0.75, 0.51, 0.25, 0.51, 0.75, 1.0, 1.27}}, {0.75, {1.0, 0.75, 0.51, 0.26, 0.51, 0.75, 1.0, 1.26}} }; // tuple with: min, max and normalized, and threshold violations - auto stats = MetricGaussian::calculateGaussianStatistics(timed_gaussians, 0.75, true); + auto stats = MetricStatistics::calculateStatistics(timed_gaussians, 0.75, true, true); ASSERT_EQ(std::get<0>(stats), 0.24); ASSERT_EQ(std::get<1>(stats), 1.27); double duration = 0.25 + 0.50 + 0.75; From b071729a3613426c969aa6d2b9f66630578720ac Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 01:36:25 +0100 Subject: [PATCH 40/81] `evaluation` - added more unit tests for `calculateStatistics` when violation is regarded when value is less than the threshold --- .../test/test_metric_statistics.cpp | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/srpb_evaluation/test/test_metric_statistics.cpp b/srpb_evaluation/test/test_metric_statistics.cpp index 8d23db5..3a207bb 100644 --- a/srpb_evaluation/test/test_metric_statistics.cpp 
+++ b/srpb_evaluation/test/test_metric_statistics.cpp @@ -22,9 +22,80 @@ TEST(TestMetricStatistics, statistics) { + 1.27 * (0.50 / duration) + 1.26 * (0.75 / duration) ); + // violations are computed with regards to all Gaussians, i.e., the number of violating samples is divided + // by the total number of samples ASSERT_EQ(std::get<3>(stats), 9.0 / (3.0 * 8.0)); } +TEST(TestMetricStatistics, statisticsObstacles1) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.24, {0.2}}, + {0.26, {0.6}}, + {0.25, {0.5}}, + {0.23, {0.3}} + }; + // tuple with: min, max and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.2); + ASSERT_EQ(std::get<1>(stats), 0.6); + double duration = 0.24 + 0.26 + 0.25 + 0.23; + ASSERT_EQ( + std::get<2>(stats), + 0.2 * (0.24 / duration) + + 0.6 * (0.26 / duration) + + 0.5 * (0.25 / duration) + + 0.3 * (0.23 / duration) + ); + // 2 out of 4 samples are less than the threshold + ASSERT_EQ(std::get<3>(stats), 2.0 / 4.0); +} + +TEST(TestMetricStatistics, statisticsObstacles2) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.25, {0.51}}, + {0.25, {0.51}}, + {0.25, {0.51}}, + {0.25, {0.51}} + }; + // tuple with: min, max and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.51); + ASSERT_EQ(std::get<1>(stats), 0.51); + double duration = 0.25 + 0.25 + 0.25 + 0.25; + ASSERT_EQ( + std::get<2>(stats), + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + + 0.51 * (0.25 / duration) + ); + ASSERT_EQ(std::get<3>(stats), 0.0 / 4.0); +} + +TEST(TestMetricStatistics, statisticsObstacles3) { + // durations and, e.g., distances to obstacles + std::vector>> timed_dists{ + {0.22, {0.48}}, + {0.23, {0.49}}, + {0.24, {0.50}}, + }; + // tuple with: min, max 
and normalized, and threshold violations + auto stats = MetricStatistics::calculateStatistics(timed_dists, 0.5, false, true); + ASSERT_EQ(std::get<0>(stats), 0.48); + ASSERT_EQ(std::get<1>(stats), 0.50); + double duration = 0.22 + 0.23 + 0.24; + ASSERT_EQ( + std::get<2>(stats), + 0.48 * (0.22 / duration) + + 0.49 * (0.23 / duration) + + 0.50 * (0.24 / duration) + ); + // 2 out of 3 are less than the threshold + ASSERT_EQ(std::get<3>(stats), 2.0 / 3.0); +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); From 785606c092655b2c745cbdbe42517c3f8b17331c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 22:05:48 +0100 Subject: [PATCH 41/81] `evaluation` - fixed the calculation of the timing-corrected threshold violations percentage in `calculateStatistics` --- .../srpb_evaluation/metric_statistics.h | 7 +- srpb_evaluation/src/metric_statistics.cpp | 39 +++++----- .../test/test_metric_statistics.cpp | 76 +++++++++++++++---- 3 files changed, 90 insertions(+), 32 deletions(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metric_statistics.h b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h index 10492c2..5a0035c 100644 --- a/srpb_evaluation/include/srpb_evaluation/metric_statistics.h +++ b/srpb_evaluation/include/srpb_evaluation/metric_statistics.h @@ -45,8 +45,11 @@ class MetricStatistics: public Metric { * @param max_method set to true (default) so the max element is used to normalize metrics; * false means averaging over all values/occurrences in a current time step * - * @return std::tuple tuple with scores: min, max and normalized to execution - * time and percentage of violation of, e.g., personal space violations (according to given threshold and values) + * @return std::tuple tuple with scores: + * 1) min value, + * 2) max value, + * 3) a metric value normalized according to the duration/execution time, + * 4) timing-corrected percentage of the threshold value violations */ 
static std::tuple calculateStatistics( std::vector>> timed_values, diff --git a/srpb_evaluation/src/metric_statistics.cpp b/srpb_evaluation/src/metric_statistics.cpp index 06340d0..9abcdc0 100644 --- a/srpb_evaluation/src/metric_statistics.cpp +++ b/srpb_evaluation/src/metric_statistics.cpp @@ -21,31 +21,35 @@ std::tuple MetricStatistics::calculateStatistics duration += tg.first; } - // find the number of violations of, e.g., personal space / f-formation's O-space - unsigned int threshold_violations = 0; - unsigned int values_total = 0; - // find the values and recompute according to, e.g., recognized people/groups (max/sum) double metrics = 0.0; double min_elem = std::numeric_limits::max(); double max_elem = std::numeric_limits::min(); + // find the timing-corrected percentage of threshold violations of, e.g., personal space + double violations_duration = 0.0; // rollout values to compute the score (metrics) for (const auto& tvalue: timed_values) { double dt = tvalue.first; if (tvalue.second.empty()) { - std::cout << "The value container is empty for at least 1 sample. The metrics will be 0.0" << std::endl; + std::cout + << "\x1B[33m" + << "The value container is empty for at least 1 sample. The metrics will be 0.0" + << "\x1B[0m" + << std::endl; return std::make_tuple(0.0, 0.0, 0.0, 0); } - // count timestamps when violations ocurred - threshold_violations += std::count_if( + /* + * Count the duration when violations occurred, i.e., it does not matter whether there are ten people whose + * personal spaces the robot intruded, but rather if it happened in a single time step, it will be regarded + * in a sum of "violating" time steps (durations) + */ + size_t threshold_violations_num = std::count_if( tvalue.second.cbegin(), tvalue.second.cend(), - [&](double g) { - // count total numbers to find percentage - values_total++; - return violation_above_threshold ?
(g > violation_threshold) : (g < violation_threshold); + [=](double val) { + return violation_above_threshold ? (val > violation_threshold) : (val < violation_threshold); } ); @@ -74,14 +78,15 @@ std::tuple MetricStatistics::calculateStatistics ) / static_cast(tvalue.second.size()); } - // values must be referenced to timestamps when, e.g., people were actually detected + // values must be referenced to a duration of time steps when, e.g., people were actually detected metrics += (metrics_elem * (dt / duration)); - } - - // find the percentage of time when thresholds of, e.g., personal space / f-formation's O-space etc. were violated - double threshold_violations_percentage = threshold_violations / static_cast(values_total); - return std::make_tuple(min_elem, max_elem, metrics, threshold_violations_percentage); + // sum up the percentage of time when the threshold value of, e.g., personal space was violated + if (threshold_violations_num > 0) { + violations_duration += (dt / duration); + } + } + return std::make_tuple(min_elem, max_elem, metrics, violations_duration); } } // namespace evaluation diff --git a/srpb_evaluation/test/test_metric_statistics.cpp b/srpb_evaluation/test/test_metric_statistics.cpp index 3a207bb..6f13dc0 100644 --- a/srpb_evaluation/test/test_metric_statistics.cpp +++ b/srpb_evaluation/test/test_metric_statistics.cpp @@ -9,22 +9,28 @@ TEST(TestMetricStatistics, statistics) { std::vector>> timed_gaussians{ {0.25, {1.0, 0.75, 0.51, 0.24, 0.51, 0.75, 1.0, 1.25}}, {0.50, {1.0, 0.75, 0.51, 0.25, 0.51, 0.75, 1.0, 1.27}}, - {0.75, {1.0, 0.75, 0.51, 0.26, 0.51, 0.75, 1.0, 1.26}} + {0.75, {1.0, 0.75, 0.51, 0.26, 0.51, 0.75, 1.0, 1.26}}, + // {1.00, {}} // empty "second" - container, will force metrics to be 0.0 }; // tuple with: min, max and normalized, and threshold violations auto stats = MetricStatistics::calculateStatistics(timed_gaussians, 0.75, true, true); ASSERT_EQ(std::get<0>(stats), 0.24); ASSERT_EQ(std::get<1>(stats), 1.27); double duration 
= 0.25 + 0.50 + 0.75; - ASSERT_EQ( + // max_method is set to true, thus max values are pointed out below + ASSERT_DOUBLE_EQ( std::get<2>(stats), 1.25 * (0.25 / duration) + 1.27 * (0.50 / duration) + 1.26 * (0.75 / duration) ); - // violations are computed with regards to all Gaussians, i.e., the number of violating samples is divided - // by the total number of samples - ASSERT_EQ(std::get<3>(stats), 9.0 / (3.0 * 8.0)); + + // violations occurred in each time step, i.e., sum up the subsequent durations and divide them by a total duration + // the metric is 1.0 here + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.25 + 0.50 + 0.75) / duration + ); } TEST(TestMetricStatistics, statisticsObstacles1) { @@ -40,15 +46,19 @@ TEST(TestMetricStatistics, statisticsObstacles1) { ASSERT_EQ(std::get<0>(stats), 0.2); ASSERT_EQ(std::get<1>(stats), 0.6); double duration = 0.24 + 0.26 + 0.25 + 0.23; - ASSERT_EQ( + ASSERT_DOUBLE_EQ( std::get<2>(stats), 0.2 * (0.24 / duration) + 0.6 * (0.26 / duration) + 0.5 * (0.25 / duration) + 0.3 * (0.23 / duration) ); - // 2 out of 4 samples are less than the threshold - ASSERT_EQ(std::get<3>(stats), 2.0 / 4.0); + + // sum up only a duration when the threshold was violated (distances less than a threshold) + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.24 + 0.23) / duration + ); } TEST(TestMetricStatistics, statisticsObstacles2) { @@ -64,14 +74,19 @@ TEST(TestMetricStatistics, statisticsObstacles2) { ASSERT_EQ(std::get<0>(stats), 0.51); ASSERT_EQ(std::get<1>(stats), 0.51); double duration = 0.25 + 0.25 + 0.25 + 0.25; - ASSERT_EQ( + ASSERT_DOUBLE_EQ( std::get<2>(stats), 0.51 * (0.25 / duration) + 0.51 * (0.25 / duration) + 0.51 * (0.25 / duration) + 0.51 * (0.25 / duration) ); - ASSERT_EQ(std::get<3>(stats), 0.0 / 4.0); + + // violations did not occur + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + 0.0 / duration + ); } TEST(TestMetricStatistics, statisticsObstacles3) { @@ -86,14 +101,49 @@ 
ASSERT_EQ(std::get<0>(stats), 0.48); ASSERT_EQ(std::get<1>(stats), 0.50); double duration = 0.22 + 0.23 + 0.24; - ASSERT_EQ( + ASSERT_DOUBLE_EQ( std::get<2>(stats), 0.48 * (0.22 / duration) + 0.49 * (0.23 / duration) + 0.50 * (0.24 / duration) ); - // 2 out of 3 are less than the threshold - ASSERT_EQ(std::get<3>(stats), 2.0 / 3.0); + + // 2 out of 3 samples are less than the threshold (they violate) + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.22 + 0.23) / duration + ); +} + +TEST(TestMetricStatistics, statisticsUnequal) { + // durations and values (e.g., Gaussians) + std::vector>> timed_data{ + {0.22, {0.48, 0.51, 0.68, 0.12}}, + {0.23, {0.49, 0.34, 0.54, 0.86, 0.76}}, + {0.24, {0.50, 0.20}}, + {0.38, {0.10, 0.12}}, + {0.32, {0.97}} + }; + // tuple with: min, max and normalized, and threshold violations + // violation occurs above the threshold + auto stats = MetricStatistics::calculateStatistics(timed_data, 0.5, true, true); + ASSERT_EQ(std::get<0>(stats), 0.10); + ASSERT_EQ(std::get<1>(stats), 0.97); + double duration = 0.22 + 0.23 + 0.24 + 0.38 + 0.32; + ASSERT_DOUBLE_EQ( + std::get<2>(stats), + 0.68 * (0.22 / duration) + + 0.86 * (0.23 / duration) + + 0.50 * (0.24 / duration) + + 0.12 * (0.38 / duration) + + 0.97 * (0.32 / duration) + ); + + // in 3 out of 5 sets the threshold was violated + ASSERT_DOUBLE_EQ( + std::get<3>(stats), + (0.22 + 0.23 + 0.32) / duration + ); } int main(int argc, char** argv) { From c5dc6ba21bc482252b96c1c3e11b22986aa12ae4 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 22:27:29 +0100 Subject: [PATCH 42/81] `evaluation` - reworked the `ObstacleSafety` metric class to obtain min/max/threshold violations percentage too - now inherits from the `MetricStatistics` --- .../srpb_evaluation/metrics/obstacle_safety.h | 104 ++++++++++-------- srpb_evaluation/src/utils.cpp | 4 + 2 files changed, 63 insertions(+), 45 deletions(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h 
b/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h index 792965b..03a434e 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/obstacle_safety.h @@ -1,77 +1,91 @@ #pragma once -#include "srpb_evaluation/metric.h" +#include "srpb_evaluation/metric_statistics.h" + +#include namespace srpb { namespace evaluation { -/// @details Originally implemented in MRPB 1.0 (https://github.com/NKU-MobFly-Robotics/local-planning-benchmark) -class ObstacleSafety: public Metric { +class ObstacleSafety: public MetricStatistics { public: ObstacleSafety( const std::vector>& robot_data, double safety_distance ): - Metric(robot_data), + MetricStatistics(robot_data), safety_distance_(safety_distance) { compute(); } - /// Returns total percentage of obstacle safety + /// Returns the time-corrected mean distance to the closest obstacle in subsequent time steps of the scenario virtual double getValue() const override { - return obstacle_safety_ * 1e2; + return obstacle_distance_total_; + } + + /// Returns minimum value obtained throughout the scenario + virtual double getValueMin() const override { + return obstacle_distance_min_; + } + + /// Returns maximum value obtained throughout the scenario + virtual double getValueMax() const override { + return obstacle_distance_max_; } - void printResults() const override { - printf("Obstacle safety = %.4f [%%]\n", obstacle_safety_ * 1e2); + /** + * Returns the percentage of violations obtained throughout the scenario (considering the threshold value) + * The value according to the metric originally implemented in MRPB 1.0 + * See https://github.com/NKU-MobFly-Robotics/local-planning-benchmark for details + */ + virtual double getViolations() const override { + return violations_percentage_ * 100.0; + } + + virtual void printResults() const override { + printf( + "Obstacle safety = %.4f [m] (min = %.4f [m], max = %.4f [m], violations %.4f [%%])\n", + 
obstacle_distance_total_, + obstacle_distance_min_, + obstacle_distance_max_, + violations_percentage_ * 100.0 + ); } protected: double safety_distance_; - double obstacle_safety_; - void compute() override { - // timestamps to sum up time - const double TS_NOT_SET = std::numeric_limits::min(); - double ts_start = TS_NOT_SET; - double ts_end = TS_NOT_SET; + double obstacle_distance_min_; + double obstacle_distance_max_; + double obstacle_distance_total_; + /// "Obstacle safety" in MRPB 1.0 + double violations_percentage_; - double ts_sum = 0.0; - auto calc_next = [&]() { - // compute how long robot traveled near obstacles located within safety distance - if (rewinder_.getRobotCurr().getDistToObstacle() < safety_distance_) { - if (ts_start == TS_NOT_SET) { - ts_start = rewinder_.getTimestampCurr(); - } else { - // save timestamp - ts_end = rewinder_.getTimestampCurr(); - } - } else { - if (ts_end != TS_NOT_SET) { - // robot started to maintain safety distance again - ts_sum += (ts_end - ts_start); + void compute() override { + // container to compute, i.a., min and max values of distances to obstacles + std::vector>> timed_obs_distances; - // reset timestamps for incoming counts - ts_start = TS_NOT_SET; - ts_end = TS_NOT_SET; - } - } - }; - // NOTE: external lambda must be explicitly passed to capture - auto calc_last = [&, calc_next]() { - calc_next(); - // sum up the last period (if robot finished course not maintaining the safety distance) - if (ts_start != TS_NOT_SET && ts_end != TS_NOT_SET) { - ts_sum += ts_end - ts_start; + rewinder_.setHandlerNextTimestamp( + [&]() { + timed_obs_distances.push_back({ + // step duration + rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(), + // single value container (there is only 1 closest obstacle) + std::vector{rewinder_.getRobotCurr().getDistToObstacle()} + }); } - }; - rewinder_.setHandlerNextTimestamp(calc_next); - rewinder_.setHandlerLastTimestamp(calc_last); + ); rewinder_.perform(); - // save result - 
obstacle_safety_ = ts_sum / rewinder_.getDuration(); + // NOTE1: a violation is regarded when distance is less than the threshold + // NOTE2: max method does not matter here as only 1 observation in each step is available + std::tie( + obstacle_distance_min_, + obstacle_distance_max_, + obstacle_distance_total_, + violations_percentage_ + ) = MetricStatistics::calculateStatistics(timed_obs_distances, safety_distance_, false, true); } }; diff --git a/srpb_evaluation/src/utils.cpp b/srpb_evaluation/src/utils.cpp index d02ae50..50895e0 100644 --- a/srpb_evaluation/src/utils.cpp +++ b/srpb_evaluation/src/utils.cpp @@ -153,7 +153,11 @@ void createResultsFile( ss << "s_ppl ," << std::setw(9) << std::setprecision(4) << samples_people << std::endl; ss << "s_grp ," << std::setw(9) << std::setprecision(4) << samples_groups << std::endl; ss << "m_goal ," << std::setw(9) << std::setprecision(4) << goal_reached.getValue() << std::endl; + // NOTE: legacy m_obs (before making m_obs inherit from MetricStatistics) was equal to the current m_obs_viol ss << "m_obs ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValue() << std::endl; + ss << "m_obs_min ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValueMin() << std::endl; + ss << "m_obs_max ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getValueMax() << std::endl; + ss << "m_obs_viol ," << std::setw(9) << std::setprecision(4) << obstacle_safety.getViolations() << std::endl; ss << "m_mef ," << std::setw(9) << std::setprecision(4) << motion_efficiency.getValue() << std::endl; ss << "m_cef ," << std::setw(9) << std::setprecision(4) << computational_efficiency.getValue() << std::endl; ss << "m_cre ," << std::setw(9) << std::setprecision(4) << computational_time_repeatability.getValue() << std::endl; From 18520519eab2c6041949673066242763ef73ee0b Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 23:03:08 +0100 Subject: [PATCH 43/81] `evaluation` - fixed a description in 
the `evaluate_all_dirs` script --- srpb_evaluation/scripts/evaluate_all_dirs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/evaluate_all_dirs.sh b/srpb_evaluation/scripts/evaluate_all_dirs.sh index 39dae32..579616a 100755 --- a/srpb_evaluation/scripts/evaluate_all_dirs.sh +++ b/srpb_evaluation/scripts/evaluate_all_dirs.sh @@ -12,7 +12,7 @@ # if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then echo "Wrong usage. Script args:" - echo " (1) [required] full path to the main directory with grouped logs (3 logs in each separate directory)" + echo " (1) [required] full path to the main directory with grouped logs (4 logs in each separate directory)" echo " (2) [optional] safety distance for m_obs metric, 0.55 by default" exit 0 fi From f758ec328108e1335b945b61bf4a5d331f5671a3 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 23:31:39 +0100 Subject: [PATCH 44/81] `evaluation` - added `SrpbMetrics` class for usage in Python processing scripts - `SrpbMetrics` contains a set of available metrics --- srpb_evaluation/scripts/srpb_metrics.py | 340 ++++++++++++++++++++++++ 1 file changed, 340 insertions(+) create mode 100644 srpb_evaluation/scripts/srpb_metrics.py diff --git a/srpb_evaluation/scripts/srpb_metrics.py b/srpb_evaluation/scripts/srpb_metrics.py new file mode 100644 index 0000000..55c0f37 --- /dev/null +++ b/srpb_evaluation/scripts/srpb_metrics.py @@ -0,0 +1,340 @@ +class SrpbMetrics: + def __init__(self) -> None: + self.tex_missing_metric_value = r"---" + # short names to avoid line breaking + # excel_fun_range_begin + self.efrb = "{#1}" + # excel_fun_range_end + self.efre = "{#2}" + + self.map = { + 's_rbt': { + 'tex_name': r"$m_{\mathrm{srbt}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of robot samples collected during a single trial', + # not necessarily true, but less samples with the same metric results indicates better effectiveness + 'min_is_best': True, + 
'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 's_ppl': { + 'tex_name': r"$m_{\mathrm{sppl}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of people samples collected during a single trial', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 's_grp': { + 'tex_name': r"$m_{\mathrm{sgrp}}$", + 'tex_unit': r"$\left[ \mathrm{unit} \right]$", + 'description': 'Mean number of group samples collected during a single trial', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + + 'm_goal': { + 'tex_name': r"$m_{\mathrm{goal}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Navigation success rate', + 'min_is_best': False, + 'excel_calc_fun': f'=100.0 * (SUM({self.efrb}:{self.efre}) / COLUMNS({self.efrb}:{self.efre}))' + }, + # for the backward compatibility with IEEE Access article, this metric (not presented in the paper) + # is named '_dist m_obs', whereas violations metric (m_obs_viol) is 'm_obs' (as presented) + 'm_obs': { + 'tex_name': r"${}_{\mathrm{dist}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Mean distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Minimum distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Maximum distance to the closest obstacle', + 'min_is_best': False, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_obs_viol': { + 'tex_name': r"$m_{\mathrm{obs}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Obstacle safety', + 'min_is_best': True, + 
'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_mef': { + 'tex_name': r"$m_{\mathrm{mef}}$", + 'tex_unit': r"$\left[ \mathrm{s} \right]$", + 'description': 'Motion efficiency', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_path': { + 'tex_name': r"$m_{\mathrm{plin}}$", + 'tex_unit': r"$\left[ \mathrm{m} \right]$", + 'description': 'Path length', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_chc': { + 'tex_name': r"$m_{\mathrm{chc}}$", + 'tex_unit': r"$\left[ \mathrm{rad} \right]$", + 'description': 'Cumulative heading change', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_cef': { + 'tex_name': r"$m_{\mathrm{cef}}$", + 'tex_unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$", + 'description': 'Computational efficiency', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_cre': { + 'tex_name': r"$m_{\mathrm{cre}}$", + 'tex_unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$", + 'description': 'Computational time repeatability', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_vsm': { + 'tex_name': r"$m_{\mathrm{vsm}}$", + 'tex_unit': r"$\left[ \mathrm{\frac{m}{s^2}} \right]$", + 'description': 'Velocity smoothness', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_hsm': { + 'tex_name': r"$m_{\mathrm{hsm}}$", + 'tex_unit': r"$\left[ \mathrm{\frac{rad}{s^2}} \right]$", + 'description': 'Heading change smoothness', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_osc': { + 'tex_name': r"$m_{\mathrm{osc}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Oscillations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_bwd': { + 'tex_name': r"$m_{\mathrm{bwd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Backward 
movements', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_inp': { + 'tex_name': r"$m_{\mathrm{iprot}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'In-place rotations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi': { + 'tex_name': r"$m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the personal spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psi_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{psi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the personal spaces intrusion violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi': { + 'tex_name': r"$m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'F-Formations\' O-spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the F-Formations\' O-spaces intrusion', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the F-Formations\' O-spaces intrusion', 
+ 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_fsi_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{fsi}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the F-Formations\' O-spaces intrusion violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir': { + 'tex_name': r"$m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of the heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum value of the heading direction discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_dir_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{dir}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the heading direction discomfort violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd': { + 'tex_name': r"$m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_min': { + 'tex_name': r"${}_{\mathrm{min}} m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Minimum value of a passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_max': { + 'tex_name': r"${}_{\mathrm{max}} m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Maximum 
value of a passing speed discomfort', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + }, + 'm_psd_viol': { + 'tex_name': r"${}_{\mathrm{viol}} m_{\mathrm{psd}}$", + 'tex_unit': r"$\left[ \% \right]$", + 'description': 'Percentage of the passing speed discomfort violations', + 'min_is_best': True, + 'excel_calc_fun': f'=MEDIAN({self.efrb}:{self.efre})' + } + } + + + def get(self) -> dict: + """ + Returns a whole map + """ + return self.map + + + def has_metric(self, metric_id: str) -> bool: + """ + Evaluates whether a given metric exists + """ + return metric_id in self.map.keys() + + + def get_metric(self, metric_id: str) -> dict: + """ + Returns all data related to a certain metric_id + """ + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id] + + + def get_metrics(self, metrics_id: list) -> dict: + """ + Returns a dict with data related to selected metrics + """ + metrics_map = {} + for name in metrics_id: + if not self.has_metric(name): + raise Exception( + f"Could not find {name} in the SRPB metrics map. 
Available metric names are: {self.map.keys()}" + ) + metrics_map[name] = self.get_metric(name) + return metrics_map + + + def get_latex_unit(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['tex_unit'] + + + def get_latex_name(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['tex_name'] + + + def get_description(self, metric_id: str) -> str: + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['description'] + + + def is_minimum_best(self, metric_id: str) -> bool: + """ + Returns True if the minimum value of the metric is desired, or False otherwise + """ + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['min_is_best'] + + + def get_excel_calc_fun_raw(self, metric_id: str) -> str: + """ + Returns the raw string representation of an Excel function + """ + if not self.has_metric(metric_id): + Exception(f'No such metric_id as {metric_id}') + return self.map[metric_id]['excel_calc_fun'] + + + def get_excel_calc_fun(self, metric_id: str, range_begin: str, range_end: str) -> str: + """ + Returns the string representation of an Excel function + """ + fun_str = self.get_excel_calc_fun_raw(metric_id) + fun_str = fun_str.replace(self.efrb, range_begin) + fun_str = fun_str.replace(self.efre, range_end) + return fun_str + + + def get_latex_missing_metric_value(self) -> str: + return self.tex_missing_metric_value From c6f0d7ed67443ba3430258ea75a56de93ea30f31 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Jan 2024 23:33:39 +0100 Subject: [PATCH 45/81] `evaluation` - modernized the script creating a LaTeX table from a results spreadsheet: using `SrpbMetrics` helper class, argparser, parameterized planner selection --- 
.../create_latex_table_from_results.py | 165 +++++++++++------- 1 file changed, 103 insertions(+), 62 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 80ed920..d269d6d 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -6,9 +6,11 @@ ''' from excel_sheet_utils import load_data_from_excel +from srpb_metrics import SrpbMetrics +import argparse import json -import sys +import math from pathlib import Path from typing import List @@ -17,37 +19,21 @@ # results: results of a specific scenarios aggregated into a single structure # metrics: names (keys) of metrics to include in the LaTeX table -def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], metric_names: List[str]) -> str: - # only keys from this map will be put into the LaTeX table; keys must match the ones used in the Excel sheet - METRIC_LATEX_MAP = { - 'm_obs': {'name': r"$m_{\mathrm{obs}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_mef': {'name': r"$m_{\mathrm{mef}}$", 'unit': r"$\left[ \mathrm{s} \right]$"}, - 'm_path': {'name': r"$m_{\mathrm{plin}}$", 'unit': r"$\left[ \mathrm{m} \right]$"}, - 'm_chc': {'name': r"$m_{\mathrm{chc}}$", 'unit': r"$\left[ \mathrm{rad} \right]$"}, - 'm_cef': {'name': r"$m_{\mathrm{cef}}$", 'unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$"}, - 'm_cre': {'name': r"$m_{\mathrm{cre}}$", 'unit': r"$\left[ 10^{-3} \cdot \mathrm{s} \right]$"}, - 'm_vsm': {'name': r"$m_{\mathrm{vsm}}$", 'unit': r"$\left[ \mathrm{\frac{m}{s^2}} \right]$"}, - 'm_hsm': {'name': r"$m_{\mathrm{hsm}}$", 'unit': r"$\left[ \mathrm{\frac{rad}{s^2}} \right]$"}, - 'm_osc': {'name': r"$m_{\mathrm{osc}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_bwd': {'name': r"$m_{\mathrm{bwd}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_inp': {'name': r"$m_{\mathrm{iprot}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_psi': 
{'name': r"$m_{\mathrm{psi}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_fsi': {'name': r"$m_{\mathrm{fsi}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_dir': {'name': r"$m_{\mathrm{dir}}$", 'unit': r"$\left[ \% \right]$"}, - 'm_psd': {'name': r"$m_{\mathrm{psd}}$", 'unit': r"$\left[ \% \right]$"} - } - # select metrics from the predefined set, i.e., METRIC_LATEX_MAP +def create_latex_table( + results: List[Dict[str, Dict[str, Dict[str, float]]]], + metric_names: List[str], + planner_names: List[str] +) -> str: + # only keys known by the SrpbMetrics will be put into the LaTeX table; keys must match the ones used in the sheet + srpb_metrics = SrpbMetrics() + # select metrics from the predefined set stored in SrpbMetrics metrics_map = {} + # by default, all metrics are included if not len(metric_names): - metrics_map = METRIC_LATEX_MAP + metrics_map = srpb_metrics.get() else: - for name in metric_names: - if not name in METRIC_LATEX_MAP.keys(): - raise Exception( - f"Could not find {name} in the SRPB metrics map. Available metric names are: {METRIC_LATEX_MAP.keys()}" - ) - metrics_map[name] = METRIC_LATEX_MAP[name] + metrics_map = srpb_metrics.get_metrics(metric_names) if not len(metrics_map): raise Exception( @@ -56,20 +42,46 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me ) print(f"Selected `{len(metrics_map)}` metrics to include in the LaTeX table: `{metrics_map.keys()}`") - # retrieve names of planners assuming that all scenarios results have the same planner entries; - # choose from the first scenario - planner_names = results[0]['results'].keys() - # save number of checked planners + # save the number of considered planners planners_num = len(planner_names) - # retrieve number of evaluated scenarios - scenarios_num = len(results) + if not planners_num: + # choose the set from the first scenario + print(f"The input list of selected planners is empty! 
Selecting the planners appearing in the first scenario.") + planner_names = results[0]['results'].keys() + planners_num = len(planner_names) # evaluate whether valid data are available if not len(planner_names) or not planners_num: raise Exception( - f"Aborting further execution as no planners are found in the results. " + f"Aborting further execution since no planners were selected or appeared in the first scenario." ) - print(f"Selected {planners_num} planners with names `{planner_names}` for `{scenarios_num}` scenarios") + + # retrieve number of evaluated scenarios + scenarios_num = len(results) + print(f"Selected '{planners_num}' planners with names `{planner_names}` for `{scenarios_num}` scenarios") + + + def get_metric_value_for_planner( + scenario_results: Dict[str, Dict[str, Dict[str, float]]], + planner_name: str, + metric_id: str + ) -> float: + """ + A nested function that returns a unified value once the required planner is nonexistent in the results. + It is assumed that the nested dict given in scenario_results contains keys defining: planner name + and dict nested further - a metric ID + """ + metric_val = math.nan + try: + metric_val = scenario_results[planner_name][metric_id] + except KeyError: + print( + f"Could not find a '{planner_name}' in a given results set. " + f"Available planners are: '{scenario_results.keys()}'. " + f"This might be intentional, returning NaN." 
+ ) + return metric_val + tex = str("") @@ -139,9 +151,9 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me # ID of the metric and its unit # whether to put the unit in a new line or not (when there are too few rows) if scenarios_num > 2: - metric_name_and_unit = f"{metrics_map[metric_id]['name']} \\ {metrics_map[metric_id]['unit']}" + metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} \\ {metrics_map[metric_id]['tex_unit']}" else: - metric_name_and_unit = f"{metrics_map[metric_id]['name']} {metrics_map[metric_id]['unit']}" + metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} {metrics_map[metric_id]['tex_unit']}" tex += (r" " + str(metric_name_and_unit) + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") @@ -163,10 +175,14 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me # find the best value among checked planners metric_values_among_planners = [] for planner_name in planner_names: - metric_val = results[scenario_num]['results'][planner_name][metric_id] + metric_val = get_metric_value_for_planner(results[scenario_num]['results'], planner_name, metric_id) metric_values_among_planners.append(metric_val) - # NOTE: by default, the best metric is the one with the smallest value - metric_best_val = min(metric_values_among_planners) + + # select the best metric - the one with the smallest or largest value (excluding NaNs) + if srpb_metrics.is_minimum_best(metric_id): + metric_best_val = min(metric_values_among_planners) + else: + metric_best_val = max(metric_values_among_planners) # if all metric values are equal to the best - let's mark the best as 'invalid' if all(x == metric_best_val for x in metric_values_among_planners): @@ -174,9 +190,15 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me # iterate over values of a specific metric for each planner for planner_name in planner_names: - metric_val = 
results[scenario_num]['results'][planner_name][metric_id] - # format to 2 decimal points - metric_val_str = "{:.2f}".format(metric_val) + metric_val = get_metric_value_for_planner(results[scenario_num]['results'], planner_name, metric_id) + # check for correctness/availability + if not math.isnan(metric_val): + # format to 2 decimal points + metric_val_str = "{:.2f}".format(metric_val) + else: + # indicate lack of valid data + metric_val_str = srpb_metrics.get_latex_missing_metric_value() + # mark the best value unless it is equal to 0 if metric_val == metric_best_val and metric_best_val != None: # bold @@ -207,33 +229,52 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me # main # ########################### if __name__ == "__main__": - # command line arguments - if len(sys.argv) == 1: - print(f'Usage: ') - print( - f' python3 {sys.argv[0]} ' - f' ' - f' ' - ) - print(f'') - print(f'Example:') - print(f' python3 {sys.argv[0]} "{{"static": {{"sim": "path_static_sim", "real": "path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" ~/table.tex m_obs m_chc') - exit(0) + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", help="JSON string with scenario identifiers and paths to SRPB results sheets") + cli.add_argument("output", help="path to the generated .tex file") + # optional arguments + cli.add_argument( + "--metrics", + nargs="*", + type=str, + default=[], + help="space-separated list of metrics to include in the LaTeX table (all available are included by default)" + ) + cli.add_argument( + "--planners", + nargs="*", + type=str, + default=[], + help="space-separated list of planners to include in the LaTeX table (all appearing in the first results file \ + are included by default)" + ) + # Usage example + # python3 create_latex_table_from_results.py \ + # "{{"static": {{"sim": "path_static_sim", "real": 
"path_static_real"}}, "dynamic": {{"sim": "path_dynamic_sim", "real": "path_dynamic_real"}}}}" \ + # ~/table.tex \ + # --metrics m_obs m_chc \ + # --planners dwa teb + + # parse the command line + args = cli.parse_args() # location of the output file - output_path = sys.argv[2] + output_path = args.output # metrics to include in the table - metric_names = sys.argv[3:] + metric_names = args.metrics + + # selected planner set + planner_names = args.planners # list of dicts {, } inputs = [] - cmd_json = json.loads(sys.argv[1]) + cmd_json = json.loads(args.input) for key in cmd_json.keys(): inputs.append({'name': str(key), 'path': Path(cmd_json[key])}) - print("\tScript inputs:") print(*inputs, sep='\n') @@ -246,7 +287,7 @@ def create_latex_table(results: List[Dict[str, Dict[str, Dict[str, float]]]], me results_total.append({'name': input_file['name'], 'results': scenario_results}) # generates a string representing LaTeX command that can be directly included and used in a LaTeX document - results_table = create_latex_table(results_total, metric_names) + results_table = create_latex_table(results_total, metric_names, planner_names) # save results to a file f = open(output_path, "w") From e45ea5164eae7b1f377e51f5a34f8fdd92347701 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 29 Jan 2024 13:04:05 +0100 Subject: [PATCH 46/81] `evaluation` - zero `dt` handled in the `VelocitySmoothness` and `HeadingChangeSmoothness` metrics --- .../srpb_evaluation/metrics/heading_change_smoothness.h | 5 +++++ .../include/srpb_evaluation/metrics/velocity_smoothness.h | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h index d54417e..44e417c 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_change_smoothness.h @@ -28,6 +28,11 
@@ class HeadingChangeSmoothness: public Metric { rewinder_.setHandlerNextTimestamp( [&]() { double dt = rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(); + // if we attempt to divide by 0 (due to possibly insufficient sampling rate of the logger), then skip this one; + // however, this sample will still be included in the final result + if (dt <= 0.0) { + return; + } double dtheta = rewinder_.getRobotNext().getVelocityTheta() - rewinder_.getRobotCurr().getVelocityTheta(); hsm += (std::abs(dtheta) / dt); } diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h b/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h index 94ce4ee..ef436a5 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/velocity_smoothness.h @@ -28,6 +28,11 @@ class VelocitySmoothness: public Metric { rewinder_.setHandlerNextTimestamp( [&]() { double dt = rewinder_.getTimestampNext() - rewinder_.getTimestampCurr(); + // if we attempt to divide by 0 (due to possibly insufficient sampling rate of the logger), then skip this one; + // however, this sample will still be included in the final result + if (dt <= 0.0) { + return; + } double acc_x = std::abs(rewinder_.getRobotNext().getVelocityX() - rewinder_.getRobotCurr().getVelocityX()) / dt; double acc_y = std::abs(rewinder_.getRobotNext().getVelocityY() - rewinder_.getRobotCurr().getVelocityY()) / dt; mean += std::hypot(acc_x, acc_y); From 12603aa148db62c18cc7031577837752495b47f3 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Thu, 1 Feb 2024 00:07:13 +0100 Subject: [PATCH 47/81] `evaluation` - scripts creating Excel spreadsheet from results loads metric-specific filtering function from `SrpbMetrics` --- .../scripts/create_excel_from_results.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py 
b/srpb_evaluation/scripts/create_excel_from_results.py index 9bae3aa..e23f70c 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -16,6 +16,7 @@ from string import ascii_uppercase from openpyxl import Workbook from pathlib import Path +from srpb_metrics import SrpbMetrics from typing import List from typing import Dict @@ -215,7 +216,7 @@ def increment_char(c: chr): return chr(ord(c) + 1) -def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, calc_fun='MEDIAN'): +def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict): row_start = int(excel_sheet_utils.RESULT_INIT_ROW) col_header = str(excel_sheet_utils.RESULT_INIT_COL) ws[col_header + str(row_start)] = 'Planner' @@ -230,6 +231,9 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, for key in results_example.keys(): metric_keys.append(str(key)) + # helps to find an Excel filtering function + srpb_metrics = SrpbMetrics() + # create compound representation (filtered) of raw data for planner in planner_names: # investigate each metric @@ -248,8 +252,9 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, ws[col_header + str(row_metric_start + m_index)] = metric ws[col_planner + str(row_start)] = planner ws[col_planner + str(row_start + 1)] = planner_trials - # filtering: put excel function here - ws[col_planner + str(row_metric_start + m_index)] = '=' + str(calc_fun) + '(' + str(cell_begin) + ':' + str(cell_end) + ')' + # filtering: put a string with an Excel function into the cell + excel_fun = srpb_metrics.get_excel_calc_fun(metric, str(cell_begin), str(cell_end)) + ws[col_planner + str(row_metric_start + m_index)] = excel_fun # next planner col_planner = increment_char(col_planner) @@ -297,7 +302,7 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict, ws.append(row) # make some calculations 
-calculate_sheet(ws, planners, results, 'MEDIAN') +calculate_sheet(ws, planners, results) # Prepare name of the output file output_filename = 'results' + '_' + Path(logs_dir).name From f1c7ec47f77b7c5f40921b335882d877795aee74 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Fri, 2 Feb 2024 01:31:13 +0100 Subject: [PATCH 48/81] `evaluation` - `GoalReached` metric prints distance differences between the final pose and the goal pose --- .../srpb_evaluation/metrics/goal_reached.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h b/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h index 68c1b91..f684c30 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/goal_reached.h @@ -30,28 +30,33 @@ class GoalReached: public Metric { void printResults() const override { printf( - "Goal reached = %d [bool] (tolerance violations: position %d, orientation %d)\n", + "Goal reached = %d [bool] " + "(tolerance violations: position %d [offset %7.4f m], orientation %d [offset %7.4f rad])\n", static_cast(!tolerance_xy_violated_ && !tolerance_yaw_violated_), static_cast(tolerance_xy_violated_), - static_cast(tolerance_yaw_violated_) + offset_xy_, + static_cast(tolerance_yaw_violated_), + offset_yaw_ ); } protected: double tolerance_xy_; double tolerance_yaw_; + double offset_xy_; + double offset_yaw_; bool tolerance_xy_violated_; bool tolerance_yaw_violated_; void compute() override { rewinder_.setHandlerLastTimestamp( [&]() { - double dist_to_goal_xy = std::hypot( + offset_xy_ = std::hypot( rewinder_.getRobotCurr().getPositionX() - rewinder_.getRobotCurr().getGoalPositionX(), rewinder_.getRobotCurr().getPositionY() - rewinder_.getRobotCurr().getGoalPositionY() ); - double dist_to_goal_yaw = std::abs( + offset_yaw_ = std::abs( angles::shortest_angular_distance( 
rewinder_.getRobotCurr().getOrientationYaw(), rewinder_.getRobotCurr().getGoalOrientationYaw() @@ -59,8 +64,8 @@ class GoalReached: public Metric { ); // save results - tolerance_xy_violated_ = dist_to_goal_xy > tolerance_xy_; - tolerance_yaw_violated_ = std::abs(dist_to_goal_yaw) > std::abs(tolerance_yaw_); + tolerance_xy_violated_ = offset_xy_ > tolerance_xy_; + tolerance_yaw_violated_ = std::abs(offset_yaw_) > std::abs(tolerance_yaw_); } ); rewinder_.perform(); From 0874845e5de9a2acc64af6e896c3ff381806c852 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 12 Feb 2024 16:39:49 +0100 Subject: [PATCH 49/81] `evaluation` - experiment duration logged in `rewind_experiment` script --- srpb_evaluation/scripts/rewind_experiment.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/srpb_evaluation/scripts/rewind_experiment.py b/srpb_evaluation/scripts/rewind_experiment.py index c726086..d29f320 100644 --- a/srpb_evaluation/scripts/rewind_experiment.py +++ b/srpb_evaluation/scripts/rewind_experiment.py @@ -302,6 +302,8 @@ def main( log_entities['groups'].append(group) # create timestamps + last_timestamp_of_experiment = log_entities['robot'][-1].get_timestamp() + print(f"The last timestamp of the experiment is {last_timestamp_of_experiment}") timestamps = extract_timestamps(log_entities['robot'], timestamp_max) print(f"Prepared timestamps from {timestamps[0]} to {timestamps[-1]} with {len(timestamps)} entries") From c939eef3be70eeefa3baa8b01720ae16b432dd49 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 18 Feb 2024 21:20:29 +0100 Subject: [PATCH 50/81] `evaluation` - extended spreadsheet tooling, added function to read also raw values from the sheet (not only filtered); updated `latex_table_from_results` --- .../create_latex_table_from_results.py | 21 ++- srpb_evaluation/scripts/excel_sheet_utils.py | 126 +++++++++++++++--- 2 files changed, 125 insertions(+), 22 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py 
b/srpb_evaluation/scripts/create_latex_table_from_results.py index d269d6d..b427942 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -80,7 +80,26 @@ def get_metric_value_for_planner( f"Available planners are: '{scenario_results.keys()}'. " f"This might be intentional, returning NaN." ) - return metric_val + # value will store the value of the metric (if able to parse correctly) + value = None + if isinstance(metric_val, float): + value = metric_val + elif isinstance(metric_val, list): + if not len(metric_val): + raise Exception(f"'{metric_id}' metric value for the planner '{planner_name}' is an empty list") + # arbitrarily selecting the first element in the list + value = metric_val[0] + # treat as a warning when the list is bigger than 1-element + if len(metric_val) > 1: + print( + f"\033[93m" + f"'{metric_id}' metric value for the planner '{planner_name}' is '{len(metric_val)}'-elem list, " + f"arbitrarily selecting the first element '{value}'" + "\033[0m" + ) + else: + raise Exception(f"'{metric_id}' metric value for the planner '{planner_name}' is of unsupported type") + return value tex = str("") diff --git a/srpb_evaluation/scripts/excel_sheet_utils.py b/srpb_evaluation/scripts/excel_sheet_utils.py index e8088a3..b392b8a 100644 --- a/srpb_evaluation/scripts/excel_sheet_utils.py +++ b/srpb_evaluation/scripts/excel_sheet_utils.py @@ -1,34 +1,60 @@ from openpyxl import load_workbook +from openpyxl import Workbook from pathlib import Path from typing import Dict +from typing import List # name of the sheet in the Excel file with the results SHEET_NAME = "SRPB" # upper left cell - cell that initiates results table +RESULT_RAW_INIT_COL = 'A' +RESULT_RAW_INIT_ROW = '1' +# filtered (mean/median) RESULT_INIT_COL = 'A' RESULT_INIT_ROW = '50' -def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: +def increment_column(col: str): """ - Reads the spreadsheet following 
the convention introduced in `create_excel_from_results.calculate_sheet` script + Increments the ID of the column using A-Z characters, e.g., passing Z gives AA + :param col column ID to be incremented """ + if col == None: + raise Exception("Could not increment 'None'") + + # lambda that works for chars in A-Z range + increment_char = lambda c : chr(ord(c) + 1) + + # Generated by ChatGPT + if col == "": + return "A" + if col[-1] != 'Z': + return col[:-1] + increment_char(col[-1]) + else: + i = len(col) - 1 + while i >= 0 and col[i] == 'Z': + i -= 1 + if i == -1: + return 'A' * (len(col) + 1) + else: + return col[:i] + increment_char(col[i]) + 'A' * (len(col) - i - 1) - # NOTE: data_only allows to read values instead of formulas - wb = load_workbook(filename = str(path), data_only=True) - # access specific sheet - sheet = wb[SHEET_NAME] - # upper left cell - cell that initiates results table - col_init = RESULT_INIT_COL - row_init = RESULT_INIT_ROW +def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: int) -> Dict[str, Dict[str, float]]: + """ + Reads the spreadsheet following the convention introduced in `create_excel_from_results.calculate_sheet` script + :param wb loaded workbook of the spreadsheet + :param sheet_name specific sheet to access + :param col_init column of the upper left cell - cell that initiates results table + :param row_init row of the upper left cell - cell that initiates results table + """ + + sheet = wb[sheet_name] col_metric_ids = col_init # lambda that creates col-row tuple to access cell in a sheet make_sheet_cell = lambda row, col : str(col + row) - # lambda that works for chars in A-Z range - increment_char = lambda c : chr(ord(c) + 1) # lambda that increments integer stored as string increment_row = lambda row : str(int(row) + 1) # lambda that retrieves value of sheet's cell @@ -36,11 +62,15 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: # lambda that checks if certain cell in the 
sheet is empty sheet_cell_empty = lambda sheet, row, col : get_sheet_val(sheet=sheet, row=row, col=col) == None - # local database with planners' metrics + # local database with planners' metrics, actually: + # Dict[str, Dict[str, List[float]]] + # str: planner ID + # str: metric ID + # list[float]: metric values for the subsequent trials data = {} # to iterate over planners - skip first, avoiding 'Planner' - col_iter = increment_char(col_init) + col_iter = increment_column(col_init) # to iterate over metrics row_iter = increment_row(row_init) # iterate over planners (names) @@ -61,25 +91,79 @@ def load_data_from_excel(path: Path) -> Dict[str, Dict[str, float]]: f"The metric ID cell `{col_metric_ids}{row_iter}` contains `{metric_name}` " f"and the metric value cell `{col_iter}{row_iter}` contains `{metric_value}`. " f"Cannot proceed with such values. Check whether the cells in your spreadsheet are empty. " - f"If they aren't, try to open the sheet at '{path}' and simply save it using Excel " - f"or LibreOffice Calc." + f"If they aren't, try to open the sheet and simply save it using Excel or LibreOffice Calc." 
) + # try to convert + metric_name = str(metric_name) + try: + metric_value = float(metric_value) + except ValueError: + # try to proceed to the next metric + row_iter = increment_row(row_iter) + print( + f"Could not convert '{metric_value}' into float, proceeding to the next row '{row_iter}'" + f" in column '{col_iter}'" + ) + continue # collect - metric_names.append(str(metric_name)) - metric_values.append(float(metric_value)) + metric_names.append(metric_name) + metric_values.append(metric_value) # print(f"Checking metric ID {metric_names[-1]}, val {metric_values[-1]}") # try to proceed to the next metric row_iter = increment_row(row_iter) # finished collecting metrics for a given planner metrics = dict(zip(metric_names, metric_values)) - # append to overall results - result = {planner_name: metrics} - data.update(result) + + # update the planner data with the new set of metrics + if not planner_name in data.keys(): + # initialize key in dict + data[planner_name] = {} + for metric_id, metric_value in metrics.items(): + # assert that values are stored as lists + data[planner_name][metric_id] = [metric_value] + else: + # ready to append new values to an existing lists + for metric_id in data[planner_name].keys(): + data[planner_name][metric_id].append(metrics[metric_id]) + + # make sure that the same metrics are included in each new set of metrics + num_entries = None + for metric_id in data[planner_name].keys(): + prev_num_entries = num_entries + num_entries = len(data[planner_name][metric_id]) + if prev_num_entries == None: + continue + if num_entries != prev_num_entries: + raise Exception( + f"Number of value entries for metric '{metric_id}' is different than for the previous " + f"one ({num_entries} vs {prev_num_entries})" + ) # try to proceed to the next planner - col_iter = increment_char(col_iter) + col_iter = increment_column(col_iter) # reset metrics row 'pointer' row_iter = increment_row(row_init) return data + + +def load_data_from_excel(path: Path) -> 
Dict[str, Dict[str, float]]: + """ + Reads the filtered data from the spreadsheet following the convention introduced in + `create_excel_from_results.calculate_sheet` function + """ + + # NOTE: data_only allows to read values instead of formulas + wb = load_workbook(filename = str(path), data_only=True) + return read_data_from_excel(wb, SHEET_NAME, RESULT_INIT_COL, RESULT_INIT_ROW) + + +def load_raw_data_from_excel(path: Path) -> Dict[str, Dict[str, List[float]]]: + """ + Reads the raw data from the spreadsheet following the convention introduced in + `create_excel_from_results.calculate_sheet` function + """ + # NOTE: data_only allows to read values instead of formulas + wb = load_workbook(filename = str(path), data_only=True) + return read_data_from_excel(wb, SHEET_NAME, RESULT_RAW_INIT_COL, RESULT_RAW_INIT_ROW) From ba84c661e7cb11d347cef1157d39972f5138fa06 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 18 Feb 2024 21:50:44 +0100 Subject: [PATCH 51/81] `evaluation` - modernized `create_excel_from_results` script (`cell_coords_to_sheet_cell_id` fun. 
uses `utils` now) - this also seems to fix omitting some entries while creating a spreadsheet from the big amount of data --- .../scripts/create_excel_from_results.py | 35 +++++-------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index e23f70c..2566d06 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -10,10 +10,10 @@ import csv import excel_sheet_utils import glob +import math import re import sys -from string import ascii_uppercase from openpyxl import Workbook from pathlib import Path from srpb_metrics import SrpbMetrics @@ -33,7 +33,7 @@ def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=3): dirnames_valid = [] for dirname in dirnames: if re.search('ignore', dirname, re.IGNORECASE): - print(f'Ignoring a directory `{dirname}` from including to the overall `{planner_name}` planner results') + print(f'Ignoring a directory `{dirname}` from including in the overall `{planner_name}` planner results') continue dirnames_valid.append(dirname) @@ -136,32 +136,14 @@ def cell_coords_to_sheet_cell_id(row: int, col: int): col_w_offset = col + 1 cell_row = str(row_w_offset) - found_col = False + if not math.isfinite(row_w_offset) or not math.isfinite(col_w_offset): + exit(f"Could not find a valid column ID for ({row}, {col}) configuration") + it_col = 0 - cell_col = '' - for c in ascii_uppercase: - if it_col == col_w_offset: - cell_col = str(c) - found_col = True - break + cell_col = excel_sheet_utils.RESULT_RAW_INIT_COL + while it_col != col_w_offset: + cell_col = excel_sheet_utils.increment_column(cell_col) it_col = it_col + 1 - # repeat once again if required (doubling letters) - columns A - ZZ cover a sufficient number of cases - if not found_col: - # first letter of the cell address - for first in ascii_uppercase: - # second letter of the cell 
address - for second in ascii_uppercase: - # print(f'[cell_coords_to_sheet_cell_id] row {row}, col {col}, cell row {cell_row} | it col {it_col}, ADDR `{first}{second}`') - if it_col == col_w_offset: - cell_col = str(first) + str(second) - found_col = True - break - it_col = it_col + 1 - # break the outer loop if possible - if found_col: - break - if not found_col: - exit(f"Could not find a valid column ID for ({row}, {col}) configuration") # append row cell_id = str(cell_col) + str(cell_row) @@ -281,6 +263,7 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict) planners = sys.argv[2:] else: planners = ['teb', 'dwa', 'trajectory', 'eband', 'hateb', 'cohan'] + print(f'Using the default list of planners!') print(f'Investigated planners: {planners}') results = collect_results_planners(logs_dir, planners) From 65b0c4c70fb62846ca7a3ce5b08ec418556cd7b7 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 18 Feb 2024 21:51:27 +0100 Subject: [PATCH 52/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index 25c15df..2425638 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.7 + 0.9.8 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From c90e14eb876304217364a55813844205dce55f8c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 18 Feb 2024 23:50:20 +0100 Subject: [PATCH 53/81] `evaluation` - added script creating a violin/box plot from a spreadsheet with results --- .../scripts/create_box_plot_from_results.py | 127 ++++++++++++++++++ .../scripts/create_box_plot_from_results.yaml | 16 +++ 2 files changed, 143 insertions(+) create mode 100644 srpb_evaluation/scripts/create_box_plot_from_results.py create mode 100644 
srpb_evaluation/scripts/create_box_plot_from_results.yaml diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py new file mode 100644 index 0000000..9515199 --- /dev/null +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -0,0 +1,127 @@ +''' +Script that creates a .pdf file with a violin/box plot presenting results + +Dependencies: +sudo apt install python3-openpyxl +sudo apt install python3-matplotlib +''' + +import argparse +import excel_sheet_utils +import matplotlib.pyplot as plt +import yaml + + +def remap_names(orig_names: list, remappings: dict): + # remap the original IDs (names) + names = [] + for name in orig_names: + label = remappings.get(name) + # check if found; if not - do not change the name + if not label == None: + names.append(label) + else: + names.append(name) + return names + + +if __name__ == "__main__": + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", type=str, help="Path to the SRPB results sheet") + cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument("--config", type=str, help="Path to the plot configuration file") + cli.add_argument("--metric", type=str, help="ID of the metric") + # optional arguments + cli.add_argument( + "--planners", + nargs="*", + type=str, + help="Space-separated list of planners to include in the plot (all appearing in the results file \ + are included by default)" + ) + # Example usage: + # python3 \ + # create_box_plot_from_results.py \ + # \ + # test.pdf \ + # --config create_box_plot_from_results.yaml \ + # --metric m_mef \ + # --planners teb dwa + # + + # parse the command line + args = cli.parse_args() + + sheet_path = args.input + config_path = args.config + metric = args.metric + output_path = args.output + planners_raw = args.planners + + # load dict from the file + with 
open(config_path) as f: + cfg = yaml.safe_load(f) + + # load data from the spreadsheet (either only filtered data - bar plot, or all data for statistical presentation) + raw_data = excel_sheet_utils.load_raw_data_from_excel(sheet_path) + + # use all included in the input sheet + if planners_raw == None or not len(planners_raw): + print(f"Using planner IDs from the input spreadsheet as none were selected explicitly") + planners_raw = [] + keys = raw_data.keys() + for planner_id in keys: + planners_raw.append(planner_id) + + # + # Structure of data per individual metric: + # + # data = [ + # # -> ..., , ..., + # [0.0, 0.0, 0.0, 0.0, 0.0], # ^ + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner X + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner Y + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner Z + # [0.0, 0.0, 0.0, 0.0, 0.0], # data of planner W + # [0.0, 0.0, 0.0, 0.0, 0.0] # | + # ] + # + # prepare the loaded data according to the example above + data = [] + for planner in planners_raw: + data.append(raw_data[planner][metric]) + + # remap the original IDs (names) of the planners + planners = remap_names(planners_raw, cfg['labels']) + + # Based on the example: https://matplotlib.org/stable/gallery/statistics/boxplot_vs_violin.html + if cfg['figure']['size_width'] == None or cfg['figure']['size_height'] == None: + fig, ax = plt.subplots() + else: + fig, ax = plt.subplots(figsize=(cfg['figure']['size_height'], cfg['figure']['size_width'])) + + if cfg['plot_type'] == "violin": + ax.violinplot( + data, + showmeans=False, + showmedians=True + ) + elif cfg['plot_type'] == "box": + ax.boxplot(data) + else: + raise Exception(f"Unknown plot_type") + + ax.set_title(cfg['figure']['title']) + ax.xaxis.grid(cfg['figure']['xaxis_grid']) + ax.yaxis.grid(cfg['figure']['yaxis_grid']) + ax.set_xlabel(cfg['figure']['xlabel']) + ax.set_ylabel(cfg['figure']['ylabel']) + ax.set_xticks(range(1, len(data) + 1)) + ax.set_xticklabels(planners) + plt.show() + + # save the figure + 
fig.savefig(output_path, bbox_inches='tight', pad_inches=0) + print(f"Plot saved to `{output_path}` file") diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.yaml b/srpb_evaluation/scripts/create_box_plot_from_results.yaml new file mode 100644 index 0000000..4a91856 --- /dev/null +++ b/srpb_evaluation/scripts/create_box_plot_from_results.yaml @@ -0,0 +1,16 @@ +# (violin|box) +plot_type: violin + +figure: + size_width: 4 + size_height: 9 + title: Title + xlabel: Samples + ylabel: Values + xaxis_grid: True + yaxis_grid: True + +labels: + # defines how the planners will be named according to their IDs + teb: TEB + dwa: DWA From 5a9dd12aa8b0f2062e1a16b791cc575bc4dc6841 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 21 Feb 2024 20:44:46 +0100 Subject: [PATCH 54/81] `evaluation` - added main guard in `create_excel_from_results` Python script --- .../scripts/create_excel_from_results.py | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 2566d06..58d01b4 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -241,68 +241,68 @@ def calculate_sheet(wb: Workbook, planner_names: List[str], results_total: Dict) col_planner = increment_char(col_planner) -######################################################## -# command line arguments -if len(sys.argv) == 1: - print(f'Usage: ') - print(f'') - print(f' python3 {sys.argv[0]} <(OPTIONAL) space-separated planner names>') - print(f'') - print(f' 1. The main directory with logs should store the log files grouped into separate directories, ') - print(f' each containing a robot, people and group data.') - print(f' 2. 
In order to detect which planners to include into results sheet, each name of a planner ') - print(f' must be a part of log filenames.') - print(f'') - exit() - -logs_dir = str(sys.argv[1]) -print(f'Name of directory with logs: {logs_dir}') - -planners = [] -if not len(sys.argv) == 2: - planners = sys.argv[2:] -else: - planners = ['teb', 'dwa', 'trajectory', 'eband', 'hateb', 'cohan'] - print(f'Using the default list of planners!') - -print(f'Investigated planners: {planners}') -results = collect_results_planners(logs_dir, planners) - -############################################### -# documentation at https://openpyxl.readthedocs.io/en/stable/index.html -wb = Workbook() - -# grab the active worksheet (workbook always creates an arbitrary sheet) -ws_init = wb.active -# create a sheet with a custom name -ws = wb.create_sheet(title=excel_sheet_utils.SHEET_NAME) -# delete the sheet initially created -wb.remove_sheet(worksheet=ws_init) - -# Prepare rows of data and append them to the sheet -rows = prepare_sheet_rows(results) -for row in rows: - ws.append(row) - -# make some calculations -calculate_sheet(ws, planners, results) - -# Prepare name of the output file -output_filename = 'results' + '_' + Path(logs_dir).name -for planner in planners: - output_filename = output_filename + '_' + planner -output_filename = output_filename.rstrip('_') + '.xlsx' -output_path = Path(logs_dir).parent.absolute() / output_filename - -# Save the file -wb.save(str(output_path)) - -print(f'Results saved in: {output_path}') -print("") -# When the sheet with the results is not opened and saved by the Excel or LibreOffice Calc, then reading a non-empty -# cell will probably return None -# Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ -# Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y -print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') -print(f'It will produce cached values based 
on formulas written (`openpyxl` library is not able to do so).') -print(f'This is a necessary step when one wants to use the script that creates a LaTeX table from a spreadsheet') +if __name__ == "__main__": + # command line arguments + if len(sys.argv) == 1: + print(f'Usage: ') + print(f'') + print(f' python3 {sys.argv[0]} <(OPTIONAL) space-separated planner names>') + print(f'') + print(f' 1. The main directory with logs should store the log files grouped into separate directories, ') + print(f' each containing a robot, people and group data.') + print(f' 2. In order to detect which planners to include into results sheet, each name of a planner ') + print(f' must be a part of log filenames.') + print(f'') + exit() + + logs_dir = str(sys.argv[1]) + print(f'Name of directory with logs: {logs_dir}') + + planners = [] + if not len(sys.argv) == 2: + planners = sys.argv[2:] + else: + planners = ['teb', 'dwa', 'trajectory', 'eband', 'hateb', 'cohan'] + print(f'Using the default list of planners!') + + print(f'Investigated planners: {planners}') + results = collect_results_planners(logs_dir, planners) + + ############################################### + # documentation at https://openpyxl.readthedocs.io/en/stable/index.html + wb = Workbook() + + # grab the active worksheet (workbook always creates an arbitrary sheet) + ws_init = wb.active + # create a sheet with a custom name + ws = wb.create_sheet(title=excel_sheet_utils.SHEET_NAME) + # delete the sheet initially created + wb.remove_sheet(worksheet=ws_init) + + # Prepare rows of data and append them to the sheet + rows = prepare_sheet_rows(results) + for row in rows: + ws.append(row) + + # make some calculations + calculate_sheet(ws, planners, results) + + # Prepare name of the output file + output_filename = 'results' + '_' + Path(logs_dir).name + for planner in planners: + output_filename = output_filename + '_' + planner + output_filename = output_filename.rstrip('_') + '.xlsx' + output_path = 
Path(logs_dir).parent.absolute() / output_filename + + # Save the file + wb.save(str(output_path)) + + print(f'Results saved in: {output_path}') + print("") + # When the sheet with the results is not opened and saved by the Excel or LibreOffice Calc, then reading a non-empty + # cell will probably return None + # Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ + # Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y + print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') + print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') + print(f'This is a necessary step when one wants to use the script that creates a LaTeX table from a spreadsheet') From c20dfc895efb66bbe9d14cd00fd41add5dd8c98e Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 21 Feb 2024 20:47:08 +0100 Subject: [PATCH 55/81] `evaluation` - `excel_sheet_utils`: internal lambdas from `read_data_from_excel` extracted as separate functions --- srpb_evaluation/scripts/excel_sheet_utils.py | 42 +++++++++++++++----- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/srpb_evaluation/scripts/excel_sheet_utils.py b/srpb_evaluation/scripts/excel_sheet_utils.py index b392b8a..e7d7c26 100644 --- a/srpb_evaluation/scripts/excel_sheet_utils.py +++ b/srpb_evaluation/scripts/excel_sheet_utils.py @@ -1,5 +1,6 @@ from openpyxl import load_workbook from openpyxl import Workbook +from openpyxl.worksheet import Worksheet from pathlib import Path from typing import Dict from typing import List @@ -41,6 +42,34 @@ def increment_column(col: str): return col[:i] + increment_char(col[i]) + 'A' * (len(col) - i - 1) +def increment_row(row: int): + """ + Increments integer stored as string + """ + return str(int(row) + 1) + + +def make_sheet_cell(row: int, col: str): + """ + Creates col-row tuple to access cell in a sheet + """ + return str(col + row) + + +def 
get_sheet_val(sheet: Worksheet, row: int, col: str): + """ + Retrieves a value of sheet's cell + """ + return sheet[make_sheet_cell(col=col, row=row)].value + + +def is_sheet_cell_empty(sheet: Worksheet, row: int, col: str): + """ + Checks if certain cell in the sheet is empty + """ + return get_sheet_val(sheet=sheet, row=row, col=col) == None + + def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: int) -> Dict[str, Dict[str, float]]: """ Reads the spreadsheet following the convention introduced in `create_excel_from_results.calculate_sheet` script @@ -53,15 +82,6 @@ def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: sheet = wb[sheet_name] col_metric_ids = col_init - # lambda that creates col-row tuple to access cell in a sheet - make_sheet_cell = lambda row, col : str(col + row) - # lambda that increments integer stored as string - increment_row = lambda row : str(int(row) + 1) - # lambda that retrieves value of sheet's cell - get_sheet_val = lambda sheet, row, col : sheet[make_sheet_cell(col=col, row=row)].value - # lambda that checks if certain cell in the sheet is empty - sheet_cell_empty = lambda sheet, row, col : get_sheet_val(sheet=sheet, row=row, col=col) == None - # local database with planners' metrics, actually: # Dict[str, Dict[str, List[float]]] # str: planner ID @@ -74,14 +94,14 @@ def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: # to iterate over metrics row_iter = increment_row(row_init) # iterate over planners (names) - while not sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): + while not is_sheet_cell_empty(sheet=sheet, row=row_init, col=col_iter): # save planner name for later use in database planner_name = str(get_sheet_val(sheet=sheet, row=row_init, col=col_iter)) # print(f"Checking sheet at {make_sheet_cell(col=col_iter, row=row_init)} for planner {planner_name}") # got a valid planner name - iterate over saved metrics metric_names = [] 
metric_values = [] - while not sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): + while not is_sheet_cell_empty(sheet=sheet, row=row_iter, col=col_metric_ids): # obtain sheet values metric_name = get_sheet_val(sheet=sheet, row=row_iter, col=col_metric_ids) metric_value = get_sheet_val(sheet=sheet, row=row_iter, col=col_iter) From 040653d0508870b5e4621c621967a47e42f4affd Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 21 Feb 2024 21:02:59 +0100 Subject: [PATCH 56/81] `evaluation` - added a script that clears out a spreadsheet from metrics related to unfinished trials (relies on `create_excel_from_results`) --- .../remove_failed_trials_from_excel.py | 119 ++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 srpb_evaluation/scripts/remove_failed_trials_from_excel.py diff --git a/srpb_evaluation/scripts/remove_failed_trials_from_excel.py b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py new file mode 100644 index 0000000..2dafb74 --- /dev/null +++ b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py @@ -0,0 +1,119 @@ +''' +Opens an Excel file with results obtained from SRPB logs, searches for trials that were not +successful (unfinished), and clears the metrics related to those trials so the failed trials are not considered +in filtering. 
+Note that the Excel spreadsheet must be previously generated with the relevant script, +i.e., 'create_excel_from_results.py', as this script strictly follows the spreadsheet data placement introduced +in the mentioned 'create_excel_from_results.py' + +Dependency: +sudo apt install python3-openpyxl +''' + +import argparse +import excel_sheet_utils +import shutil + +from excel_sheet_utils import get_sheet_val +from excel_sheet_utils import increment_column +from excel_sheet_utils import increment_row +from excel_sheet_utils import is_sheet_cell_empty +from excel_sheet_utils import make_sheet_cell + +from openpyxl import load_workbook +from openpyxl.worksheet import Worksheet + +from pathlib import Path + + +def remove_unfinished_trials( + ws: Worksheet, + metric_cond_name: str, + metric_cond_remove_val: float, + row_init: int, + col_init: str +): + # iterate over column with subsequent trials + col_it = col_init + # find the row of the target metric + row_it = row_init + while not get_sheet_val(ws, row_it, col_it) == metric_cond_name: + row_it = increment_row(row_it) + # save + row_target_metric = row_it + + # iterate along the same row + while not is_sheet_cell_empty(ws, row_init, col_it): + remove_trial_if_unsuccessful(ws, row_target_metric, col_it, metric_cond_remove_val) + col_it = increment_column(col_it) + + +def remove_trial_if_unsuccessful( + ws: Worksheet, + metric_cond_row: int, + metric_cond_col: str, + metric_cond_value_to_remove +): + """ + Assumes that the target metric is located above the metrics to remove + """ + cond_value = get_sheet_val(ws, metric_cond_row, metric_cond_col) + if not cond_value == metric_cond_value_to_remove: + return + + # do not remove the conditional value + row_it = increment_row(metric_cond_row) + # iterate over rows to remove all metrics in a given column (related to a given trial) + while not is_sheet_cell_empty(ws, row_it, metric_cond_col): + cell = make_sheet_cell(row_it, metric_cond_col) + ws[cell] = None + row_it = 
increment_row(row_it) + + +if __name__ == "__main__": + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", help="Path to an input spreadsheet with SRPB results") + cli.add_argument("output", nargs='?', help="Path to an output spreadsheet with SRPB results; empty overwrites the input file") + + # parse the command line + args = cli.parse_args() + + # location of the input/output files + input_path = Path(args.input) + # if optional argument is empty, input file will be overwritten + if args.output == None: + output_path = input_path + # create a temporary backup just in case + shutil.copyfile(input_path, "/tmp/" + str(input_path.stem) + str(input_path.suffix)) + else: + output_path = Path(args.output) + + TARGET_METRIC_NAME = 'm_goal' + TARGET_METRIC_VALUE_REMOVE = 0 + + # Load the spreadsheet contents and operate directly on them + # NOTE: False keeps the functions etc. instead of raw data (True) + wb = load_workbook(filename=input_path, data_only=False) + remove_unfinished_trials( + wb[excel_sheet_utils.SHEET_NAME], + TARGET_METRIC_NAME, + TARGET_METRIC_VALUE_REMOVE, + excel_sheet_utils.RESULT_RAW_INIT_ROW, + excel_sheet_utils.RESULT_RAW_INIT_COL + ) + + # Save the file + wb.save(output_path) + + # Copied from 'create_excel_from_results.py' + print(f'Results saved in: {output_path}') + print("") + # When the sheet with the results is not opened and saved by the Excel or LibreOffice Calc, then reading a non-empty + # cell will probably return None + # Ref1: https://itecnote.com/tecnote/python-openpyxl-data_onlytrue-returning-none/ + # Ref2: https://groups.google.com/g/openpyxl-users/c/GbBOnOa8g7Y + print(f'Consider opening the results file and saving it with Excel/LibreOffice Calc (without any modifications).') + print(f'It will produce cached values based on formulas written (`openpyxl` library is not able to do so).') + print(f'This is a necessary step when one wants to use the script 
that creates a LaTeX table from a spreadsheet') From e3ef324903f862888fc36de0bf0903ea643e2a77 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Fri, 23 Feb 2024 20:31:53 +0100 Subject: [PATCH 57/81] `evaluation` - configurable font family in the `rewind_experiment` script --- srpb_evaluation/scripts/rewind_experiment.py | 11 +++++++++-- srpb_evaluation/scripts/rewind_experiment.yaml | 2 ++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/srpb_evaluation/scripts/rewind_experiment.py b/srpb_evaluation/scripts/rewind_experiment.py index d29f320..1850e8b 100644 --- a/srpb_evaluation/scripts/rewind_experiment.py +++ b/srpb_evaluation/scripts/rewind_experiment.py @@ -153,9 +153,15 @@ def main( norm=mpl.colors.Normalize(vmin=timestamps[0], vmax=timestamps[-1]), orientation='vertical' ) + font_cfg = { + 'family': visuals['legend']['fontfamily'], + 'size': visuals['legend']['fontsize'], + } + colorbar.set_label('Seconds', fontdict=font_cfg) # https://stackoverflow.com/a/67438742 - colorbar.ax.tick_params(labelsize=visuals['legend']['fontsize']) - colorbar.set_label('Seconds', fontsize=visuals['legend']['fontsize']) + colorbar.ax.tick_params(labelsize=font_cfg['size']) + # Update tick label font family + colorbar.ax.set_yticklabels(colorbar.ax.get_yticklabels(), fontdict=font_cfg) # Save the auxiliary figure tim_scale_filename = vis_name + '_timing' + '.pdf' @@ -209,6 +215,7 @@ def main( visuals['legend']['width'] = config['vis']['legend']['width'] visuals['legend']['height'] = config['vis']['legend']['height'] visuals['legend']['fontsize'] = config['vis']['legend']['fontsize'] + visuals['legend']['fontfamily'] = config['vis']['legend']['fontfamily'] log_file_robot = log_file_basename + '_robot.txt' log_file_people = log_file_basename + '_people.txt' diff --git a/srpb_evaluation/scripts/rewind_experiment.yaml b/srpb_evaluation/scripts/rewind_experiment.yaml index e27f203..d668910 100644 --- a/srpb_evaluation/scripts/rewind_experiment.yaml +++ 
b/srpb_evaluation/scripts/rewind_experiment.yaml @@ -39,3 +39,5 @@ vis: width: 0.25 height: 5 fontsize: 17 + # (cursive|fantasy|monospace|sans|sans serif|sans-serif|serif) + fontfamily: serif From 46c850fc909c23cc5dc47dc1ff846ee43c320f09 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Fri, 23 Feb 2024 20:33:20 +0100 Subject: [PATCH 58/81] `evaluation` - configurable output fig. extension and DPI in the `rewind_experiment` script --- srpb_evaluation/scripts/rewind_experiment.py | 12 +++++++----- srpb_evaluation/scripts/rewind_experiment.yaml | 4 ++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/srpb_evaluation/scripts/rewind_experiment.py b/srpb_evaluation/scripts/rewind_experiment.py index 1850e8b..b426e76 100644 --- a/srpb_evaluation/scripts/rewind_experiment.py +++ b/srpb_evaluation/scripts/rewind_experiment.py @@ -135,8 +135,8 @@ def main( ax.get_yaxis().set_visible(False) # save figure with the experiment data - output_filename = vis_name + '.pdf' - fig.savefig(output_filename, bbox_inches='tight', pad_inches=0) + output_filename = vis_name + '.' + str(visuals['figure']['extension']) + fig.savefig(output_filename, bbox_inches='tight', pad_inches=0, dpi=visuals['figure']['dpi']) print(f"Logged experiment data saved to `{output_filename}` file") # blocking call until window with the plot is closed @@ -164,8 +164,8 @@ def main( colorbar.ax.set_yticklabels(colorbar.ax.get_yticklabels(), fontdict=font_cfg) # Save the auxiliary figure - tim_scale_filename = vis_name + '_timing' + '.pdf' - fig_tim_scale.savefig(tim_scale_filename, bbox_inches='tight', pad_inches=0) + tim_scale_filename = vis_name + '_timing' + '.' 
+ str(visuals['figure']['extension']) + fig_tim_scale.savefig(tim_scale_filename, bbox_inches='tight', pad_inches=0, dpi=visuals['figure']['dpi']) print(f"Figure with colorized passage of time saved to `{tim_scale_filename}` file") # show the timing legend figure @@ -206,7 +206,9 @@ def main( timestamp_max = config['log']['max_timestamp'] # load visual configuration - visuals = {'people': {}, 'groups': {}, 'legend': {}} + visuals = {'figure': {}, 'people': {}, 'groups': {}, 'legend': {}} + visuals['figure']['extension'] = config['vis']['figure']['extension'] + visuals['figure']['dpi'] = config['vis']['figure']['dpi'] visuals['people']['ec'] = config['vis']['people']['edge_color'] visuals['people']['radius'] = config['vis']['people']['radius'] visuals['groups']['edge_alpha'] = config['vis']['groups']['edge_alpha'] diff --git a/srpb_evaluation/scripts/rewind_experiment.yaml b/srpb_evaluation/scripts/rewind_experiment.yaml index d668910..56f886a 100644 --- a/srpb_evaluation/scripts/rewind_experiment.yaml +++ b/srpb_evaluation/scripts/rewind_experiment.yaml @@ -26,6 +26,10 @@ map: # map coordinates (x and y) of additional points to be drawn (list of 2-element lists) additional_pts: [] vis: + figure: + # (pdf|png|jpeg) affects the format of the output figures + extension: png + dpi: 100 people: # full RGBA edge_color: [0.23, 0.23, 0.23, 0.10] From bba17bf9584b15a6aa7e4ea9d66cb5efdda01538 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 01:11:32 +0200 Subject: [PATCH 59/81] `evaluation` - added usage note in script creating a violin/box plot from a spreadsheet with results --- srpb_evaluation/scripts/create_box_plot_from_results.py | 1 + 1 file changed, 1 insertion(+) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py index 9515199..a9a492a 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.py +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ 
-113,6 +113,7 @@ def remap_names(orig_names: list, remappings: dict): else: raise Exception(f"Unknown plot_type") + # NOTE: converts to "LaTeX-style" if a string (or its part) is written as "$a^x$" ax.set_title(cfg['figure']['title']) ax.xaxis.grid(cfg['figure']['xaxis_grid']) ax.yaxis.grid(cfg['figure']['yaxis_grid']) From f179b6115fbb67bcc38920457458b58f4a9d0594 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 02:25:57 +0200 Subject: [PATCH 60/81] `evaluation` - `excel_sheet_utils` detects failed trials during the processing of the spreadsheet; adjusted related script `remove_failed_trials_from_excel` --- srpb_evaluation/scripts/excel_sheet_utils.py | 24 +++++++++++++------ .../remove_failed_trials_from_excel.py | 3 ++- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/srpb_evaluation/scripts/excel_sheet_utils.py b/srpb_evaluation/scripts/excel_sheet_utils.py index e7d7c26..e774a63 100644 --- a/srpb_evaluation/scripts/excel_sheet_utils.py +++ b/srpb_evaluation/scripts/excel_sheet_utils.py @@ -15,6 +15,9 @@ RESULT_INIT_COL = 'A' RESULT_INIT_ROW = '50' +# Must be non-float and cannot be None, as that would break detection of that special case +FAILED_TRIAL_METRIC_VALUE = '#FAIL#' + def increment_column(col: str): """ @@ -118,13 +121,20 @@ def read_data_from_excel(wb: Workbook, sheet_name: str, col_init: str, row_init: try: metric_value = float(metric_value) except ValueError: - # try to proceed to the next metric - row_iter = increment_row(row_iter) - print( - f"Could not convert '{metric_value}' into float, proceeding to the next row '{row_iter}'" - f" in column '{col_iter}'" - ) - continue + # EDGE CASE detection: when iterating through the spreadsheet on which `remove_failed_trials_from_excel.py` + # has been called, it may happen that metric is indicated as invalid - set it to None + if not metric_value == FAILED_TRIAL_METRIC_VALUE: + # try to proceed to the next metric (this is the normal operation) + row_iter = 
increment_row(row_iter) + print( + f"Could not convert '{metric_value}' into float, proceeding to the next row '{row_iter}'" + f" in column '{col_iter}'" + ) + continue + else: + # convert failure indicator to None and proceed normally (FAILED_TRIAL_METRIC_VALUE is used for + # distinguishing failed trial cells from out-of-range cells) + metric_value = None # collect metric_names.append(metric_name) metric_values.append(metric_value) diff --git a/srpb_evaluation/scripts/remove_failed_trials_from_excel.py b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py index 2dafb74..69746fd 100644 --- a/srpb_evaluation/scripts/remove_failed_trials_from_excel.py +++ b/srpb_evaluation/scripts/remove_failed_trials_from_excel.py @@ -66,7 +66,8 @@ def remove_trial_if_unsuccessful( # iterate over rows to remove all metrics in a given column (related to a given trial) while not is_sheet_cell_empty(ws, row_it, metric_cond_col): cell = make_sheet_cell(row_it, metric_cond_col) - ws[cell] = None + # NOTE: the cell is filled with a special value stored under the constant + ws[cell] = excel_sheet_utils.FAILED_TRIAL_METRIC_VALUE row_it = increment_row(row_it) From 72dbf5459e5113e325932afc6d7a7e6472056202 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 02:29:27 +0200 Subject: [PATCH 61/81] `evaluation` - adjusted `create_box_plot` to handle incomplete data (`None`s omitted) --- .../scripts/create_box_plot_from_results.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py index a9a492a..7a8ce6b 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.py +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -77,6 +77,7 @@ def remap_names(orig_names: list, remappings: dict): # # Structure of data per individual metric: + # (the assumption of each list with an equal length applies when the dataset is perfectly 
clean) # # data = [ # # -> ..., , ..., @@ -91,7 +92,14 @@ def remap_names(orig_names: list, remappings: dict): # prepare the loaded data according to the example above data = [] for planner in planners_raw: - data.append(raw_data[planner][metric]) + # delete Nones - method is robust against the failed trials + data_planner_metric_clean = [] + for i, metric_value in enumerate(raw_data[planner][metric]): + if metric_value == None: + print(f"[{i+1}] Planner '{planner}', metric '{metric}' equals '{metric_value}', skipping...") + continue + data_planner_metric_clean.append(metric_value) + data.append(data_planner_metric_clean) # remap the original IDs (names) of the planners planners = remap_names(planners_raw, cfg['labels']) From 80ab61628aeb9fcda2d2fc9a8b5d2cb5954863ed Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 15:39:58 +0200 Subject: [PATCH 62/81] `evaluation` - added `create_bar_plot` script to create simple bar plot from a single scenario, for 1 metric --- .../scripts/create_bar_plot_from_results.py | 236 ++++++++++++++++++ .../scripts/create_bar_plot_from_results.yaml | 30 +++ 2 files changed, 266 insertions(+) create mode 100644 srpb_evaluation/scripts/create_bar_plot_from_results.py create mode 100644 srpb_evaluation/scripts/create_bar_plot_from_results.yaml diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.py b/srpb_evaluation/scripts/create_bar_plot_from_results.py new file mode 100644 index 0000000..bf1e542 --- /dev/null +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.py @@ -0,0 +1,236 @@ +''' +Script that creates a .pdf file with a bar plot presenting results stored in a spreadsheet + +Dependencies: +sudo apt install python3-openpyxl +sudo apt install python3-matplotlib +sudo apt install python3-numpy +''' + +import argparse +import excel_sheet_utils +import matplotlib.pyplot as plt +import numpy as np +import yaml + +from typing import Dict +from typing import List +from typing import Union + + +def 
filter_inner_keys(outer_dict: Dict, selected_keys: List[str]) -> Dict: + """ + ChatGPT: + This function takes the outer dictionary (outer_dict) and a list of selected keys (selected_keys). + It then uses a nested dictionary comprehension to iterate over the outer dictionary and, + for each inner dictionary, filter only the selected keys. The result is a new dictionary with the same outer + structure but only containing the selected keys in the inner dictionaries. + """ + return {outer_key: {inner_key: inner_dict[inner_key] for inner_key in selected_keys if inner_key in inner_dict} + for outer_key, inner_dict in outer_dict.items()} + + +def prepare_data_for_plotting(data: Dict) -> Union[List[str], Dict[str, float]]: + """ + Input data are constructed as follows: + * dict with planner names as keys + * dict with metric names as keys + * metric values + Output data are constructed as follows: + * Union[ + list of metric names, + dict of planner names with values of corresponding metrics (ordered according to the list) + ] + """ + if not data: + raise Exception(f"Empty data provided!") + + planner_names = list(data.keys()) + if not planner_names: + raise Exception(f"Inner dict is empty!") + + metric_names = list(data[planner_names[0]].keys()) + # NOTE: [0] because individual filtered values are stored in 1-element lists + metrics_values = { + planner: [ + data[planner].get(metric, None)[0] + for metric in metric_names + ] for planner in planner_names + } + return metric_names, metrics_values + + +def create_bar_plot(dataset: Dict, plot_cfg: Dict) -> plt.Figure: + """ + Prepares a bar plot according to this tutorial: + https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py + """ + metric_name = dataset['metric'] + metric_planner_data = dataset['data'] + planner_names_list = list(metric_planner_data.keys()) + + # find the number of scenarios as a size of a list with metric values of an arbitrary 
planner + if not len(planner_names_list): + raise Exception(f"Cannot proceed, data is empty") + scenarios_num = len(metric_planner_data[planner_names_list[0]]) + + # to find maximum (1-element metric sequences are expected) + all_values = [value[0] for value in metric_planner_data.values()] + # find the minimum and maximum values + min_value = min(all_values) + max_value = max(all_values) + ylim_min = plot_cfg['figure']['ylim_min'] + ylim_max = plot_cfg['figure']['ylim_max_value_multiplier'] * max_value + + # abstract from inputs (this is an early version) + planner_ids = metric_planner_data.keys() + metric_values = metric_planner_data + + # the label locations + x = np.arange(scenarios_num) + + # the width of the space reserved for bars + width_scale = 1.0 + width = width_scale / len(metric_values) / 2 + # could be useful for the multi-scenario case + multiplier = 0 + + # prepare configuration entries + font_cfg_legend = { + 'family': plot_cfg['legend']['fontfamily'], + 'size': plot_cfg['legend']['fontsize'], + } + # validate the color + if not len(plot_cfg['figure']['bar_color']): + plot_cfg['figure']['bar_color'] = None + + fig, ax = plt.subplots() + for attribute, measurement in metric_values.items(): + offset = width * multiplier + rects = ax.bar( + x + offset, + measurement, width * plot_cfg['figure']['bar_width'], + label=attribute, + color=plot_cfg['figure']['bar_color'], + edgecolor=plot_cfg['figure']['bar_edgecolor'], + linewidth=plot_cfg['figure']['bar_edgewidth'] + ) + + # Add labels manually instead of using "ax.bar_label()" function + if plot_cfg['figure']['bar_height_labels']: + for rect, label in zip(rects, measurement): + height = rect.get_height() + ax.text( + rect.get_x() + rect.get_width() / 2, # center the text + height, + str(plot_cfg['figure']['bar_height_label_format']).format(height), # limit decimal places + ha='center', + va='bottom', + fontdict=font_cfg_legend, + color='black' + ) + # for calculating offsets + multiplier += 1 + + # Add 
some text for labels, title and custom x-axis tick labels, etc. + ax.set_xlabel(plot_cfg['figure']['xlabel'], fontdict=font_cfg_legend) + ax.set_ylabel(plot_cfg['figure']['ylabel'], fontdict=font_cfg_legend) + ax.set_title(plot_cfg['figure']['title'], fontdict=font_cfg_legend) + + # Ref: https://stackoverflow.com/a/47893553 + idx = np.asarray([i * width for i in range(len(planner_ids))]) + ax.set_xticks(idx) + ax.set_xticklabels(list(planner_ids), fontsize=font_cfg_legend['size']) + ax.tick_params(axis='y', labelsize=font_cfg_legend['size']) + + # x labels rotation + ax.tick_params(axis='x', rotation=plot_cfg['figure']['xlabels_rotation']) + + if plot_cfg['legend']['enable']: + ax.legend( + loc=plot_cfg['legend']['loc'], + ncol=plot_cfg['legend']['ncol'], + fontsize=font_cfg_legend['size'] + ) + ax.set_ylim(ylim_min, ylim_max) + return fig + + +def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings: dict): + # remap the original IDs (names) + data_remapped_keys = {} + for name_orig in names_orig_select:#data_orig.keys(): + label = remappings.get(name_orig) + # check if found; if not - do not change the name; also, if the remapped value already exists, do not overwrite + if not label == None and not label in data_orig.keys(): + data_remapped_keys[label] = data_orig[name_orig] + return data_remapped_keys + + +if __name__ == "__main__": + # API is similar to the `create_box_plot_from_results` script + # Ref: https://stackoverflow.com/a/32763023 + cli = argparse.ArgumentParser() + # positional arguments + cli.add_argument("input", type=str, help="Path to the SRPB results sheet") + cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument("--config", type=str, help="Path to the plot configuration file") + cli.add_argument("--metric", type=str, help="ID of the metric") + # optional arguments + cli.add_argument( + "--planners", + nargs="*", + type=str, + help="Space-separated list of planners to 
include in the plot (all appearing in the results file \ + are included by default)" + ) + + # parse the command line + args = cli.parse_args() + + sheet_path = args.input + config_path = args.config + metric = args.metric + output_path = args.output + planners_raw = args.planners + + # load dict from the file + with open(config_path) as f: + cfg = yaml.safe_load(f) + + # this list will always be of size 1 (this is just a hack to pass it to `filter_inner_keys`) + metric_name = [str(metric)] + + # load data from the spreadsheet + data_loaded = excel_sheet_utils.load_data_from_excel(sheet_path) + data_selected = filter_inner_keys(data_loaded, metric_name) + + # use all included in the input sheet + if planners_raw == None or not len(planners_raw): + print(f"Using planner IDs from the input spreadsheet as none were selected explicitly") + planners_raw = [] + keys = data_loaded.keys() + for planner_id in keys: + planners_raw.append(planner_id) + + metric_name, planner_values = prepare_data_for_plotting(data_selected) + # remap the original IDs (names) of the planners, store in the same dict + planner_values = create_remapped_dataset(planner_values, planners_raw, cfg['labels']) + + # prepare dataset abstract (to eventually extend to a multi-plot scenario) + dataset = { + 'metric': metric_name, + 'data': planner_values + } + + # Create a new dictionary with only the desired keys + plot_cfg_dict = {key: cfg[key] for key in ['figure', 'legend'] if key in cfg} + + # Create bar plot and return a figure handle + fig = create_bar_plot(dataset, plot_cfg_dict) + # blocking call until window with the plot is closed + plt.show() + + # save the figure + fig.savefig(output_path, bbox_inches='tight', pad_inches=0) + print(f"Plot saved to `{output_path}` file") diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml new file mode 100644 index 0000000..4cb3e2b --- /dev/null +++ 
b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml @@ -0,0 +1,30 @@ +figure: + # one may want to embed mathematical typesetting inside to any label, use "$s^2$" then + title: Metric name + xlabel: Planner + # in degrees + xlabels_rotation: 0 + ylabel: Metric value + ylim_min: 0.0 + ylim_max_value_multiplier: 1.1 + bar_width: 0.5 + bar_height_labels: True + bar_height_label_format: "{:.1f}" + bar_edgecolor: black + bar_edgewidth: 0.7 + # set a valid value so all bars will have the same color; leave empty <""> for default setup + bar_color: "#1f77b4" + +legend: + # when enabling, it's wise to force a default "bar_color" + enable: False + loc: "upper left" + ncol: 3 + fontsize: 12 + fontfamily: serif + +labels: + # defines how the planners will be named according to their IDs + teb: TEB + dwa: DWA + trajectory: Traj. Rollout From c1507e472f82a34ab43c1c45162d1a2841128d82 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 16:22:30 +0200 Subject: [PATCH 63/81] `evaluation` - fixed inverse height/width of the figure in the `create_box_plot` script --- srpb_evaluation/scripts/create_box_plot_from_results.py | 2 +- srpb_evaluation/scripts/create_box_plot_from_results.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py index 7a8ce6b..fc031b7 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.py +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -108,7 +108,7 @@ def remap_names(orig_names: list, remappings: dict): if cfg['figure']['size_width'] == None or cfg['figure']['size_height'] == None: fig, ax = plt.subplots() else: - fig, ax = plt.subplots(figsize=(cfg['figure']['size_height'], cfg['figure']['size_width'])) + fig, ax = plt.subplots(figsize=(cfg['figure']['size_width'], cfg['figure']['size_height'])) if cfg['plot_type'] == "violin": ax.violinplot( diff --git 
a/srpb_evaluation/scripts/create_box_plot_from_results.yaml b/srpb_evaluation/scripts/create_box_plot_from_results.yaml index 4a91856..a145a99 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.yaml +++ b/srpb_evaluation/scripts/create_box_plot_from_results.yaml @@ -2,8 +2,8 @@ plot_type: violin figure: - size_width: 4 - size_height: 9 + size_width: 9 + size_height: 4 title: Title xlabel: Samples ylabel: Values From 525d018c6fee7eda78a6c6975c85fb78b027b6e5 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 16:24:01 +0200 Subject: [PATCH 64/81] `evaluation` - added configurable figure size in the `create_bar_plot` script --- srpb_evaluation/scripts/create_bar_plot_from_results.py | 7 ++++++- srpb_evaluation/scripts/create_bar_plot_from_results.yaml | 2 ++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.py b/srpb_evaluation/scripts/create_bar_plot_from_results.py index bf1e542..bd61c57 100644 --- a/srpb_evaluation/scripts/create_bar_plot_from_results.py +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.py @@ -104,7 +104,12 @@ def create_bar_plot(dataset: Dict, plot_cfg: Dict) -> plt.Figure: if not len(plot_cfg['figure']['bar_color']): plot_cfg['figure']['bar_color'] = None - fig, ax = plt.subplots() + # Based on the example: https://matplotlib.org/stable/gallery/statistics/boxplot_vs_violin.html + if plot_cfg['figure']['size_width'] == None or plot_cfg['figure']['size_height'] == None: + fig, ax = plt.subplots() + else: + fig, ax = plt.subplots(figsize=(plot_cfg['figure']['size_width'], plot_cfg['figure']['size_height'])) + for attribute, measurement in metric_values.items(): offset = width * multiplier rects = ax.bar( diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml index 4cb3e2b..af7f154 100644 --- a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml +++ 
b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml @@ -1,4 +1,6 @@ figure: + size_width: 9 + size_height: 4 # one may want to embed mathematical typesetting inside to any label, use "$s^2$" then title: Metric name xlabel: Planner From 631a19788d30f6e234f136be77086577a5705ae0 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 20:14:18 +0200 Subject: [PATCH 65/81] `evaluation` - fixed `create_bar_plot` script's omitting the unremapped keys; extended to handle multiple input spreadsheets (data merged internally) --- .../scripts/create_bar_plot_from_results.py | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.py b/srpb_evaluation/scripts/create_bar_plot_from_results.py index bd61c57..1f02952 100644 --- a/srpb_evaluation/scripts/create_bar_plot_from_results.py +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.py @@ -164,11 +164,14 @@ def create_bar_plot(dataset: Dict, plot_cfg: Dict) -> plt.Figure: def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings: dict): # remap the original IDs (names) data_remapped_keys = {} - for name_orig in names_orig_select:#data_orig.keys(): + for name_orig in names_orig_select: label = remappings.get(name_orig) - # check if found; if not - do not change the name; also, if the remapped value already exists, do not overwrite + # check if found; also, if the remapped value already exists, do not overwrite if not label == None and not label in data_orig.keys(): data_remapped_keys[label] = data_orig[name_orig] + elif label == None: + # if key was not found amid the remappings - do not change the original name + data_remapped_keys[name_orig] = data_orig[name_orig] return data_remapped_keys @@ -176,9 +179,14 @@ def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings # API is similar to the `create_box_plot_from_results` script # Ref: https://stackoverflow.com/a/32763023 
cli = argparse.ArgumentParser() - # positional arguments - cli.add_argument("input", type=str, help="Path to the SRPB results sheet") + # positional argument cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument( + "--input", + nargs="*", + type=str, + help="Path(s) to the SRPB results sheet(s). Their contents should be orthogonal - different planners in each" + ) cli.add_argument("--config", type=str, help="Path to the plot configuration file") cli.add_argument("--metric", type=str, help="ID of the metric") # optional arguments @@ -193,7 +201,7 @@ def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings # parse the command line args = cli.parse_args() - sheet_path = args.input + sheet_paths = args.input config_path = args.config metric = args.metric output_path = args.output @@ -206,8 +214,14 @@ def create_remapped_dataset(data_orig: dict, names_orig_select: dict, remappings # this list will always be of size 1 (this is just a hack to pass it to `filter_inner_keys`) metric_name = [str(metric)] - # load data from the spreadsheet - data_loaded = excel_sheet_utils.load_data_from_excel(sheet_path) + # load data from the spreadsheet(s) + data_loaded = {} + for sheet_path in sheet_paths: + data_single_sheet = excel_sheet_utils.load_data_from_excel(sheet_path) + # NOTE: this is probably not an optimal solution; ref: https://stackoverflow.com/a/26853961 + data_loaded_so_far = data_loaded + data_loaded = {**data_loaded_so_far, **data_single_sheet} + data_selected = filter_inner_keys(data_loaded, metric_name) # use all included in the input sheet From b3f33b8b20745c1b5850f5c5f30c0afe97b36e5b Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 20:34:15 +0200 Subject: [PATCH 66/81] `evaluation` - `create_box_plot` script's extended to handle multiple input spreadsheets (data merged internally) --- .../scripts/create_box_plot_from_results.py | 18 ++++++++++++++---- 1 file changed, 
14 insertions(+), 4 deletions(-) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py index fc031b7..27e7e2f 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.py +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -29,8 +29,13 @@ def remap_names(orig_names: list, remappings: dict): # Ref: https://stackoverflow.com/a/32763023 cli = argparse.ArgumentParser() # positional arguments - cli.add_argument("input", type=str, help="Path to the SRPB results sheet") cli.add_argument("output", type=str, help="Path to the .pdf file with the plot to save") + cli.add_argument( + "--input", + nargs="*", + type=str, + help="Path(s) to the SRPB results sheet(s). Their contents should be orthogonal - different planners in each" + ) cli.add_argument("--config", type=str, help="Path to the plot configuration file") cli.add_argument("--metric", type=str, help="ID of the metric") # optional arguments @@ -54,7 +59,7 @@ def remap_names(orig_names: list, remappings: dict): # parse the command line args = cli.parse_args() - sheet_path = args.input + sheet_paths = args.input config_path = args.config metric = args.metric output_path = args.output @@ -64,8 +69,13 @@ def remap_names(orig_names: list, remappings: dict): with open(config_path) as f: cfg = yaml.safe_load(f) - # load data from the spreadsheet (either only filtered data - bar plot, or all data for statistical presentation) - raw_data = excel_sheet_utils.load_raw_data_from_excel(sheet_path) + # load data from the spreadsheet(s) (all data for statistical presentation) + raw_data = {} + for sheet_path in sheet_paths: + raw_data_single_sheet = excel_sheet_utils.load_raw_data_from_excel(sheet_path) + # NOTE: this is probably not an optimal solution; ref: https://stackoverflow.com/a/26853961 + raw_data_so_far = raw_data + raw_data = {**raw_data_so_far, **raw_data_single_sheet} # use all included in the input sheet if planners_raw == None 
or not len(planners_raw): From 98ef0c7bec108469d064a99f4b8d24bad1f2dc80 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 20:56:34 +0200 Subject: [PATCH 67/81] `evaluation` - refined `create_bar_plot` script's: unified the font of x tick labels --- srpb_evaluation/scripts/create_bar_plot_from_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.py b/srpb_evaluation/scripts/create_bar_plot_from_results.py index 1f02952..5539e3c 100644 --- a/srpb_evaluation/scripts/create_bar_plot_from_results.py +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.py @@ -145,7 +145,7 @@ def create_bar_plot(dataset: Dict, plot_cfg: Dict) -> plt.Figure: # Ref: https://stackoverflow.com/a/47893553 idx = np.asarray([i * width for i in range(len(planner_ids))]) ax.set_xticks(idx) - ax.set_xticklabels(list(planner_ids), fontsize=font_cfg_legend['size']) + ax.set_xticklabels(list(planner_ids), fontdict=font_cfg_legend) ax.tick_params(axis='y', labelsize=font_cfg_legend['size']) # x labels rotation From 4fb4020331b7369ce3100f6d047519b695bf86dc Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 24 Apr 2024 20:57:50 +0200 Subject: [PATCH 68/81] `evaluation` - added font configuration and x labels rotation in the `create_box_plot` script --- .../scripts/create_box_plot_from_results.py | 14 ++++++++++---- .../scripts/create_box_plot_from_results.yaml | 5 +++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.py b/srpb_evaluation/scripts/create_box_plot_from_results.py index 27e7e2f..4291538 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.py +++ b/srpb_evaluation/scripts/create_box_plot_from_results.py @@ -131,14 +131,20 @@ def remap_names(orig_names: list, remappings: dict): else: raise Exception(f"Unknown plot_type") + font_cfg = { + 'family': cfg['figure']['fontfamily'], + 'size': 
cfg['figure']['fontsize'], + } # NOTE: converts to "LaTeX-style" if a string (or its part) is written as "$a^x$" - ax.set_title(cfg['figure']['title']) + ax.set_title(cfg['figure']['title'], fontdict=font_cfg) ax.xaxis.grid(cfg['figure']['xaxis_grid']) ax.yaxis.grid(cfg['figure']['yaxis_grid']) - ax.set_xlabel(cfg['figure']['xlabel']) - ax.set_ylabel(cfg['figure']['ylabel']) + ax.set_xlabel(cfg['figure']['xlabel'], fontdict=font_cfg) + ax.set_ylabel(cfg['figure']['ylabel'], fontdict=font_cfg) ax.set_xticks(range(1, len(data) + 1)) - ax.set_xticklabels(planners) + ax.set_xticklabels(planners, fontdict=font_cfg) + # x labels rotation + ax.tick_params(axis='x', rotation=cfg['figure']['xlabels_rotation']) plt.show() # save the figure diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.yaml b/srpb_evaluation/scripts/create_box_plot_from_results.yaml index a145a99..e14703f 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.yaml +++ b/srpb_evaluation/scripts/create_box_plot_from_results.yaml @@ -6,9 +6,14 @@ figure: size_height: 4 title: Title xlabel: Samples + # in degrees + xlabels_rotation: 0 ylabel: Values xaxis_grid: True yaxis_grid: True + fontsize: 12 + # (cursive|fantasy|monospace|sans|sans serif|sans-serif|serif) + fontfamily: serif labels: # defines how the planners will be named according to their IDs From 2bbdc23c7ba538e6066ad23d5f17ce7d22eaa0e5 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 5 May 2024 13:53:19 +0200 Subject: [PATCH 69/81] `evaluation` - fixes in the script creating LaTeX table: dynamically calculated column arrangement and "_" in planner names replaced with "_\" --- .../scripts/create_latex_table_from_results.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index b427942..51cc36e 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py 
+++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -130,7 +130,15 @@ def get_metric_value_for_planner( tex += (r" % NOTE1: arg to parbox defines how high the text will start" + "\r\n") tex += (r" % NOTE2: \cline{2-8} is a partial horizontal line, ref: https://tex.stackexchange.com/a/8555" + "\r\n") tex += (r" \begin{tabular}" + "\r\n") - tex += (r" {||c||c||c|c|c|c|c|c||}" + "\r\n") + + # Prepare column for metric identifiers and units and for scenario identifiers + tex += (r" {||c||c||") + # centered columns according to the number of planners + for _ in range(planners_num): + tex += (r"c|") + # double vertical border and line end + tex += (r"|}" + "\r\n") + tex += (r" \hline" + "\r\n") tex += (r" % =============================== header" + "\r\n") tex += (r" \multicolumn{2}{|c|}{ % spreads across metric and scenario ID" + "\r\n") @@ -146,11 +154,12 @@ def get_metric_value_for_planner( # header columns of planners - assuming that all results have the same planner entries for planner_name in planner_names: + planner_name_latex_safe = planner_name.replace("_", "\_") tex += (r" & \rotatebox[origin=c]{90}{" + "\r\n") tex += (r" \parbox[c]{\benchresultspheaderheight}{" + "\r\n") tex += (r" \centering" + "\r\n") # enter a name of the planner - tex += (r" \emph{" + str(planner_name) + r"}" + "\r\n") + tex += (r" \emph{" + str(planner_name_latex_safe) + r"}" + "\r\n") tex += (r" }" + "\r\n") tex += (r" }" + "\r\n") From 77d168cc3ae4d39d06334e4a8422166ab09082a5 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 5 May 2024 15:48:03 +0200 Subject: [PATCH 70/81] `evaluation` - extended the script creating LaTeX table: capable of associating multiple spreadsheets to a single scenario --- .../scripts/create_latex_table_from_results.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 
51cc36e..d5cf678 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -302,7 +302,12 @@ def get_metric_value_for_planner( cmd_json = json.loads(args.input) for key in cmd_json.keys(): - inputs.append({'name': str(key), 'path': Path(cmd_json[key])}) + # (multiple) spreadsheet path(s) is(are) given as a string(s) (separated with a comma) + paths = cmd_json[key].split(",") + paths_pathlib = [Path(path.strip("'")) for path in paths] + # collect a single input + inputs.append({'name': str(key), 'path': paths_pathlib}) + print("\tScript inputs:") print(*inputs, sep='\n') @@ -310,9 +315,13 @@ def get_metric_value_for_planner( results_total = [] # loop for processing Excel sheets for all input files - for input_file in inputs: - scenario_results = load_data_from_excel(input_file['path']) - results_total.append({'name': input_file['name'], 'results': scenario_results}) + for input in inputs: + # aggregated results (possibly from multiple files) + scenario_results = {} + for file in input['path']: + scenario_results_partial = load_data_from_excel(file) + scenario_results.update(scenario_results_partial) + results_total.append({'name': input['name'], 'results': scenario_results}) # generates a string representing LaTeX command that can be directly included and used in a LaTeX document results_table = create_latex_table(results_total, metric_names, planner_names) From 46f0e41566d226255a1e069573442589a2f16cf9 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 5 May 2024 16:28:06 +0200 Subject: [PATCH 71/81] `evaluation` - fixes in the script creating LaTeX table: fixed newline between the metric ID and its unit --- srpb_evaluation/scripts/create_latex_table_from_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index d5cf678..4fea140 100644 
--- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -179,7 +179,7 @@ def get_metric_value_for_planner( # ID of the metric and its unit # whether to put the unit in a new line or not (when there are too few rows) if scenarios_num > 2: - metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} \\ {metrics_map[metric_id]['tex_unit']}" + metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} \\\\ {metrics_map[metric_id]['tex_unit']}" else: metric_name_and_unit = f"{metrics_map[metric_id]['tex_name']} {metrics_map[metric_id]['tex_unit']}" tex += (r" " + str(metric_name_and_unit) + "\r\n") From 24bcaf8d06c8fc5945f648eca0eecea4c125b647 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Wed, 8 May 2024 00:41:13 +0200 Subject: [PATCH 72/81] `evaluation` - fixed script creating LaTeX table: lack of best metric selection with incomplete data corrected --- srpb_evaluation/scripts/create_latex_table_from_results.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 4fea140..72dd889 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -204,6 +204,11 @@ def get_metric_value_for_planner( metric_values_among_planners = [] for planner_name in planner_names: metric_val = get_metric_value_for_planner(results[scenario_num]['results'], planner_name, metric_id) + # there might be incorrect data: + # * 'nan' will break the min/max selection (nan will be returned) + # * 'None' cannot be compared with floats + if math.isnan(metric_val): + continue metric_values_among_planners.append(metric_val) # select the best metric - the one with the smallest or largest value (excluding NaNs) From 07a40dce22b8af88b0f9e6425871bee92895b348 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 3 
Jun 2024 20:37:01 +0200 Subject: [PATCH 73/81] `evaluation` - version bumped --- srpb_evaluation/package.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/package.xml b/srpb_evaluation/package.xml index 2425638..88454bb 100644 --- a/srpb_evaluation/package.xml +++ b/srpb_evaluation/package.xml @@ -2,7 +2,7 @@ srpb_evaluation - 0.9.8 + 0.9.9 The srpb_evaluation package provides implementation of metrics that allow evaluation of robot behaviour during navigation From 96e3cbee32c552b5dece32e13e6f648a774bab4a Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Mon, 3 Jun 2024 20:40:25 +0200 Subject: [PATCH 74/81] README updated (preliminary usage instructions) --- README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0637fad..575a855 100644 --- a/README.md +++ b/README.md @@ -35,9 +35,18 @@ rosinstall -n . srpb/srpb/srpb.rosinstall ## Usage +### Setup + A log file is saved once the goal is reached by the `srpb_move_base` node. Renewing the goal pose before reaching the previous one does not cause the files to be divided into parts. -TBD... +### Typical workflow + +If one intends to collect a bunch of logs, this is the correct workflow: + +* instead of running the typical `move_base`, run the `srpb_move_base` node that aggregates the `srpb_logger` modules, +* after each trial, run the `scripts/copy_logs.sh` to copy the logs related to the newest trial into a separate directory, +* if one wishes to be extra safe, one might want to evaluate the newest logs at this point with `scripts/evaluate_from_dir.sh ` OR (typically) once all trials were completed, run the `scripts/evaluate_all_dirs.sh `, +* now an Excel sheet can be created with `python3 create_excel_from_results.py` script. 
## Acknowledgments From 5df698d6611f3d7e1d03a848b6349db70f2c834a Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Fri, 23 Aug 2024 20:27:16 +0200 Subject: [PATCH 75/81] `evaluation` - added logs regarding the scales clipping in the `HeadingDirectionDisturbance` metric calculations --- .../metrics/heading_direction_disturbance.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h index 2fac921..d87bff3 100644 --- a/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h +++ b/srpb_evaluation/include/srpb_evaluation/metrics/heading_direction_disturbance.h @@ -115,7 +115,14 @@ class HeadingDirectionDisturbance: public MetricStatistics { person_fov_ ); // normalize cost - heading.normalize(robot_circumradius_, robot_max_speed_); + if (!heading.normalize(robot_circumradius_, robot_max_speed_)) { + printf( + "During calculation of the `HeadingDirectionDisturbance` metric, scales had to be clipped for person '%s' " + "at t=%.4f. 
Your data might be corrupted!", + rewinder_.getPersonCurr().getName().c_str(), + rewinder_.getTimestampCurr() + ); + } timed_disturbance.second.push_back(heading.getScale()); } From 32cb11b44460fa32f816d4a1432937f225628d99 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sat, 24 Aug 2024 02:53:07 +0200 Subject: [PATCH 76/81] `logger` and `evaluation` - switched from `people_msgs_utils::EMPTY_GROUP` to dummy group created with a default ctor (compatibility fix) --- srpb_evaluation/test/test_rewinder.cpp | 2 +- srpb_logger/src/people_logger.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/srpb_evaluation/test/test_rewinder.cpp b/srpb_evaluation/test/test_rewinder.cpp index 6325aea..8cba62c 100644 --- a/srpb_evaluation/test/test_rewinder.cpp +++ b/srpb_evaluation/test/test_rewinder.cpp @@ -27,7 +27,7 @@ people_msgs_utils::Person createPerson(const std::string& name) { "123" ); } -const people_msgs_utils::Group DUMMY_GROUP = people_msgs_utils::EMPTY_GROUP; +const people_msgs_utils::Group DUMMY_GROUP = people_msgs_utils::Group(); /* * Mocking references: diff --git a/srpb_logger/src/people_logger.cpp b/srpb_logger/src/people_logger.cpp index 96acd3c..4397cca 100644 --- a/srpb_logger/src/people_logger.cpp +++ b/srpb_logger/src/people_logger.cpp @@ -255,14 +255,14 @@ std::pair PeopleLogger::groupFromString(const st if (vals1.empty()) { // return a dummy group without any IDs of tracked people - return {false, people_msgs_utils::EMPTY_GROUP}; + return {false, people_msgs_utils::Group()}; } // 5 static entries and `group` must contain at least 2 IDs if (vals1.size() < 7) { std::cout << "\x1B[31mFound corrupted data of a group:\r\n\t" << str << "\x1B[0m" << std::endl; // return a dummy group - return {false, people_msgs_utils::EMPTY_GROUP}; + return {false, people_msgs_utils::Group()}; } std::string name = vals1.at(0); From 55861aed84253570d87f2a24cbeed6b4a56fdbab Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Tue, 27 Aug 2024 10:35:14 
+0200 Subject: [PATCH 77/81] `evaluation` - reduced the default number of required planner-specific logs to create a spreadsheet from results --- srpb_evaluation/scripts/create_excel_from_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srpb_evaluation/scripts/create_excel_from_results.py b/srpb_evaluation/scripts/create_excel_from_results.py index 58d01b4..13a8176 100644 --- a/srpb_evaluation/scripts/create_excel_from_results.py +++ b/srpb_evaluation/scripts/create_excel_from_results.py @@ -21,7 +21,7 @@ from typing import Dict -def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=3): +def get_log_dirs_planner(dir_path: str, planner_name: str, min_logs=1): dirnames_underscore = glob.glob(dir_path + '/' + '*_' + planner_name + '*') dirnames_dash = glob.glob(dir_path + '/' + '*-' + planner_name + '*') dirnames = dirnames_underscore From 2353fb63614a921f1422f1dd2f2f8b6f69eb13c3 Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 26 Oct 2025 17:55:00 +0100 Subject: [PATCH 78/81] `evaluation` - added planner name definitions in the `create_box_plot` config --- srpb_evaluation/scripts/create_box_plot_from_results.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/srpb_evaluation/scripts/create_box_plot_from_results.yaml b/srpb_evaluation/scripts/create_box_plot_from_results.yaml index e14703f..06ee080 100644 --- a/srpb_evaluation/scripts/create_box_plot_from_results.yaml +++ b/srpb_evaluation/scripts/create_box_plot_from_results.yaml @@ -17,5 +17,10 @@ figure: labels: # defines how the planners will be named according to their IDs + humap: HuMAP + cohan: CoHAN + hateb: HaTEB teb: TEB dwa: DWA + trajectory: Trajectory Rollout + eband: EBand From 877cf66e689b8bdec56b25eb3c61794423fb6e7c Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 26 Oct 2025 17:55:15 +0100 Subject: [PATCH 79/81] `evaluation` - added planner name definitions in the `create_bar_plot` config --- 
srpb_evaluation/scripts/create_bar_plot_from_results.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml index af7f154..59a9599 100644 --- a/srpb_evaluation/scripts/create_bar_plot_from_results.yaml +++ b/srpb_evaluation/scripts/create_bar_plot_from_results.yaml @@ -27,6 +27,10 @@ legend: labels: # defines how the planners will be named according to their IDs + humap: HUMAP + cohan: CoHAN + hateb: HaTEB teb: TEB dwa: DWA trajectory: Traj. Rollout + eband: EBand From d646ff482db0b91ad3fb61b99702db550810da0b Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 26 Oct 2025 17:57:57 +0100 Subject: [PATCH 80/81] `evaluation` - added data integrity check in `create_latex_table_from_results` --- srpb_evaluation/scripts/create_latex_table_from_results.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/srpb_evaluation/scripts/create_latex_table_from_results.py b/srpb_evaluation/scripts/create_latex_table_from_results.py index 72dd889..ff75ba7 100644 --- a/srpb_evaluation/scripts/create_latex_table_from_results.py +++ b/srpb_evaluation/scripts/create_latex_table_from_results.py @@ -211,6 +211,9 @@ def get_metric_value_for_planner( continue metric_values_among_planners.append(metric_val) + if not len(metric_values_among_planners): + raise Exception(f"Expected '{metric_id}' metric value but none of the planners have one assigned. 
Results are: {results[scenario_num]['results']}") + # select the best metric - the one with the smallest or largest value (excluding NaNs) if srpb_metrics.is_minimum_best(metric_id): metric_best_val = min(metric_values_among_planners) From 3e34e32fab3c280b1361060678d359db9666ccab Mon Sep 17 00:00:00 2001 From: Jarek Karwowski Date: Sun, 26 Oct 2025 18:06:11 +0100 Subject: [PATCH 81/81] `evaluation` - added guard to avoid float division by zero in `rewind_experiment_utils.recompute_group_cog` --- srpb_evaluation/scripts/rewind_experiment_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/srpb_evaluation/scripts/rewind_experiment_utils.py b/srpb_evaluation/scripts/rewind_experiment_utils.py index 1d15c8a..73d77a7 100644 --- a/srpb_evaluation/scripts/rewind_experiment_utils.py +++ b/srpb_evaluation/scripts/rewind_experiment_utils.py @@ -184,8 +184,9 @@ def recompute_group_cog(group: Group, people_set: List[Person], people_hide: Lis for person in valid_people_this_group: new_cog_x += person.get_x() new_cog_y += person.get_y() - new_cog_x /= len(valid_people_this_group) - new_cog_y /= len(valid_people_this_group) + if valid_people_this_group: + new_cog_x /= len(valid_people_this_group) + new_cog_y /= len(valid_people_this_group) new_cog_z = 0.0 # collect valid member IDs