From 250254b4ae2f72352b33a6ad49d88335e7efef53 Mon Sep 17 00:00:00 2001 From: EliseCos Date: Wed, 25 Feb 2026 16:52:45 +0100 Subject: [PATCH 1/5] Adding keyword to be used with json_merge_entries --- .../cli/scil_json_convert_entries_to_xlsx.py | 45 +++++++++++++++++-- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py index 98b5903f3..a23612c7b 100755 --- a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py +++ b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py @@ -17,7 +17,8 @@ from scilpy.version import version_string dps_dpp = ['data_per_streamline_keys', 'data_per_point_keys'] - +required_keys = {'mean', 'std'} +optional_keys = {'ROI-name', 'nb-vx-roi', 'nb-vx-seed', 'max'} def _get_all_bundle_names(stats): bnames = set() @@ -82,7 +83,7 @@ def _get_stats_parse_function(stats, stats_over_population): return _parse_lesion elif type(first_bundle_substat) is dict: sub_keys = list(first_bundle_substat.keys()) - if set(sub_keys) == set(['mean', 'std']): # when you have mean and std per stats + if required_keys.issubset(sub_keys): # when you used scil_json_merge_entries if stats_over_population: return _parse_per_label_population_stats else: @@ -135,6 +136,21 @@ def _parse_scalar_meanstd(stats, subs, bundles): means = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) stddev = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) + + found_keys = set() + for sub_dict in stats.values(): + for bundle_dict in sub_dict.values(): + for m_stat in bundle_dict.values(): + if isinstance(m_stat, dict): + found_keys.update(m_stat.keys()) + keys_present = found_keys & optional_keys + + if 'nb-vx-roi' in keys_present: + nb_vx_roi = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) + if 'nb-vx-seed' in keys_present: + nb_vx_seed = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) + if 'max' in keys_present: + maxi = np.full((nb_subs, nb_bundles, nb_metrics), 
np.NaN) for sub_id, sub_name in enumerate(subs): for bundle_id, bundle_name in enumerate(bundles): @@ -147,6 +163,12 @@ def _parse_scalar_meanstd(stats, subs, bundles): if m_stat is not None: means[sub_id, bundle_id, metric_id] = m_stat['mean'] stddev[sub_id, bundle_id, metric_id] = m_stat['std'] + if 'nb-vx-roi' in keys_present: + nb_vx_roi[sub_id, bundle_id, metric_id] = m_stat.get('nb-vx-roi', np.nan) + if 'nb-vx-seed' in keys_present: + nb_vx_seed[sub_id, bundle_id, metric_id] = m_stat.get('nb-vx-seed', np.nan) + if 'max' in keys_present: + maxi[sub_id, bundle_id, metric_id] = m_stat.get('max', np.nan) dataframes = [] df_names = [] @@ -157,8 +179,23 @@ def _parse_scalar_meanstd(stats, subs, bundles): df_names.append(metric_name + "_mean") dataframes.append(pd.DataFrame(data=stddev[:, :, metric_id], - index=subs, columns=bundles)) + index=subs, columns=bundles)) df_names.append(metric_name + "_std") + + if 'nb-vx-roi' in keys_present: + dataframes.append(pd.DataFrame(data=nb_vx_roi[:, :, metric_id], + index=subs, columns=bundles)) + df_names.append(metric_name + "_nb-vx-roi") + + if 'nb-vx-seed' in keys_present: + dataframes.append(pd.DataFrame(data=nb_vx_seed[:, :, metric_id], + index=subs, columns=bundles)) + df_names.append(metric_name + "_nb-vx-seed") + + if 'max' in keys_present: + dataframes.append(pd.DataFrame(data=maxi[:, :, metric_id], + index=subs, columns=bundles)) + df_names.append(metric_name + "_max") return dataframes, df_names @@ -194,7 +231,7 @@ def _parse_scalar_lesions(stats, subs, bundles): dataframes.append(pd.DataFrame(data=stddev[:, :, metric_id], index=subs, columns=bundles)) - df_names.append(metric_name + "_std") + df_names.append(metric_name + "_std") return dataframes, df_names From 5b920e69d5c08efcd3968c722fc14c9a531f91f6 Mon Sep 17 00:00:00 2001 From: EliseCos Date: Wed, 25 Feb 2026 17:08:47 +0100 Subject: [PATCH 2/5] fix typo --- src/scilpy/cli/scil_json_convert_entries_to_xlsx.py | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py index a23612c7b..3d40d827f 100755 --- a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py +++ b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py @@ -83,7 +83,7 @@ def _get_stats_parse_function(stats, stats_over_population): return _parse_lesion elif type(first_bundle_substat) is dict: sub_keys = list(first_bundle_substat.keys()) - if required_keys.issubset(sub_keys): # when you used scil_json_merge_entries + if required_keys.issubset(sub_keys): # when you have mean and std per stats if stats_over_population: return _parse_per_label_population_stats else: @@ -137,7 +137,7 @@ def _parse_scalar_meanstd(stats, subs, bundles): means = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) stddev = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) - found_keys = set() + found_keys = set() # when you used scil_json_merge_entries for sub_dict in stats.values(): for bundle_dict in sub_dict.values(): for m_stat in bundle_dict.values(): @@ -231,7 +231,7 @@ def _parse_scalar_lesions(stats, subs, bundles): dataframes.append(pd.DataFrame(data=stddev[:, :, metric_id], index=subs, columns=bundles)) - df_names.append(metric_name + "_std") + df_names.append(metric_name + "_std") return dataframes, df_names From 0a760081c26428ffce63ebc5ef0822cb6f5ee2fa Mon Sep 17 00:00:00 2001 From: EliseCos Date: Mon, 2 Mar 2026 19:11:42 +0100 Subject: [PATCH 3/5] [ENH] change to a loop --- .../cli/scil_json_convert_entries_to_xlsx.py | 62 +++++++++---------- 1 file changed, 28 insertions(+), 34 deletions(-) diff --git a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py index 3d40d827f..73da4e74b 100755 --- a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py +++ b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py @@ -18,7 +18,6 @@ dps_dpp = ['data_per_streamline_keys', 'data_per_point_keys'] required_keys = 
{'mean', 'std'} -optional_keys = {'ROI-name', 'nb-vx-roi', 'nb-vx-seed', 'max'} def _get_all_bundle_names(stats): bnames = set() @@ -127,7 +126,7 @@ def _parse_scalar_stats(stats, subs, bundles): return dataframes, df_names -def _parse_scalar_meanstd(stats, subs, bundles): +def _parse_scalar_meanstd(stats, subs, bundles, optional_keys): metric_names = _get_metrics_names(stats) nb_subs = len(subs) @@ -137,20 +136,19 @@ def _parse_scalar_meanstd(stats, subs, bundles): means = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) stddev = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) - found_keys = set() # when you used scil_json_merge_entries + found_keys = set() for sub_dict in stats.values(): for bundle_dict in sub_dict.values(): for m_stat in bundle_dict.values(): if isinstance(m_stat, dict): found_keys.update(m_stat.keys()) - keys_present = found_keys & optional_keys - - if 'nb-vx-roi' in keys_present: - nb_vx_roi = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) - if 'nb-vx-seed' in keys_present: - nb_vx_seed = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) - if 'max' in keys_present: - maxi = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN) + keys_present = set(optional_keys) & found_keys + optional_arrays = {} + + for key in keys_present: + optional_arrays[key] = np.full( + (nb_subs, nb_bundles, nb_metrics), + np.nan) for sub_id, sub_name in enumerate(subs): for bundle_id, bundle_name in enumerate(bundles): @@ -163,12 +161,9 @@ def _parse_scalar_meanstd(stats, subs, bundles): if m_stat is not None: means[sub_id, bundle_id, metric_id] = m_stat['mean'] stddev[sub_id, bundle_id, metric_id] = m_stat['std'] - if 'nb-vx-roi' in keys_present: - nb_vx_roi[sub_id, bundle_id, metric_id] = m_stat.get('nb-vx-roi', np.nan) - if 'nb-vx-seed' in keys_present: - nb_vx_seed[sub_id, bundle_id, metric_id] = m_stat.get('nb-vx-seed', np.nan) - if 'max' in keys_present: - maxi[sub_id, bundle_id, metric_id] = m_stat.get('max', np.nan) + for key in keys_present: + 
optional_arrays[key][sub_id, bundle_id, metric_id] = \ + m_stat.get(key, np.nan) dataframes = [] df_names = [] @@ -182,20 +177,13 @@ def _parse_scalar_meanstd(stats, subs, bundles): index=subs, columns=bundles)) df_names.append(metric_name + "_std") - if 'nb-vx-roi' in keys_present: - dataframes.append(pd.DataFrame(data=nb_vx_roi[:, :, metric_id], - index=subs, columns=bundles)) - df_names.append(metric_name + "_nb-vx-roi") - - if 'nb-vx-seed' in keys_present: - dataframes.append(pd.DataFrame(data=nb_vx_seed[:, :, metric_id], - index=subs, columns=bundles)) - df_names.append(metric_name + "_nb-vx-seed") - - if 'max' in keys_present: - dataframes.append(pd.DataFrame(data=maxi[:, :, metric_id], - index=subs, columns=bundles)) - df_names.append(metric_name + "_max") + for key in keys_present: + dataframes.append( + pd.DataFrame( + data=optional_arrays[key][:, :, metric_id], + index=subs, + columns=bundles)) + df_names.append(f"{metric_name}_{key}") return dataframes, df_names @@ -445,7 +433,8 @@ def _parse_per_label_population_stats(stats, bundles, metrics): def _create_xlsx_from_json(json_path, xlsx_path, sort_subs=True, sort_bundles=True, ignored_bundles_fpath=None, - stats_over_population=False): + stats_over_population=False, + optional_keys=None): with open(json_path, 'r') as json_file: stats = json.load(json_file) @@ -465,7 +454,7 @@ def _create_xlsx_from_json(json_path, xlsx_path, cur_stats_func = _get_stats_parse_function(stats, stats_over_population) - dataframes, df_names = cur_stats_func(stats, subs, bundle_names) + dataframes, df_names = cur_stats_func(stats, subs, bundle_names, optional_keys) if len(dataframes): _write_dataframes(dataframes, df_names, xlsx_path) @@ -484,6 +473,9 @@ def _build_arg_parser(): p.add_argument('--no_sort_subs', action='store_false', help='If set, subjects won\'t be sorted alphabetically.') + + p.add_argument('--extra_key', nargs='+', default=[], + help='Optional keys to export (only numeric values).') 
p.add_argument('--no_sort_bundles', action='store_false', help='If set, bundles won\'t be sorted alphabetically.') @@ -505,6 +497,7 @@ def main(): parser = _build_arg_parser() args = parser.parse_args() logging.getLogger().setLevel(logging.getLevelName(args.verbose)) + optional_keys = set(args.extra_key) assert_inputs_exist(parser, args.in_json) assert_outputs_exist(parser, args, args.out_xlsx) @@ -513,7 +506,8 @@ def main(): sort_subs=args.no_sort_subs, sort_bundles=args.no_sort_bundles, ignored_bundles_fpath=args.ignore_bundles, - stats_over_population=args.stats_over_population) + stats_over_population=args.stats_over_population, + optional_keys=optional_keys) if __name__ == "__main__": From 736cb34fa12a0264e307d1420b302790534e7bca Mon Sep 17 00:00:00 2001 From: EliseCos Date: Tue, 3 Mar 2026 16:04:42 +0100 Subject: [PATCH 4/5] fix error test --- src/scilpy/cli/scil_json_convert_entries_to_xlsx.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py index 73da4e74b..1a364ab4c 100755 --- a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py +++ b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py @@ -224,7 +224,7 @@ def _parse_scalar_lesions(stats, subs, bundles): return dataframes, df_names -def _parse_stats(stats, subs, bundles): +def _parse_stats(stats, subs, bundles, optional_keys=None): nb_subs = len(subs) nb_bundles = len(bundles) @@ -497,7 +497,7 @@ def main(): parser = _build_arg_parser() args = parser.parse_args() logging.getLogger().setLevel(logging.getLevelName(args.verbose)) - optional_keys = set(args.extra_key) + extra_keys = set(args.extra_key) assert_inputs_exist(parser, args.in_json) assert_outputs_exist(parser, args, args.out_xlsx) @@ -507,7 +507,7 @@ def main(): sort_bundles=args.no_sort_bundles, ignored_bundles_fpath=args.ignore_bundles, stats_over_population=args.stats_over_population, - optional_keys=optional_keys) + 
optional_keys=extra_keys) if __name__ == "__main__": From 1de99bd811137555ce84142da9fb6c23c04d8b3f Mon Sep 17 00:00:00 2001 From: EliseCos Date: Fri, 6 Mar 2026 13:47:46 +0100 Subject: [PATCH 5/5] =?UTF-8?q?Answer=20Fran=C3=A7ois's=20comments?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/scilpy/cli/scil_json_convert_entries_to_xlsx.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py index 1a364ab4c..21e801970 100755 --- a/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py +++ b/src/scilpy/cli/scil_json_convert_entries_to_xlsx.py @@ -142,7 +142,7 @@ def _parse_scalar_meanstd(stats, subs, bundles, optional_keys): for m_stat in bundle_dict.values(): if isinstance(m_stat, dict): found_keys.update(m_stat.keys()) - keys_present = set(optional_keys) & found_keys + keys_present = set(optional_keys).intersection(found_keys) optional_arrays = {} for key in keys_present: @@ -475,7 +475,7 @@ def _build_arg_parser(): help='If set, subjects won\'t be sorted alphabetically.') p.add_argument('--extra_key', nargs='+', default=[], - help='Optional keys to export (only numeric values).') + help='Optional keys to export (must be associated with numeric values only).') p.add_argument('--no_sort_bundles', action='store_false', help='If set, bundles won\'t be sorted alphabetically.')