diff --git a/brainscore/benchmarks/geirhos2021.py b/brainscore/benchmarks/geirhos2021.py index 0fe9d4788..579692067 100644 --- a/brainscore/benchmarks/geirhos2021.py +++ b/brainscore/benchmarks/geirhos2021.py @@ -1,6 +1,7 @@ import numpy as np import brainscore +from brainio.assemblies import walk_coords from brainscore.benchmarks import BenchmarkBase from brainscore.benchmarks.screen import place_on_screen from brainscore.metrics import Score @@ -111,4 +112,19 @@ def load_assembly(dataset): stimulus_set = assembly.attrs['stimulus_set'] stimulus_set = stimulus_set[stimulus_set['image_id'].isin(set(assembly['image_id'].values))] assembly.attrs['stimulus_set'] = stimulus_set + # convert condition float to string to avoid xarray indexing errors. + # See https://app.travis-ci.com/github/brain-score/brain-score/builds/256059224 + assembly = cast_coordinate_type(assembly, coordinate='condition', newtype=str) + return assembly + + +def cast_coordinate_type(assembly, coordinate, newtype): + attrs = assembly.attrs + condition_values = assembly[coordinate].values + assembly = type(assembly)(assembly.values, coords={ + coord: (dims, values) for coord, dims, values in walk_coords(assembly) if coord != coordinate}, + dims=assembly.dims) + assembly[coordinate] = 'presentation', condition_values.astype(newtype) + assembly = type(assembly)(assembly) + assembly.attrs = attrs return assembly diff --git a/brainscore/metrics/__init__.py b/brainscore/metrics/__init__.py index 3271dd5aa..1759b6a85 100644 --- a/brainscore/metrics/__init__.py +++ b/brainscore/metrics/__init__.py @@ -68,6 +68,9 @@ def std(self, *args, _apply_raw=False, **kwargs): def min(self, *args, _apply_raw=False, **kwargs): return self._preserve_raw('min', *args, **kwargs, _apply_raw=_apply_raw) + def reduce(self, *args, _apply_raw=False, **kwargs): + return self._preserve_raw('reduce', *args, **kwargs, _apply_raw=_apply_raw) + def _preserve_raw(self, operation, *args, _apply_raw=False, _ignore_errors=True, **kwargs): 
result = getattr(super(Score, self), operation)(*args, **kwargs) if self.RAW_VALUES_KEY in self.attrs: diff --git a/brainscore/metrics/ceiling.py b/brainscore/metrics/ceiling.py index 982725c51..da1a70fa5 100644 --- a/brainscore/metrics/ceiling.py +++ b/brainscore/metrics/ceiling.py @@ -12,7 +12,7 @@ from result_caching import store -class Ceiling(object): +class Ceiling: def __call__(self, *args, **kwargs): raise NotImplementedError() diff --git a/brainscore/metrics/image_level_behavior.py b/brainscore/metrics/image_level_behavior.py index 5c930218d..de64e8a9b 100644 --- a/brainscore/metrics/image_level_behavior.py +++ b/brainscore/metrics/image_level_behavior.py @@ -134,6 +134,7 @@ def build_response_matrix_from_responses(self, responses): def dprimes(self, response_matrix, cap=5): dprime_scores = self.dprime(response_matrix) dprime_scores_clipped = dprime_scores.clip(-cap, cap) + dprime_scores_clipped = type(dprime_scores)(dprime_scores_clipped) # make sure type is preserved if not self._normalize: return dprime_scores_clipped else: diff --git a/setup.py b/setup.py index 69f7772c1..53a72f36a 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ requirements = [ "numpy>=1.17", "brainio @ git+https://github.com/brain-score/brainio", + "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn<0.24", # 0.24 breaks pls regression "scipy", "h5py", diff --git a/tests/test_benchmarks/test_geirhos2021.py b/tests/test_benchmarks/test_geirhos2021.py index 06bb6d14b..85fbfbfcd 100644 --- a/tests/test_benchmarks/test_geirhos2021.py +++ b/tests/test_benchmarks/test_geirhos2021.py @@ -6,7 +6,7 @@ from brainio.assemblies import BehavioralAssembly from brainscore import benchmark_pool -from brainscore.benchmarks.geirhos2021 import DATASETS +from brainscore.benchmarks.geirhos2021 import DATASETS, cast_coordinate_type from tests.test_benchmarks import PrecomputedFeatures @@ -75,6 +75,9 @@ def test_model_3degrees(self, dataset, model, 
expected_raw_score): # load features precomputed_features = Path(__file__).parent / f'{model}-3deg-Geirhos2021_{dataset}.nc' precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) + # these features were packaged with condition as int/float. However, current xarray versions have trouble + # when selecting for a float coordinate, so we had to change the type to string. + precomputed_features = cast_coordinate_type(precomputed_features, 'condition', newtype=str) + precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8, # doesn't matter, features are already computed ) @@ -95,6 +98,9 @@ def test_model_mean(self, model, expected_raw_score): benchmark = benchmark_pool[f"brendel.Geirhos2021{dataset.replace('-', '')}-error_consistency"] precomputed_features = Path(__file__).parent / f'{model}-Geirhos2021_{dataset}.nc' precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) + # these features were packaged with condition as int/float. However, current xarray versions have trouble + # when selecting for a float coordinate, so we had to change the type to string. 
+ precomputed_features = cast_coordinate_type(precomputed_features, 'condition', newtype=str) precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8) score = benchmark(precomputed_features).raw scores.append(score.sel(aggregation='center')) diff --git a/tests/test_metrics/test___init__.py b/tests/test_metrics/test___init__.py index f6337b248..c30a14147 100644 --- a/tests/test_metrics/test___init__.py +++ b/tests/test_metrics/test___init__.py @@ -35,7 +35,7 @@ def test_mean(self): mean_score = score.mean('a') np.testing.assert_array_equal(mean_score.raw['a'], [1, 1, 2, 2]) - def test_mean_no_apply_raw(self): + def test_mean_apply_raw(self): score = Score([1, 2], coords={'a': [1, 2]}, dims=['a']) score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a']) mean_score = score.mean('a', _apply_raw=True)