diff --git a/tests/test_integration.py b/tests/test_integration.py
index 8b2fce1a..a6ebb45c 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -58,6 +58,7 @@ def test_score(model_identifier, benchmark_identifier, expected_score):
     assert actual_score == expected_score
 
 
+@pytest.mark.travis_slow
 @pytest.mark.parametrize(
     "model_identifier, benchmark_identifier, expected_score, install_dependencies",
     [
@@ -82,6 +83,7 @@ def test_score_with_install_dependencies(
     assert actual_score == expected_score
 
 
+@pytest.mark.travis_slow
 def test_commandline_score():
     process = subprocess.run(
         [
diff --git a/tests/test_submission/test_endpoints.py b/tests/test_submission/test_endpoints.py
index 9265b395..441be5ba 100644
--- a/tests/test_submission/test_endpoints.py
+++ b/tests/test_submission/test_endpoints.py
@@ -34,6 +34,7 @@ def teardown_method(self):
         logger.info('Clean database')
         clear_schema()
 
+    @pytest.mark.travis_slow
     def test_successful_run(self):
         args_dict = {'jenkins_id': 62, 'user_id': 1, 'model_type': 'artificialsubject', 'public': True,
                      'competition': 'None', 'new_models': ['randomembedding-100'],
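
The hunks above only attach the `travis_slow` marker to individual tests; they do not show how the marker is registered or acted on. As a minimal sketch only (the file name conftest.py, the TRAVIS environment check, and the skip reason are assumptions, not part of this diff), a custom marker like this is commonly wired up along these lines:

# conftest.py -- hypothetical sketch, not taken from this patch.
import os

import pytest


def pytest_configure(config):
    # Register the custom marker so strict marker checking does not warn.
    config.addinivalue_line(
        "markers", "travis_slow: test is too slow to run on Travis CI"
    )


def pytest_collection_modifyitems(config, items):
    # Only intervene when running on Travis (Travis sets TRAVIS=true).
    if os.getenv("TRAVIS") != "true":
        return
    skip_slow = pytest.mark.skip(reason="marked travis_slow; skipped on Travis CI")
    for item in items:
        if "travis_slow" in item.keywords:
            item.add_marker(skip_slow)

With a hook like this in place, the marked tests in test_integration.py and test_endpoints.py are collected everywhere but skipped on the CI environment, which is the usual motivation for tagging slow integration and submission tests.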