diff --git a/src/aerie_cli/__main__.py b/src/aerie_cli/__main__.py index 9108f09a..96720dd5 100644 --- a/src/aerie_cli/__main__.py +++ b/src/aerie_cli/__main__.py @@ -1,24 +1,28 @@ """Programmatic entrypoint for CLI application. """ import sys -from rich.console import Console - +import logging +import traceback from aerie_cli.app import app -from aerie_cli.persistent import NoActiveSessionError +from aerie_cli.persistent import NoActiveSessionError, CURRENT_LOG_PATH from aerie_cli.__version__ import __version__ - def main(): try: app() except NoActiveSessionError: - Console().print( + logging.error( "There is no active session. Please start a session with aerie-cli activate" ) sys.exit(-1) - except Exception: - Console().print_exception() + except Exception as e: + logging.error(f"{type(e).__name__}\n" + "Check log file for more information:\n" + f"{CURRENT_LOG_PATH}") + # We don't want to print the full traceback, + # so we use debug + logging.debug(traceback.format_exc()) if __name__ == "__main__": diff --git a/src/aerie_cli/app.py b/src/aerie_cli/app.py index 6d188d18..0831a56c 100644 --- a/src/aerie_cli/app.py +++ b/src/aerie_cli/app.py @@ -3,6 +3,8 @@ `app` is the CLI application with which all commands, subcommands, and callbacks are registered. 
""" import typer +import logging +import sys from typing import Optional from aerie_cli.commands import models @@ -18,6 +20,8 @@ from aerie_cli.persistent import ( PersistentConfigurationManager, PersistentSessionManager, + clear_old_log_files, + CURRENT_LOG_PATH ) from aerie_cli.utils.prompts import select_from_list from aerie_cli.utils.sessions import ( @@ -25,6 +29,7 @@ get_active_session_client, ) from aerie_cli.utils.configurations import find_configuration +from aerie_cli.utils.logger import TyperLoggingHandler app = typer.Typer() app.add_typer(plans.plans_app, name="plans") @@ -38,7 +43,7 @@ def print_version(print_version: bool): if print_version: - typer.echo(__version__) + logging.info(__version__) raise typer.Exit() @@ -53,9 +58,40 @@ def set_alternate_configuration(configuration_identifier: str): def setup_global_command_context(hasura_admin_secret: str): CommandContext.hasura_admin_secret = hasura_admin_secret +def setup_logging(debug: bool): + clear_old_log_files() + file_formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s', + ) + file_handler = logging.FileHandler(filename=str(CURRENT_LOG_PATH), + mode='w', + encoding='utf-8') + file_handler.setLevel(logging.DEBUG) + file_handler.setFormatter(file_formatter) + + console_formatter = logging.Formatter( + '%(message)s' + ) + console_handler = TyperLoggingHandler() + level = logging.DEBUG if debug else logging.INFO + console_handler.setLevel(level) # Set the level of the stream and file handlers to different ones + # Debug should be in file, others should go into stdout + # unless verbose/debug option is selected + console_handler.setFormatter(console_formatter) + logging.basicConfig(level=logging.DEBUG, + handlers=[file_handler, + console_handler]) + @app.callback() def app_callback( + debug: Optional[bool]=typer.Option( + False, + "--debug", + "-d", + callback=setup_logging, + help="View the debug output", + ), version: Optional[bool] = typer.Option( None, 
"--version", @@ -108,7 +144,7 @@ def activate_session( if role in session.aerie_jwt.allowed_roles: session.change_role(role) else: - typer.echo(f"Role {role} not in allowed roles") + logging.info(f"Role {role} not in allowed roles") PersistentSessionManager.set_active_session(session) @@ -120,9 +156,9 @@ def deactivate_session(): """ name = PersistentSessionManager.unset_active_session() if name is None: - typer.echo("No active session") + logging.info("No active session") else: - typer.echo(f"Deactivated session: {name}") + logging.info(f"Deactivated session: {name}") @app.command("role") @@ -137,14 +173,14 @@ def change_role( client = get_active_session_client() if role is None: - typer.echo(f"Active Role: {client.aerie_host.active_role}") + logging.info(f"Active Role: {client.aerie_host.active_role}") role = select_from_list(client.aerie_host.aerie_jwt.allowed_roles) client.aerie_host.change_role(role) PersistentSessionManager.set_active_session(client.aerie_host) - typer.echo(f"Changed role to: {client.aerie_host.active_role}") + logging.info(f"Changed role to: {client.aerie_host.active_role}") @app.command("status") @@ -156,6 +192,6 @@ def print_status(): client = CommandContext.get_client() if client.aerie_host.configuration_name: - typer.echo(f"Active configuration: {client.aerie_host.configuration_name}") + logging.info(f"Active configuration: {client.aerie_host.configuration_name}") - typer.echo(f"Active role: {client.aerie_host.active_role}") + logging.info(f"Active role: {client.aerie_host.active_role}") diff --git a/src/aerie_cli/commands/configurations.py b/src/aerie_cli/commands/configurations.py index b6410e08..1c24680e 100644 --- a/src/aerie_cli/commands/configurations.py +++ b/src/aerie_cli/commands/configurations.py @@ -4,6 +4,7 @@ """ import typer +import logging import json from pathlib import Path from rich.console import Console @@ -79,10 +80,10 @@ def upload_configurations( raise e if len(new_confs): - Console().print(f"Added configurations: 
{', '.join(new_confs)}") + logging.info(f"Added configurations: {', '.join(new_confs)}") if len(updated_confs): - Console().print(f"Updated configurations: {', '.join(updated_confs)}") + logging.info(f"Updated configurations: {', '.join(updated_confs)}") @app.command('list') @@ -98,8 +99,7 @@ def list_configurations(): except NoActiveSessionError: active_config = None - typer.echo(f"Configuration file location: {CONFIGURATION_FILE_PATH}") - typer.echo() + logging.info(f"Configuration file location: {CONFIGURATION_FILE_PATH}") table = Table(title='Aerie Host Configurations', caption='Active configuration in red') diff --git a/src/aerie_cli/commands/constraints.py b/src/aerie_cli/commands/constraints.py index 60e2ceda..83aa6d2d 100644 --- a/src/aerie_cli/commands/constraints.py +++ b/src/aerie_cli/commands/constraints.py @@ -1,6 +1,7 @@ import json import arrow +import logging import typer from rich.console import Console from rich.table import Table @@ -34,7 +35,7 @@ def upload( "definition": str_contents } constraint_id = client.upload_constraint(constraint) - typer.echo(f"Created constraint: {constraint_id}") + logging.info(f"Created constraint: {constraint_id}") @app.command() def delete( @@ -44,7 +45,7 @@ def delete( client = CommandContext.get_client() client.delete_constraint(id) - typer.echo(f"Successfully deleted constraint {id}") + logging.info(f"Successfully deleted constraint {id}") @app.command() def update( @@ -66,7 +67,7 @@ def update( "definition": str_contents } constraint_id = client.update_constraint(id, constraint) - typer.echo(f"Updated constraint: {constraint_id}") + logging.info(f"Updated constraint: {constraint_id}") @app.command() def violations( @@ -75,7 +76,7 @@ def violations( client = CommandContext.get_client() constraint_violations = client.get_constraint_violations(plan_id) - typer.echo(f"Constraint violations: {constraint_violations}") + logging.info(f"Constraint violations: {constraint_violations}") diff --git 
a/src/aerie_cli/commands/metadata.py b/src/aerie_cli/commands/metadata.py index e8e64154..e288e3e3 100644 --- a/src/aerie_cli/commands/metadata.py +++ b/src/aerie_cli/commands/metadata.py @@ -2,6 +2,7 @@ import arrow import typer +import logging from rich.console import Console from rich.table import Table @@ -30,7 +31,7 @@ def upload( contents = in_file.read() schema_data = json.loads(contents) result = client.add_directive_metadata_schemas(schema_data["schemas"]) - typer.echo(f"{len(schema_data['schemas'])} new schema have been added.") + logging.info(f"{len(schema_data['schemas'])} new schema have been added.") @app.command() @@ -41,7 +42,7 @@ def delete( ): """Delete a metadata schema by its name.""" resp = CommandContext.get_client().delete_directive_metadata_schema(schema_name) - typer.echo(f"Schema `{resp}` has been removed.") + logging.info(f"Schema `{resp}` has been removed.") @app.command() @@ -72,4 +73,4 @@ def clean(): for schema in resp: client.delete_directive_metadata_schema(schema["key"]) - typer.echo(f"All metadata schemas have been deleted") + logging.info(f"All metadata schemas have been deleted") diff --git a/src/aerie_cli/commands/models.py b/src/aerie_cli/commands/models.py index 093c1376..db33c6e2 100644 --- a/src/aerie_cli/commands/models.py +++ b/src/aerie_cli/commands/models.py @@ -1,6 +1,7 @@ import json import arrow +import logging import typer from rich.console import Console from rich.table import Table @@ -63,9 +64,9 @@ def upload( # Attach sim template to model client.upload_sim_template(model_id=model_id, args=json_obj, name=name) - typer.echo(f"Attached simulation template to model {model_id}.") + logging.info(f"Attached simulation template to model {model_id}.") - typer.echo(f"Created new mission model: {model_name} with Model ID: {model_id}") + logging.info(f"Created new mission model: {model_name} with Model ID: {model_id}") @app.command() @@ -77,7 +78,7 @@ def delete( """Delete a mission model by its model id.""" model_name = 
CommandContext.get_client().delete_mission_model(model_id) - typer.echo(f"Mission Model `{model_name}` with ID: {model_id} has been removed.") + logging.info(f"Mission Model `{model_name}` with ID: {model_id} has been removed.") @app.command() @@ -89,7 +90,7 @@ def clean(): for api_mission_model in resp: client.delete_mission_model(api_mission_model.id) - typer.echo(f"All mission models have been deleted") + logging.info(f"All mission models have been deleted") @app.command() diff --git a/src/aerie_cli/commands/plans.py b/src/aerie_cli/commands/plans.py index 7b616b84..2deb03bd 100644 --- a/src/aerie_cli/commands/plans.py +++ b/src/aerie_cli/commands/plans.py @@ -4,6 +4,7 @@ import arrow import pandas as pd import typer +import logging from rich.console import Console from rich.table import Table @@ -29,7 +30,7 @@ def download( plan = CommandContext.get_client().get_activity_plan_by_id(id, full_args) with open(output, "w") as out_file: out_file.write(plan.to_json(indent=2)) - typer.echo(f"Wrote activity plan to {output}") + logging.info(f"Wrote activity plan to {output}") @plans_app.command() @@ -48,7 +49,7 @@ def download_simulation( simulated_activities = client.get_simulation_results(sim_id) with open(output, "w") as out_file: out_file.write(json.dumps(simulated_activities, indent=2)) - typer.echo(f"Wrote activity plan to {output}") + logging.info(f"Wrote activity plan to {output}") @plans_app.command() @@ -141,7 +142,7 @@ def download_resources( # write to file with open(output, "w") as out_file: df.to_csv(out_file, index=False, header=field_name) - typer.echo(f"Wrote resource timelines to {output}") + logging.info(f"Wrote resource timelines to {output}") else: if absolute_time: @@ -157,7 +158,7 @@ def download_resources( # write to file with open(output, "w") as out_file: out_file.write(json.dumps(resources, indent=2)) - typer.echo(f"Wrote resource timelines to {output}") + logging.info(f"Wrote resource timelines to {output}") @plans_app.command() @@ -179,7 
+180,7 @@ def upload( if time_tag: plan_to_create.name += arrow.utcnow().format("YYYY-MM-DDTHH-mm-ss") plan_id = client.create_activity_plan(model_id, plan_to_create) - typer.echo(f"Created plan ID: {plan_id}") + logging.info(f"Created plan ID: {plan_id}") @plans_app.command() @@ -196,7 +197,7 @@ def duplicate( plan_to_duplicate = ActivityPlanCreate.from_plan_read(plan) plan_to_duplicate.name = duplicated_plan_name duplicated_plan_id = client.create_activity_plan(plan.model_id, plan_to_duplicate) - typer.echo(f"Duplicate activity plan created with ID: {duplicated_plan_id}") + logging.info(f"Duplicate activity plan created with ID: {duplicated_plan_id}") @plans_app.command() @@ -218,12 +219,12 @@ def simulate( end_time = arrow.utcnow() res = client.get_simulation_results(sim_dataset_id) total_sim_time = end_time - start_time - typer.echo(f"Simulation completed in " + str(total_sim_time)) + logging.info(f"Simulation completed in " + str(total_sim_time)) if output: with open(output, "w") as out_file: out_file.write(json.dumps(res, indent=2)) - typer.echo(f"Wrote simulation results to {output}") + logging.info(f"Wrote simulation results to {output}") @plans_app.command() @@ -273,9 +274,9 @@ def create_config( resp = CommandContext.get_client().create_config_args(plan_id=plan_id, args=json_obj) - typer.echo(f"Configuration Arguments for Plan ID: {plan_id}") + logging.info(f"Configuration Arguments for Plan ID: {plan_id}") for arg in resp: - typer.echo(f"(*) {arg}: {resp[arg]}") + logging.info(f"(*) {arg}: {resp[arg]}") @plans_app.command() @@ -292,9 +293,9 @@ def update_config( resp = CommandContext.get_client().update_config_args(plan_id=plan_id, args=json_obj) - typer.echo(f"Configuration Arguments for Plan ID: {plan_id}") + logging.info(f"Configuration Arguments for Plan ID: {plan_id}") for arg in resp: - typer.echo(f"(*) {arg}: {resp[arg]}") + logging.info(f"(*) {arg}: {resp[arg]}") @plans_app.command() @@ -304,7 +305,7 @@ def delete( """Delete an activity plan by 
its id.""" plan_name = CommandContext.get_client().delete_plan(plan_id) - typer.echo(f"Plan `{plan_name}` with ID: {plan_id} has been removed.") + logging.info(f"Plan `{plan_name}` with ID: {plan_id} has been removed.") @plans_app.command() @@ -316,7 +317,7 @@ def clean(): for activity_plan in resp: client.delete_plan(activity_plan.id) - typer.echo(f"All activity plans have been deleted") + logging.info(f"All activity plans have been deleted") @collaborators_app.command("list") def list_collaborators( @@ -328,9 +329,9 @@ def list_collaborators( collaborators = client.list_plan_collaborators(plan_id) if len(collaborators): - typer.echo("\n".join(collaborators)) + logging.info("\n".join(collaborators)) else: - typer.echo("No collaborators") + logging.info("No collaborators") @collaborators_app.command("add") @@ -346,9 +347,9 @@ def add_collaborator( client.add_plan_collaborator(plan_id, user) if user in client.list_plan_collaborators(plan_id): - typer.echo(f"Successfully added collaborator: {user}") + logging.info(f"Successfully added collaborator: {user}") else: - typer.echo(f"Failed to add collaborator") + logging.info(f"Failed to add collaborator") @collaborators_app.command("delete") @@ -368,6 +369,6 @@ def delete_collaborator( client.delete_plan_collaborator(plan_id, user) if user not in client.list_plan_collaborators(plan_id): - typer.echo("Successfully deleted collaborator") + logging.info("Successfully deleted collaborator") else: - typer.echo("Failed to delete collaborator") + logging.info("Failed to delete collaborator") diff --git a/src/aerie_cli/commands/scheduling.py b/src/aerie_cli/commands/scheduling.py index 0ccad565..2119edd8 100644 --- a/src/aerie_cli/commands/scheduling.py +++ b/src/aerie_cli/commands/scheduling.py @@ -1,4 +1,5 @@ import typer +import logging from aerie_cli.commands.command_context import CommandContext @@ -31,7 +32,7 @@ def upload( resp = client.upload_scheduling_goals(upload_obj) - typer.echo(f"Uploaded scheduling goals to 
venue.") + logging.info(f"Uploaded scheduling goals to venue.") uploaded_ids = [kv["id"] for kv in resp] @@ -43,7 +44,7 @@ def upload( client.add_goals_to_specifications(upload_to_spec) - typer.echo(f"Assigned goals in priority order to plan ID {plan_id}.") + logging.info(f"Assigned goals in priority order to plan ID {plan_id}.") @app.command() @@ -56,7 +57,7 @@ def delete( client = CommandContext.get_client() resp = client.delete_scheduling_goal(goal_id) - typer.echo("Successfully deleted Goal ID: " + str(resp)) + logging.info("Successfully deleted Goal ID: " + str(resp)) @app.command() def delete_all_goals_for_plan( @@ -71,14 +72,14 @@ def delete_all_goals_for_plan( clear_goals = client.get_scheduling_goals_by_specification(specification) #response is in asc order if len(clear_goals) == 0: #no goals to clear - typer.echo("No goals to delete.") + logging.info("No goals to delete.") return - typer.echo("Deleting goals for Plan ID {plan}: ".format(plan=plan_id), nl=False) + to_print = "Deleting goals for Plan ID {plan}: ".format(plan=plan_id) goal_ids = [] for goal in clear_goals: goal_ids.append(goal["goal"]["id"]) - typer.echo(str(goal["goal"]["id"]) + " ", nl=False) - typer.echo() - + to_print += str(goal["goal"]["id"]) + " " + logging.info(to_print) + client.delete_scheduling_goals(goal_ids) diff --git a/src/aerie_cli/persistent.py b/src/aerie_cli/persistent.py index b60916b2..38305dc4 100644 --- a/src/aerie_cli/persistent.py +++ b/src/aerie_cli/persistent.py @@ -21,15 +21,53 @@ APP_DIRS.user_config_dir).resolve().absolute() CONFIGURATION_FILE_PATH = CONFIGURATION_FILE_DIRECTORY.joinpath('config.json') -SESSION_FILE_DIRECTORY = Path(APP_DIRS.user_config_dir).resolve().absolute() +SESSION_FILE_DIRECTORY = Path(APP_DIRS.user_config_dir).resolve().absolute() / "session_files" +SESSION_FILE_DIRECTORY.mkdir(parents=True, exist_ok=True) SESSION_TIMESTAMP_FSTRING = r'%Y-%jT%H-%M-%S.%f' SESSION_TIMEOUT = timedelta(hours=12) +MAX_LOG_FILES = 25 + +TIME_FORMAT = 
'%Y-%m-%d_%H-%M-%S' +START_TIME = datetime.now().strftime(TIME_FORMAT) +LOGS_PATH = Path(APP_DIRS.user_config_dir).resolve().absolute() / "logs" +LOGS_PATH.mkdir(parents=True, exist_ok=True) +CURRENT_LOG_FILE_NAME = f"aerie_cli_{START_TIME}" +CURRENT_LOG_PATH = LOGS_PATH / (f"{CURRENT_LOG_FILE_NAME}.log") +number_appended_to_log = 1 +while CURRENT_LOG_PATH.exists(): +    CURRENT_LOG_PATH = LOGS_PATH / f"{CURRENT_LOG_FILE_NAME}_{number_appended_to_log}.log"; number_appended_to_log += 1 def delete_all_persistent_files(): shutil.rmtree(CONFIGURATION_FILE_DIRECTORY, ignore_errors=True) shutil.rmtree(SESSION_FILE_DIRECTORY, ignore_errors=True) - + shutil.rmtree(LOGS_PATH, ignore_errors=True) + # The configuration file directory is the parent of the session and log directories. + # Those are only initialized at the start of the program, + # so we must re-create them here to prevent a 'directory doesn't exist' error + SESSION_FILE_DIRECTORY.mkdir(parents=True, exist_ok=True) + LOGS_PATH.mkdir(parents=True, exist_ok=True) + +def clear_old_log_files(): + # Periodically delete old log files so at most MAX_LOG_FILES remain + log_files = {} # (time, name): file + times = [] + for log_file in LOGS_PATH.glob("*"): + if not log_file.is_file() or not log_file.suffix == ".log" \ + or not log_file.name.startswith("aerie_cli"): + continue + # timestamp is fixed-width; slice it off to ignore any _N uniqueness suffix + time_part = log_file.name.replace("aerie_cli_", "")[:len(START_TIME)] + try: + time = datetime.strptime(time_part, TIME_FORMAT) + except ValueError: + continue + log_files[(time, log_file.name)] = log_file + times.append((time, log_file.name)) + times.sort() + for key in times[:max(len(log_files) - (MAX_LOG_FILES - 1), 0)]: + log_files[key].unlink() + log_files.pop(key) class PersistentConfigurationManager: _configurations: List[AerieHostConfiguration] = None diff --git a/src/aerie_cli/utils/logger.py b/src/aerie_cli/utils/logger.py new file mode 100644 index 00000000..7836181f --- /dev/null +++ b/src/aerie_cli/utils/logger.py @@ -0,0 +1,16 @@ +import typer +import logging + +class TyperLoggingHandler(logging.Handler): +    """A
logger that uses typer.echo""" + def emit(self, record: logging.LogRecord) -> None: + fg = None + bg = None + if record.levelno == logging.WARNING: + fg = typer.colors.YELLOW + elif record.levelno == logging.CRITICAL: + fg = typer.colors.BRIGHT_WHITE + bg = typer.colors.BRIGHT_RED + elif record.levelno == logging.ERROR: + fg = typer.colors.BRIGHT_RED + typer.secho(self.format(record), bg=bg, fg=fg) \ No newline at end of file diff --git a/src/aerie_cli/utils/prompts.py b/src/aerie_cli/utils/prompts.py index e3c8bd14..9afd8967 100644 --- a/src/aerie_cli/utils/prompts.py +++ b/src/aerie_cli/utils/prompts.py @@ -1,13 +1,16 @@ from typing import List import typer - +import logging def select_from_list(options: List[str], prompt: str = 'Select an option'): while True: for i, c in enumerate(options): print(f"\t{i+1}) {c}") choice_id = typer.prompt(prompt) + logging.debug(f"Prompt: {prompt}") + logging.debug(f"Options: {options}") + logging.debug(f"Selected {choice_id}") try: return options[int(choice_id)-1] except (KeyError, ValueError): diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index a5e20dfe..d822f951 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -3,6 +3,7 @@ # The scope is global either way, but this allows for easier debugging, type hints, and autofill import os import sys +import logging from aerie_cli.aerie_client import AerieClient from aerie_cli.aerie_host import AerieHost, AerieHostConfiguration @@ -17,6 +18,9 @@ start_session_from_configuration, ) +GLOBAL_LOGGER = logging.getLogger(__name__) +GLOBAL_LOGGER.propagate = True # we must set this to see stdout in caplog + # in case src_path is not from aeri-cli src and from site-packages src_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../src") sys.path.insert(0, src_path) diff --git a/tests/integration_tests/test_configurations.py b/tests/integration_tests/test_configurations.py index 
01959958..5824f1ba 100644 --- a/tests/integration_tests/test_configurations.py +++ b/tests/integration_tests/test_configurations.py @@ -4,6 +4,7 @@ from aerie_cli.__main__ import app import pytest +import logging from .conftest import\ HASURA_ADMIN_SECRET,\ @@ -26,7 +27,8 @@ CONFIGURATION_NAME = "localhost" configuration_id = -1 -def test_configurations_clean(): +def test_configurations_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["configurations", "clean"], @@ -34,12 +36,12 @@ def test_configurations_clean(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert len(PersistentConfigurationManager.get_configurations()) == 0,\ f"CONFIGURATIONS NOT CLEARED! CONFIGURATIONS: {PersistentConfigurationManager.get_configurations()}\n"\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" no_session_error = False @@ -71,7 +73,8 @@ def test_configurations_clean(): pytest.exit("CONFIGURATION SHOULD NOT EXIST. Failed when using active configuration\n", returncode=pytest.ExitCode.TESTS_FAILED) -def test_configurations_create(): +def test_configurations_create(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["configurations", "create"], @@ -84,7 +87,7 @@ def test_configurations_create(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" global configuration_id @@ -94,7 +97,8 @@ def test_configurations_create(): configuration_id = i assert configuration_id != -1, "CONFIGURATION NOT LOADED, is it's name localhost?" 
-def test_activate(): +def test_activate(caplog): + caplog.set_level(logging.INFO) before_refresh = len(PersistentConfigurationManager.get_configurations()) assert before_refresh > 0 PersistentConfigurationManager.read_configurations() @@ -108,18 +112,18 @@ def test_activate(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert PersistentSessionManager.get_active_session().configuration_name == "localhost" -def test_deactivate(): +def test_deactivate(caplog): + caplog.set_level(logging.INFO) before_refresh = len(PersistentConfigurationManager.get_configurations()) assert before_refresh > 0 PersistentConfigurationManager.read_configurations() assert len(PersistentConfigurationManager.get_configurations()) == before_refresh assert PersistentSessionManager.get_active_session().configuration_name == "localhost" - result = runner.invoke( app, ["deactivate"], @@ -128,11 +132,11 @@ def test_deactivate(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( f"Deactivated session: {CONFIGURATION_NAME}" - in result.stdout + in caplog.text ) try: @@ -156,7 +160,8 @@ def test_deactivate(): f"CONFIGURATION SHOULD NOT BE ACTIVE. 
Active config: {active_config}", returncode=pytest.ExitCode.TESTS_FAILED) -def test_configurations_delete(): +def test_configurations_delete(caplog): + caplog.set_level(logging.INFO) before_refresh = len(PersistentConfigurationManager.get_configurations()) assert before_refresh > 0 PersistentConfigurationManager.read_configurations() @@ -177,10 +182,11 @@ def test_configurations_delete(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" -def test_configurations_load(): +def test_configurations_load(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["configurations", "load"], @@ -189,11 +195,11 @@ def test_configurations_load(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( "Added configurations" - in result.stdout + in caplog.text ) # def test_configurations_update(): @@ -214,10 +220,11 @@ def test_configurations_load(): # ) # assert result.exit_code == 0,\ -# f"{result.stdout}"\ +# f"{caplog.text}"\ # f"{result.stderr}" -def test_configurations_list(): +def test_configurations_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["configurations", "list"], @@ -225,15 +232,16 @@ def test_configurations_list(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( "Aerie Host Configurations" - in result.stdout + in result.stdout # we use Console.print for this, not log ) # We're activating at the end to ensure that localhost is still active # for other integration tests. 
-def test_last_activate(): +def test_last_activate(caplog): + caplog.set_level(logging.INFO) before_refresh = len(PersistentConfigurationManager.get_configurations()) assert before_refresh > 0 PersistentConfigurationManager.read_configurations() @@ -249,6 +257,6 @@ def test_last_activate(): ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert PersistentSessionManager.get_active_session().configuration_name == "localhost" diff --git a/tests/integration_tests/test_constraints.py b/tests/integration_tests/test_constraints.py index 1b04ce1d..b21bb322 100644 --- a/tests/integration_tests/test_constraints.py +++ b/tests/integration_tests/test_constraints.py @@ -8,6 +8,7 @@ import os import pytest import arrow +import logging runner = CliRunner(mix_stderr = False) @@ -50,50 +51,54 @@ def set_up_environment(request): plan_id = client.create_activity_plan(model_id, plan_to_create) client.simulate_plan(plan_id) -def test_constraint_upload(): +def test_constraint_upload(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["constraints", "upload"], input="Test" + "\n" + CONSTRAINT_PATH + "\n" + str(model_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Created constraint" in result.stdout + assert "Created constraint" in caplog.text global constraint_id - for line in result.stdout.splitlines(): + for line in caplog.text.splitlines(): if not "Created constraint: " in line: continue # get constraint id from the end of the line constraint_id = int(line.split(": ")[1]) assert constraint_id != -1, "Could not find constraint ID, constraint upload may have failed"\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" -def test_constraint_update(): +def test_constraint_update(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["constraints", "update"], input=str(constraint_id) + "\n" + CONSTRAINT_PATH + 
"\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Updated constraint" in result.stdout + assert "Updated constraint" in caplog.text -def test_constraint_violations(): +def test_constraint_violations(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["constraints", "violations"], input=str(plan_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" # Check that a constraint violation is returned with the open bracket and curly brace # (The integration test constraint should report a violation) - assert "Constraint violations: [{" in result.stdout + assert "Constraint violations: [{" in caplog.text -def test_constraint_delete(): +def test_constraint_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["constraints", "delete"], input=str(constraint_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Successfully deleted constraint {str(constraint_id)}" in result.stdout + assert f"Successfully deleted constraint {str(constraint_id)}" in caplog.text diff --git a/tests/integration_tests/test_expansion.py b/tests/integration_tests/test_expansion.py index c004fdb8..38f952be 100644 --- a/tests/integration_tests/test_expansion.py +++ b/tests/integration_tests/test_expansion.py @@ -1,6 +1,7 @@ import os import pytest import arrow +import logging from typer.testing import CliRunner from pathlib import Path @@ -76,49 +77,53 @@ def set_up_environment(request): # Uses plan and simulation dataset ####################### -def test_expansion_sequence_create(): +def test_expansion_sequence_create(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sequences", "create"], input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n" + str(2) + "\n", 
catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Successfully created sequence" in result.stdout -def test_expansion_sequence_list(): +def test_expansion_sequence_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sequences", "list"], input="2" + "\n" + str(sim_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "All sequences for Simulation Dataset" in result.stdout -def test_expansion_sequence_download(): +def test_expansion_sequence_download(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sequences", "download"], input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n" + DOWNLOADED_FILE_NAME + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" path_to_sequence = Path(DOWNLOADED_FILE_NAME) assert path_to_sequence.exists() path_to_sequence.unlink() -def test_expansion_sequence_delete(): +def test_expansion_sequence_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sequences", "delete"], input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Successfully deleted sequence" in result.stdout @@ -127,7 +132,8 @@ def test_expansion_sequence_delete(): # Uses model, command dictionary, and activity types ####################### -def test_expansion_set_create(): +def test_expansion_set_create(caplog): + caplog.set_level(logging.INFO) client.create_expansion_rule( expansion_logic=""" export default function MyExpansion(props: { @@ -158,7 +164,7 @@ def test_expansion_set_create(): ], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ 
f"{result.stderr}" global expansion_set_id for line in result.stdout.splitlines(): @@ -167,27 +173,29 @@ def test_expansion_set_create(): # get expansion id from the end of the line expansion_set_id = int(line.split(": ")[1]) assert expansion_set_id != -1, "Could not find expansion run ID, expansion create may have failed"\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" -def test_expansion_set_get(): +def test_expansion_set_get(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sets", "get"], input=str(expansion_set_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion Set" in result.stdout and "Contents" in result.stdout -def test_expansion_set_list(): +def test_expansion_set_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "sets", "list"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion Sets" in result.stdout assert "integration_test" in result.stdout @@ -196,25 +204,27 @@ def test_expansion_set_list(): # TEST EXPANSION RUNS # Uses plan and simulation dataset ####################### -def test_expansion_run_create(): +def test_expansion_run_create(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "runs", "create"], input=str(sim_id) + "\n" + str(expansion_set_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion Run ID: " in result.stdout -def test_expansion_runs_list(): +def test_expansion_runs_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "runs", "list"], input="2" + "\n" + str(sim_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion 
Runs" in result.stdout @@ -222,13 +232,14 @@ def test_expansion_runs_list(): # DELETE MODELS ####################### -def test_model_delete(): +def test_model_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "delete"], input=str(model_id), catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {model_id} has been removed" in result.stdout + assert f"ID: {model_id} has been removed" in caplog.text diff --git a/tests/integration_tests/test_metadata.py b/tests/integration_tests/test_metadata.py index a49e73cb..1465456e 100644 --- a/tests/integration_tests/test_metadata.py +++ b/tests/integration_tests/test_metadata.py @@ -3,6 +3,7 @@ from aerie_cli.__main__ import app import os +import logging runner = CliRunner(mix_stderr = False) @@ -13,36 +14,40 @@ METADATA_SCHEMAS_PATH = os.path.join(FILES_PATH, "metadata_schemas") METADATA_SCHEMA_PATH = os.path.join(METADATA_SCHEMAS_PATH, "metadata_schema.json") -def test_metadata_list(): +def test_metadata_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "list"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Metadata Schemas" in result.stdout -def test_metadata_clean(): +def test_metadata_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "clean"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "All metadata schemas have been deleted" in result.stdout + assert "All metadata schemas have been deleted" in caplog.text -def test_metadata_upload(): +def test_metadata_upload(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "upload"], input=METADATA_SCHEMA_PATH, catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + 
f"{caplog.text}"\ f"{result.stderr}" - assert "2 new schema have been added" in result.stdout + assert "2 new schema have been added" in caplog.text -def test_metadata_delete(): +def test_metadata_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "delete"], input="STRING_EXAMPLE", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Schema `STRING_EXAMPLE` has been removed" in result.stdout + assert "Schema `STRING_EXAMPLE` has been removed" in caplog.text diff --git a/tests/integration_tests/test_models.py b/tests/integration_tests/test_models.py index 36171ace..6cc09b93 100644 --- a/tests/integration_tests/test_models.py +++ b/tests/integration_tests/test_models.py @@ -1,4 +1,5 @@ import os +import logging from typer.testing import CliRunner @@ -15,21 +16,23 @@ # Model Variables model_id = -1 -def test_model_clean(): +def test_model_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "clean"], catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( f"All mission models have been deleted" - in result.stdout + in caplog.text ) -def test_model_upload(): +def test_model_upload(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "upload", "--time-tag-version"], @@ -49,30 +52,32 @@ def test_model_upload(): model_id = latest_model.id assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( f"Created new mission model: {MODEL_NAME} with Model ID: {model_id}" - in result.stdout + in caplog.text ) -def test_model_list(): +def test_model_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "list"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Current Mission Models" in 
result.stdout -def test_model_delete(): +def test_model_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "delete"], input=str(model_id), catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {model_id} has been removed" in result.stdout + assert f"ID: {model_id} has been removed" in caplog.text diff --git a/tests/integration_tests/test_plans.py b/tests/integration_tests/test_plans.py index 16585426..88372df7 100644 --- a/tests/integration_tests/test_plans.py +++ b/tests/integration_tests/test_plans.py @@ -1,5 +1,6 @@ import os import pytest +import logging from typer.testing import CliRunner from pathlib import Path @@ -54,7 +55,8 @@ def cli_plan_simulate(): # Uses model ####################### -def test_plan_upload(): +def test_plan_upload(caplog): + caplog.set_level(logging.INFO) # Clean out plans first plans.clean() @@ -66,7 +68,7 @@ def test_plan_upload(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" # Get uploaded plan id @@ -75,9 +77,10 @@ def test_plan_upload(): global plan_id plan_id = latest_plan.id - assert f"Created plan ID: {plan_id}" in result.stdout + assert f"Created plan ID: {plan_id}" in caplog.text -def test_plan_duplicate(): +def test_plan_duplicate(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "duplicate"], @@ -85,7 +88,7 @@ def test_plan_duplicate(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" # Get duplicated plan id @@ -93,14 +96,15 @@ def test_plan_duplicate(): latest_plan = resp[-1] duplicated_plan_id = latest_plan.id - assert f"Duplicate activity plan created with ID: {duplicated_plan_id}" in result.stdout + assert f"Duplicate activity plan created with ID: {duplicated_plan_id}" in caplog.text -def test_plan_list(): +def test_plan_list(caplog): 
+ caplog.set_level(logging.INFO) result = runner.invoke(app, ["plans", "list"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Current Activity Plans" in result.stdout @@ -111,7 +115,8 @@ def test_plan_list(): ####################### -def test_list_empty_plan_collaborators(): +def test_list_empty_plan_collaborators(caplog): + caplog.set_level(logging.INFO) """ Should be no plan collaborators to start """ @@ -121,12 +126,13 @@ def test_list_empty_plan_collaborators(): input=str(plan_id) + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout}" f"{result.stderr}" + assert result.exit_code == 0, f"{caplog.text}" f"{result.stderr}" - assert "No collaborators" in result.stdout + assert "No collaborators" in caplog.text -def test_add_collaborators(): +def test_add_collaborators(caplog): + caplog.set_level(logging.INFO) """ Add all users as collaborators and check the final list """ @@ -139,15 +145,16 @@ def test_add_collaborators(): input=str(plan_id) + "\n" + username + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" - assert "Success" in result.stdout + assert "Success" in caplog.text # Check full list of collaborators assert ADDITIONAL_USERS == client.list_plan_collaborators(plan_id) -def test_list_plan_collaborators(): +def test_list_plan_collaborators(caplog): + caplog.set_level(logging.INFO) """ Check that the `plans collaborators list` command lists all collaborators """ @@ -157,13 +164,14 @@ def test_list_plan_collaborators(): input=str(plan_id) + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" for username in ADDITIONAL_USERS: - assert username in result.stdout + assert username in caplog.text -def test_delete_collaborators(): +def 
test_delete_collaborators(caplog): + caplog.set_level(logging.INFO) """ Delete a collaborator and verify the result """ @@ -174,9 +182,9 @@ def test_delete_collaborators(): input=str(plan_id) + "\n" + "1" + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" - assert "Success" in result.stdout + assert "Success" in caplog.text ####################### @@ -184,17 +192,19 @@ def test_delete_collaborators(): # Uses plan ####################### -def test_plan_simulate(): +def test_plan_simulate(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() sim_ids = client.get_simulation_dataset_ids_by_plan_id(plan_id) global sim_id sim_id = sim_ids[-1] assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text -def test_plan_download(): +def test_plan_download(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download"], @@ -205,11 +215,12 @@ def test_plan_download(): assert path_to_plan.exists() path_to_plan.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_download_expanded_args(): +def test_plan_download_expanded_args(caplog): + caplog.set_level(logging.INFO) """ Download a plan, exercising the --full-args option to get effective activity arguments """ @@ -223,11 +234,12 @@ def test_plan_download_expanded_args(): assert path_to_plan.exists() path_to_plan.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_download_resources(): +def test_plan_download_resources(caplog): + 
caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download-resources"], @@ -238,11 +250,12 @@ def test_plan_download_resources(): assert path_to_resources.exists() path_to_resources.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote resource timelines" in result.stdout + assert f"Wrote resource timelines" in caplog.text -def test_plan_download_simulation(): +def test_plan_download_simulation(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download-simulation"], @@ -253,11 +266,12 @@ def test_plan_download_simulation(): assert path_to_resources.exists() path_to_resources.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_create_config(): +def test_plan_create_config(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "create-config"], @@ -265,20 +279,22 @@ def test_plan_create_config(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Configuration Arguments for Plan ID: {plan_id}" in result.stdout - assert "initialPlantCount: 2" in result.stdout - assert "initialProducer: nobody" in result.stdout + assert f"Configuration Arguments for Plan ID: {plan_id}" in caplog.text + assert "initialPlantCount: 2" in caplog.text + assert "initialProducer: nobody" in caplog.text -def test_simulate_after_create_config(): +def test_simulate_after_create_config(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text -def test_plan_update_config(): +def test_plan_update_config(caplog): + 
caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "update-config"], @@ -286,59 +302,63 @@ def test_plan_update_config(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Configuration Arguments for Plan ID: {plan_id}" in result.stdout - assert "initialPlantCount: 3" in result.stdout - assert "initialProducer: somebody" in result.stdout + assert f"Configuration Arguments for Plan ID: {plan_id}" in caplog.text + assert "initialPlantCount: 3" in caplog.text + assert "initialProducer: somebody" in caplog.text -def test_simulate_after_update_config(): +def test_simulate_after_update_config(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text ####################### # DELETE PLANS ####################### -def test_plan_delete(): +def test_plan_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "delete"], input=str(plan_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {plan_id} has been removed." in result.stdout + assert f"ID: {plan_id} has been removed." 
in caplog.text -def test_plan_clean(): +def test_plan_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "clean"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( f"All activity plans have been deleted" - in result.stdout + in caplog.text ) ####################### # DELETE MODELS ####################### -def test_model_delete(): +def test_model_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "delete"], input=str(model_id), catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {model_id} has been removed" in result.stdout + assert f"ID: {model_id} has been removed" in caplog.text diff --git a/tests/integration_tests/test_scheduling.py b/tests/integration_tests/test_scheduling.py index 887c039f..9c06560e 100644 --- a/tests/integration_tests/test_scheduling.py +++ b/tests/integration_tests/test_scheduling.py @@ -1,6 +1,7 @@ import os import pytest import arrow +import logging from typer.testing import CliRunner from pathlib import Path @@ -77,22 +78,24 @@ def cli_schedule_upload(): os.remove(schedule_file_path) return result -def test_schedule_upload(): +def test_schedule_upload(caplog): + caplog.set_level(logging.INFO) result = cli_schedule_upload() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Assigned goals in priority order" in result.stdout + assert "Assigned goals in priority order" in caplog.text global goal_id - for line in result.stdout.splitlines(): + for line in caplog.text.splitlines(): if not "Assigned goals in priority order" in line: continue # get expansion id from the end of the line goal_id = int(line.split("ID ")[1][:-1]) assert goal_id != -1, "Could not find goal ID, goal upload may have failed"\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" -def 
test_schedule_delete(): +def test_schedule_delete(caplog): + caplog.set_level(logging.INFO) assert goal_id != -1, "Goal id was not set" result = runner.invoke( @@ -102,11 +105,12 @@ def test_schedule_delete(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Successfully deleted Goal" in result.stdout + assert f"Successfully deleted Goal" in caplog.text -def test_schedule_delete_all(): +def test_schedule_delete_all(caplog): + caplog.set_level(logging.INFO) # Upload a goal to delete cli_schedule_upload() @@ -118,6 +122,6 @@ def test_schedule_delete_all(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Deleting goals for Plan ID {plan_id}" in result.stdout + assert f"Deleting goals for Plan ID {plan_id}" in caplog.text