From 9f99a69251131616cde158ea5a86843287e6197b Mon Sep 17 00:00:00 2001 From: JD Date: Wed, 12 Jan 2022 19:01:53 -0800 Subject: [PATCH] First version of the meister profiler Contains the first version of the meister profiler/decorator. This is just a small package with a decorator, meister_timer, being the only thing (besides the config vars) someone should be setting. What this decorator does is simple: when a decorated function is called (works for sync and async), it prints the time it took and keeps track of the call's metadata (start time, end time, calling line, etc...) and at the end uses it to report a list of the top longest calls as well as a color-coded bar graph representing the rough times testfixing Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/cli.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler Update opta/meister.py Co-authored-by: Patrick Fiedler bug fixing addressing cr typo bug fixing bugfixing wip --- opta/cli.py | 3 + opta/commands/apply.py | 4 + opta/commands/destroy.py | 6 + opta/commands/local_flag.py | 3 + opta/commands/output.py | 4 +- opta/commands/push.py | 10 ++ opta/core/aws.py | 22 ++++ opta/core/azure.py | 6 + opta/core/gcp.py | 11 ++ opta/core/generator.py | 4 + opta/core/helm.py | 5 + opta/core/kubernetes.py | 12 ++ opta/core/local.py | 6 + opta/core/terraform.py | 40 +++++++ opta/core/validator.py | 4 + opta/layer.py | 23 ++++ opta/meister.py | 174 ++++++++++++++++++++++++++++ opta/module.py | 9 ++ opta/plugins/derived_providers.py | 2 + opta/utils/__init__.py | 20 +++- tests/commands/test_apply.py | 6 +- tests/commands/test_deploy.py | 8 +- tests/commands/test_force_unlock.py | 6 +- tests/commands/test_inspect.py | 2 +- tests/commands/test_logs.py | 2 
+- tests/commands/test_output.py | 4 +- tests/commands/test_push.py | 10 +- tests/commands/test_secret.py | 6 +- tests/commands/test_shell.py | 6 +- tests/core/test_generator.py | 4 +- tests/utils/test_utils.py | 16 +-- 31 files changed, 396 insertions(+), 42 deletions(-) create mode 100644 opta/meister.py diff --git a/opta/cli.py b/opta/cli.py index 7f8cf1a20..0b5ca5af2 100755 --- a/opta/cli.py +++ b/opta/cli.py @@ -8,6 +8,7 @@ from colored import attr, fg import opta.sentry # noqa: F401 This leads to initialization of sentry sdk +from opta import meister from opta.cleanup_files import cleanup_files from opta.commands.apply import apply from opta.commands.deploy import deploy @@ -24,6 +25,7 @@ from opta.commands.shell import shell from opta.commands.validate import validate from opta.commands.version import version +from opta.constants import DEV_VERSION, VERSION from opta.crash_reporter import CURRENT_CRASH_REPORTER from opta.exceptions import UserErrors from opta.one_time import one_time @@ -61,6 +63,7 @@ def cli() -> None: # after the command. # However, we should still clean them up before the next command, or # else it may interfere with it. 
+ meister.REPORT_ENABLED = VERSION in [DEV_VERSION, ""] one_time() cleanup_files() cli() diff --git a/opta/commands/apply.py b/opta/commands/apply.py index 8367f18db..25a4557c5 100644 --- a/opta/commands/apply.py +++ b/opta/commands/apply.py @@ -29,6 +29,7 @@ from opta.error_constants import USER_ERROR_TF_LOCK from opta.exceptions import MissingState, UserErrors from opta.layer import Layer, StructuredConfig +from opta.meister import time as meister_time from opta.pre_check import pre_check from opta.utils import check_opta_file_exists, fmt_msg, logger @@ -329,6 +330,7 @@ def _apply( ) +@meister_time def _verify_semver( old_semver_string: str, current_semver_string: str, @@ -374,6 +376,7 @@ def _verify_semver( # Fetch the AZs of a region with boto3 +@meister_time def _fetch_availability_zones(aws_region: str) -> List[str]: client = boto3.client("ec2", config=Config(region_name=aws_region)) azs: List[str] = [] @@ -385,6 +388,7 @@ def _fetch_availability_zones(aws_region: str) -> List[str]: # Verify whether the parent layer exists or not +@meister_time def _verify_parent_layer(layer: Layer, auto_approve: bool = False) -> None: if layer.parent is None: return diff --git a/opta/commands/destroy.py b/opta/commands/destroy.py index 5c5941c2d..7c10494b8 100644 --- a/opta/commands/destroy.py +++ b/opta/commands/destroy.py @@ -20,6 +20,7 @@ from opta.error_constants import USER_ERROR_TF_LOCK from opta.exceptions import UserErrors from opta.layer import Layer +from opta.meister import time as meister_time from opta.pre_check import pre_check from opta.utils import check_opta_file_exists, logger @@ -108,6 +109,7 @@ def destroy( # Fetch all the children layers of the current layer. +@meister_time def _fetch_children_layers(layer: "Layer") -> List[str]: # Only environment layers have children (service) layers. # If the current layer has a parent, it is *not* an environment layer. 
@@ -130,6 +132,7 @@ def _fetch_children_layers(layer: "Layer") -> List[str]: # Get the names for all services for this environment based on the bucket file paths +@meister_time def _azure_get_configs(layer: "Layer") -> List[str]: providers = layer.gen_providers(0) @@ -151,6 +154,7 @@ def _azure_get_configs(layer: "Layer") -> List[str]: return configs +@meister_time def _aws_get_configs(layer: "Layer") -> List[str]: # Opta configs for every layer are saved in the opta_config/ directory # in the state bucket. @@ -168,6 +172,7 @@ def _aws_get_configs(layer: "Layer") -> List[str]: return configs +@meister_time def _gcp_get_configs(layer: "Layer") -> List[str]: bucket_name = layer.state_storage() gcs_config_dir = "opta_config/" @@ -189,6 +194,7 @@ def _gcp_get_configs(layer: "Layer") -> List[str]: return configs +@meister_time def _local_get_configs(layer: "Layer") -> List[str]: local_config_dir = os.path.join( os.path.join(str(Path.home()), ".opta", "local", "opta_config") diff --git a/opta/commands/local_flag.py b/opta/commands/local_flag.py index 7f62f5b81..350576d10 100644 --- a/opta/commands/local_flag.py +++ b/opta/commands/local_flag.py @@ -4,9 +4,11 @@ from ruamel import yaml +from opta.meister import time as meister_time from opta.utils import logger +@meister_time def _handle_local_flag(config: str, test: bool = False) -> str: if test: return config @@ -41,6 +43,7 @@ def _handle_local_flag(config: str, test: bool = False) -> str: return config +@meister_time def _clean_tf_folder() -> None: if os.path.isdir(os.getcwd() + "/.terraform"): rmtree(os.getcwd() + "/.terraform") diff --git a/opta/commands/output.py b/opta/commands/output.py index e97f6e7b0..643169931 100644 --- a/opta/commands/output.py +++ b/opta/commands/output.py @@ -6,6 +6,7 @@ from opta.core.generator import gen_all from opta.core.terraform import get_terraform_outputs from opta.layer import Layer +from opta.meister import time as meister_time from opta.utils import check_opta_file_exists, json 
@@ -16,7 +17,6 @@ ) def output(config: str, env: Optional[str],) -> None: """Print TF outputs""" - config = check_opta_file_exists(config) layer = Layer.load_from_yaml(config, env) amplitude_client.send_event( @@ -35,6 +35,7 @@ def output(config: str, env: Optional[str],) -> None: print(outputs_formatted) +@meister_time def _load_extra_aws_outputs(current_outputs: dict) -> dict: if "parent.load_balancer_raw_dns" in current_outputs: current_outputs["load_balancer_raw_dns"] = current_outputs[ @@ -44,6 +45,7 @@ def _load_extra_aws_outputs(current_outputs: dict) -> dict: return current_outputs +@meister_time def _load_extra_gcp_outputs(current_outputs: dict) -> dict: if "parent.load_balancer_raw_ip" in current_outputs: current_outputs["load_balancer_raw_ip"] = current_outputs[ diff --git a/opta/commands/push.py b/opta/commands/push.py index ef8545af5..64b4588c8 100644 --- a/opta/commands/push.py +++ b/opta/commands/push.py @@ -12,11 +12,13 @@ from opta.core.terraform import get_terraform_outputs from opta.exceptions import UserErrors from opta.layer import Layer +from opta.meister import time as meister_time from opta.nice_subprocess import nice_run from opta.utils import check_opta_file_exists, fmt_msg, yaml from opta.utils.dependencies import ensure_installed +@meister_time def get_push_tag(local_image: str, tag_override: Optional[str]) -> str: if ":" not in local_image: raise Exception( @@ -26,6 +28,7 @@ def get_push_tag(local_image: str, tag_override: Optional[str]) -> str: return tag_override or local_image_tag +@meister_time def get_image_digest(registry_url: str, image_tag: str) -> str: docker_client = from_env() current_image = docker_client.images.get(f"{registry_url}:{image_tag}") @@ -42,6 +45,7 @@ def get_image_digest(registry_url: str, image_tag: str) -> str: ) +@meister_time def get_registry_url(layer: Layer) -> str: outputs = get_terraform_outputs(layer) if "docker_repo_url" not in outputs: @@ -52,6 +56,7 @@ def get_registry_url(layer: Layer) -> str: 
return outputs["docker_repo_url"] +@meister_time def get_ecr_auth_info(layer: Layer) -> Tuple[str, str]: providers = layer.gen_providers(0) account_id = providers["provider"]["aws"]["allowed_account_ids"][0] @@ -73,6 +78,7 @@ def get_ecr_auth_info(layer: Layer) -> Tuple[str, str]: return username, password +@meister_time def get_gcr_auth_info(layer: Layer) -> Tuple[str, str]: if GCP.using_service_account(): service_account_key = GCP.get_service_account_raw_credentials() @@ -82,6 +88,7 @@ def get_gcr_auth_info(layer: Layer) -> Tuple[str, str]: return "oauth2accesstoken", credentials.token +@meister_time def get_acr_auth_info(layer: Layer) -> Tuple[str, str]: acr_name = get_terraform_outputs(layer.root()).get("acr_name") if acr_name is None: @@ -105,6 +112,7 @@ def get_acr_auth_info(layer: Layer) -> Tuple[str, str]: return "00000000-0000-0000-0000-000000000000", token +@meister_time def push_to_docker( username: str, password: str, @@ -124,6 +132,7 @@ def push_to_docker( return get_image_digest(registry_url, image_tag), image_tag +@meister_time def push_to_docker_local( local_image: str, registry_url: str, image_tag_override: Optional[str], ) -> Tuple[str, str]: @@ -173,6 +182,7 @@ def push(image: str, config: str, env: Optional[str], tag: Optional[str]) -> Non _push(image, config, env, tag) +@meister_time def _push( image: str, config: str, env: Optional[str], tag: Optional[str] ) -> Tuple[str, str]: diff --git a/opta/core/aws.py b/opta/core/aws.py index b0e438d6f..a08d8257f 100644 --- a/opta/core/aws.py +++ b/opta/core/aws.py @@ -7,6 +7,7 @@ from opta.core.cloud_client import CloudClient from opta.exceptions import UserErrors +from opta.meister import time as meister_time from opta.utils import fmt_msg, json, logger if TYPE_CHECKING: @@ -28,6 +29,7 @@ def __init__(self, layer: "Layer"): self.region = layer.root().providers["aws"]["region"] super().__init__(layer) + @meister_time def __get_dynamodb(self, dynamodb_table: str) -> DynamoDBClient: dynamodb_client: 
DynamoDBClient = boto3.client( "dynamodb", config=Config(region_name=self.region) @@ -51,6 +53,7 @@ def __get_dynamodb(self, dynamodb_table: str) -> DynamoDBClient: # { # "terraform.address" : "aws resource arn" # } + @meister_time def get_opta_resources(self) -> dict: client = boto3.client( "resourcegroupstaggingapi", config=Config(region_name=self.region) @@ -74,6 +77,7 @@ def get_opta_resources(self) -> dict: return resources_map + @meister_time def get_remote_config(self) -> Optional["StructuredConfig"]: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -89,6 +93,7 @@ def get_remote_config(self) -> Optional["StructuredConfig"]: return None # Upload the current opta config to the state bucket, under opta_config/. + @meister_time def upload_opta_config(self) -> None: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -101,6 +106,7 @@ def upload_opta_config(self) -> None: ) logger.debug("Uploaded opta config to s3") + @meister_time def delete_opta_config(self) -> None: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -116,6 +122,7 @@ def delete_opta_config(self) -> None: logger.info("Deleted opta config from s3") + @meister_time def delete_remote_state(self) -> None: bucket = self.layer.state_storage() providers = self.layer.gen_providers(0) @@ -138,6 +145,7 @@ def delete_remote_state(self) -> None: s3_client.delete_object(Bucket=bucket, Key=self.layer.name, VersionId=version) logger.info(f"Deleted opta tf state for {self.layer.name}") + @meister_time def get_terraform_lock_id(self) -> str: bucket = self.layer.state_storage() providers = self.layer.gen_providers(0) @@ -167,6 +175,7 @@ def force_delete_terraform_lock_id(self) -> None: ) @staticmethod + @meister_time def get_all_versions(bucket: str, filename: str, region: str) -> List[str]: s3 = boto3.client("s3", config=Config(region_name=region)) results = [] @@ -177,6 +186,7 @@ def get_all_versions(bucket: 
str, filename: str, region: str) -> List[str]: return results @staticmethod + @meister_time def prepare_read_buckets_iam_statements(bucket_names: List[str]) -> dict: return { "Sid": "ReadBuckets", @@ -187,6 +197,7 @@ def prepare_read_buckets_iam_statements(bucket_names: List[str]) -> dict: } @staticmethod + @meister_time def prepare_write_buckets_iam_statements(bucket_names: List[str]) -> dict: return { "Sid": "WriteBuckets", @@ -202,6 +213,7 @@ def prepare_write_buckets_iam_statements(bucket_names: List[str]) -> dict: } @staticmethod + @meister_time def prepare_publish_queues_iam_statements(queue_arns: List[str]) -> dict: return { "Sid": "PublishQueues", @@ -218,6 +230,7 @@ def prepare_publish_queues_iam_statements(queue_arns: List[str]) -> dict: } @staticmethod + @meister_time def prepare_subscribe_queues_iam_statements(queue_arns: List[str]) -> dict: return { "Sid": "SubscribeQueues", @@ -227,6 +240,7 @@ def prepare_subscribe_queues_iam_statements(queue_arns: List[str]) -> dict: } @staticmethod + @meister_time def prepare_publish_sns_iam_statements(topic_arns: List[str]) -> dict: return { "Sid": "PublishSns", @@ -236,6 +250,7 @@ def prepare_publish_sns_iam_statements(topic_arns: List[str]) -> dict: } @staticmethod + @meister_time def prepare_kms_write_keys_statements(kms_arns: List[str]) -> dict: return { "Sid": "KMSWrite", @@ -245,6 +260,7 @@ def prepare_kms_write_keys_statements(kms_arns: List[str]) -> dict: } @staticmethod + @meister_time def prepare_kms_read_keys_statements(kms_arns: List[str]) -> dict: return { "Sid": "KMSRead", @@ -254,6 +270,7 @@ def prepare_kms_read_keys_statements(kms_arns: List[str]) -> dict: } @staticmethod + @meister_time def prepare_dynamodb_write_tables_statements(dynamodb_table_arns: List[str]) -> dict: return { "Sid": "DynamodbWrite", @@ -284,6 +301,7 @@ def prepare_dynamodb_write_tables_statements(dynamodb_table_arns: List[str]) -> } @staticmethod + @meister_time def prepare_dynamodb_read_tables_statements(dynamodb_table_arns: 
List[str]) -> dict: return { "Sid": "DynamodbRead", @@ -307,6 +325,7 @@ def prepare_dynamodb_read_tables_statements(dynamodb_table_arns: List[str]) -> d } @staticmethod + @meister_time def delete_bucket(bucket_name: str, region: str) -> None: # Before a bucket can be deleted, all of the objects inside must be removed. bucket = boto3.resource("s3").Bucket(bucket_name) @@ -322,6 +341,7 @@ def delete_bucket(bucket_name: str, region: str) -> None: print(f"Bucket ({bucket_name}) successfully deleted.") @staticmethod + @meister_time def delete_dynamodb_table(table_name: str, region: str) -> None: client = boto3.client("dynamodb", config=Config(region_name=region)) @@ -344,6 +364,7 @@ def delete_dynamodb_table(table_name: str, region: str) -> None: raise Exception("Failed to delete after 20 retries, quitting.") @staticmethod + @meister_time def parse_arn(arn: str) -> AwsArn: # http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html elements = arn.split(":", 5) @@ -367,6 +388,7 @@ def parse_arn(arn: str) -> AwsArn: # 1). arn:partition:service:region:account-id:resource-id # 2). arn:partition:service:region:account-id:resource-type/resource-id # 3). 
arn:partition:service:region:account-id:resource-type:resource-id +@meister_time def get_aws_resource_id(resource_arn: str) -> str: arn_parts = resource_arn.split(":") diff --git a/opta/core/azure.py b/opta/core/azure.py index db867d995..c1a9779a7 100644 --- a/opta/core/azure.py +++ b/opta/core/azure.py @@ -6,6 +6,7 @@ from azure.storage.blob import BlobServiceClient, ContainerClient, StorageStreamDownloader from opta.core.cloud_client import CloudClient +from opta.meister import time as meister_time from opta.utils import json, logger if TYPE_CHECKING: @@ -19,6 +20,7 @@ class Azure(CloudClient): def get_credentials(cls) -> Any: return DefaultAzureCredential() + @meister_time def get_remote_config(self) -> Optional["StructuredConfig"]: providers = self.layer.gen_providers(0) credentials = self.get_credentials() @@ -47,6 +49,7 @@ def get_remote_config(self) -> Optional["StructuredConfig"]: return None # Upload the current opta config to the state bucket, under opta_config/. + @meister_time def upload_opta_config(self) -> None: providers = self.layer.gen_providers(0) credentials = self.get_credentials() @@ -68,6 +71,7 @@ def upload_opta_config(self) -> None: overwrite=True, ) + @meister_time def delete_opta_config(self) -> None: providers = self.layer.gen_providers(0) credentials = self.get_credentials() @@ -88,6 +92,7 @@ def delete_opta_config(self) -> None: except ResourceNotFoundError: logger.info("Remote opta config was already deleted") + @meister_time def delete_remote_state(self) -> None: providers = self.layer.gen_providers(0) credentials = self.get_credentials() @@ -108,6 +113,7 @@ def delete_remote_state(self) -> None: except ResourceNotFoundError: logger.info("Remote opta state was already deleted") + @meister_time def get_terraform_lock_id(self) -> str: providers = self.layer.gen_providers(0) credentials = self.get_credentials() diff --git a/opta/core/gcp.py b/opta/core/gcp.py index 912d9bc43..4ff6efcfb 100644 --- a/opta/core/gcp.py +++ b/opta/core/gcp.py 
@@ -12,6 +12,7 @@ from opta.core.cloud_client import CloudClient from opta.exceptions import UserErrors +from opta.meister import time as meister_time from opta.utils import fmt_msg, json, logger if TYPE_CHECKING: @@ -28,6 +29,7 @@ def __init__(self, layer: "Layer"): super().__init__(layer) @classmethod + @meister_time def get_credentials(cls) -> Tuple[Credentials, str]: if cls.project_id is None or cls.credentials is None: try: @@ -48,11 +50,13 @@ def get_credentials(cls) -> Tuple[Credentials, str]: return cls.credentials, cls.project_id # type: ignore @classmethod + @meister_time def using_service_account(cls) -> bool: credentials = cls.credentials or cls.get_credentials()[0] return type(credentials) == service_account.Credentials @classmethod + @meister_time def get_service_account_key_path(cls) -> str: if not cls.using_service_account: raise Exception( @@ -73,12 +77,14 @@ def get_service_account_key_path(cls) -> str: return service_account_key_file_path @classmethod + @meister_time def get_service_account_raw_credentials(cls) -> str: service_account_key_file_path = cls.get_service_account_key_path() with open(service_account_key_file_path, "r") as f: service_account_key = f.read() return service_account_key + @meister_time def get_remote_config(self) -> Optional["StructuredConfig"]: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -95,6 +101,7 @@ def get_remote_config(self) -> Optional["StructuredConfig"]: return None # Upload the current opta config to the state bucket, under opta_config/. 
+ @meister_time def upload_opta_config(self) -> None: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -105,6 +112,7 @@ def upload_opta_config(self) -> None: blob.upload_from_string(json.dumps(self.layer.structured_config())) logger.debug("Uploaded opta config to gcs") + @meister_time def delete_opta_config(self) -> None: bucket = self.layer.state_storage() config_path = f"opta_config/{self.layer.name}" @@ -117,6 +125,7 @@ def delete_opta_config(self) -> None: logger.warn(f"Did not find opta config {config_path} to delete") logger.info("Deleted opta config from gcs") + @meister_time def delete_remote_state(self) -> None: bucket = self.layer.state_storage() tfstate_path = f"{self.layer.name}/default.tfstate" @@ -129,6 +138,7 @@ def delete_remote_state(self) -> None: logger.warn(f"Did not find opta tf state {tfstate_path} to delete") logger.info(f"Deleted opta tf state for {self.layer.name}") + @meister_time def get_current_zones(self, max_number: int = 3) -> List[str]: credentials, project_id = self.get_credentials() service = discovery.build( @@ -141,6 +151,7 @@ def get_current_zones(self, max_number: int = 3) -> List[str]: response: Dict = request.execute() return sorted([x["name"] for x in response.get("items", [])])[:max_number] + @meister_time def get_terraform_lock_id(self) -> str: bucket = self.layer.state_storage() tf_lock_path = f"{self.layer.name}/default.tflock" diff --git a/opta/core/generator.py b/opta/core/generator.py index c6a757306..16b8a3e5b 100644 --- a/opta/core/generator.py +++ b/opta/core/generator.py @@ -10,6 +10,7 @@ current_image_digest_tag, get_cluster_name, ) +from opta.meister import time as meister_time from opta.utils import deep_merge, logger if TYPE_CHECKING: @@ -17,11 +18,13 @@ from opta.module import Module +@meister_time def gen_all(layer: "Layer", existing_config: Optional["StructuredConfig"] = None) -> None: # Just run the generator till the end list(gen(layer, existing_config)) +@meister_time 
def gen( layer: "Layer", existing_config: Optional["StructuredConfig"] = None, @@ -89,6 +92,7 @@ def gen( # Generate a tags override file in every module, that adds opta tags to every resource. +@meister_time def gen_opta_resource_tags(layer: "Layer") -> None: if "aws" in layer.providers: for module in layer.modules: diff --git a/opta/core/helm.py b/opta/core/helm.py index 6dd2ab847..cc7e123ff 100644 --- a/opta/core/helm.py +++ b/opta/core/helm.py @@ -2,6 +2,7 @@ from typing import FrozenSet, List from opta.exceptions import UserErrors +from opta.meister import time as meister_time from opta.nice_subprocess import nice_run from opta.utils import json from opta.utils.dependencies import ensure_installed @@ -9,14 +10,17 @@ class Helm: @staticmethod + @meister_time def get_required_path_executables() -> FrozenSet[str]: return frozenset({"helm"}) @staticmethod + @meister_time def validate_helm_installed() -> None: ensure_installed("helm") @classmethod + @meister_time def rollback_helm(cls, release: str, namespace: str, revision: str = "") -> None: cls.validate_helm_installed() try: @@ -39,6 +43,7 @@ def rollback_helm(cls, release: str, namespace: str, revision: str = "") -> None raise e @classmethod + @meister_time def get_helm_list(cls, **kwargs) -> List: # type: ignore # nosec cls.validate_helm_installed() namespaces: List[str] = [] diff --git a/opta/core/kubernetes.py b/opta/core/kubernetes.py index f7c3bac44..abb74ea6f 100644 --- a/opta/core/kubernetes.py +++ b/opta/core/kubernetes.py @@ -43,6 +43,7 @@ from opta.core.gcp import GCP from opta.core.terraform import get_terraform_outputs from opta.exceptions import UserErrors +from opta.meister import time as meister_time from opta.nice_subprocess import nice_run from opta.utils import deep_merge, logger from opta.utils.dependencies import ensure_installed @@ -66,6 +67,7 @@ def get_required_path_executables(cloud: str) -> FrozenSet[str]: return frozenset({"kubectl"}) | exec_map.get(cloud, set()) +@meister_time def 
configure_kubectl(layer: "Layer") -> None: """Configure the kubectl CLI tool for the given layer""" # Make sure the user has the prerequisite CLI tools installed @@ -91,6 +93,7 @@ def _local_configure_kubectl(layer: "Layer") -> None: ).stdout +@meister_time def load_opta_config_to_default(layer: "Layer") -> None: config_file_name = ( f"{GENERATED_KUBE_CONFIG_DIR}/kubeconfig-{layer.root().name}-{layer.cloud}.yaml" @@ -375,6 +378,7 @@ def current_image_digest_tag(layer: "Layer") -> dict: return image_info +@meister_time def create_namespace_if_not_exists(layer_name: str) -> None: load_opta_kube_config() v1 = CoreV1Api() @@ -389,6 +393,7 @@ def create_namespace_if_not_exists(layer_name: str) -> None: ) +@meister_time def create_manual_secrets_if_not_exists(layer_name: str) -> None: load_opta_kube_config() v1 = CoreV1Api() @@ -401,6 +406,7 @@ def create_manual_secrets_if_not_exists(layer_name: str) -> None: ) +@meister_time def get_manual_secrets(layer_name: str) -> dict: load_opta_kube_config() v1 = CoreV1Api() @@ -419,6 +425,7 @@ def get_manual_secrets(layer_name: str) -> dict: ) +@meister_time def update_manual_secrets(layer_name: str, new_values: dict) -> None: load_opta_kube_config() v1 = CoreV1Api() @@ -434,6 +441,7 @@ def update_manual_secrets(layer_name: str, new_values: dict) -> None: v1.replace_namespaced_secret("manual-secrets", layer_name, current_secret_object) +@meister_time def get_linked_secrets(layer_name: str) -> dict: load_opta_kube_config() v1 = CoreV1Api() @@ -452,6 +460,7 @@ def get_linked_secrets(layer_name: str) -> dict: ) +@meister_time def list_namespaces() -> None: load_opta_kube_config() v1 = CoreV1Api() @@ -568,6 +577,7 @@ def list_services(*, namespace: Optional[str] = None) -> List[V1Service]: return services.items +@meister_time def get_config_map(namespace: str, name: str) -> V1ConfigMap: load_opta_kube_config() v1 = CoreV1Api() @@ -582,6 +592,7 @@ def get_config_map(namespace: str, name: str) -> V1ConfigMap: return cm +@meister_time 
def update_config_map_data(namespace: str, name: str, data: Dict[str, str]) -> None: load_opta_kube_config() v1 = CoreV1Api() @@ -601,6 +612,7 @@ def update_config_map_data(namespace: str, name: str, data: Dict[str, str]) -> N v1.patch_namespaced_config_map(name, namespace, manifest) +@meister_time def get_secrets(layer_name: str) -> dict: return deep_merge(get_manual_secrets(layer_name), get_linked_secrets(layer_name)) diff --git a/opta/core/local.py b/opta/core/local.py index c0fe9458b..304e65942 100644 --- a/opta/core/local.py +++ b/opta/core/local.py @@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, Optional from opta.core.cloud_client import CloudClient +from opta.meister import time as meister_time from opta.utils import json, logger if TYPE_CHECKING: @@ -23,6 +24,7 @@ def __init__(self, layer: "Layer"): super().__init__(layer) + @meister_time def get_remote_config(self) -> Optional["StructuredConfig"]: try: return json.load(open(self.config_file_path, "r")) @@ -32,6 +34,7 @@ def get_remote_config(self) -> Optional["StructuredConfig"]: ) return None + @meister_time def upload_opta_config(self) -> None: with open(self.config_file_path, "w") as f: @@ -39,6 +42,7 @@ def upload_opta_config(self) -> None: logger.debug("Uploaded opta config to local") + @meister_time def delete_opta_config(self) -> None: if os.path.isfile(self.config_file_path): @@ -47,6 +51,7 @@ def delete_opta_config(self) -> None: else: logger.warn(f"Did not find opta config {self.config_file_path} to delete") + @meister_time def delete_remote_state(self) -> None: if os.path.isfile(self.tf_file): @@ -58,5 +63,6 @@ def delete_remote_state(self) -> None: else: logger.warn(f"Did not find opta tf state {self.tf_file} to delete") + @meister_time def get_terraform_lock_id(self) -> str: return "" diff --git a/opta/core/terraform.py b/opta/core/terraform.py index d2a74ba62..cfb921533 100644 --- a/opta/core/terraform.py +++ b/opta/core/terraform.py @@ -35,6 +35,7 @@ from opta.core.gcp import GCP from 
opta.core.local import Local from opta.exceptions import MissingState, UserErrors +from opta.meister import time as meister_time from opta.nice_subprocess import nice_run from opta.utils import deep_merge, fmt_msg, json, logger from opta.utils.dependencies import ensure_installed @@ -51,6 +52,7 @@ class Terraform: downloaded_state: Dict[str, Dict[Any, Any]] = {} @classmethod + @meister_time def init(cls, quiet: Optional[bool] = False, *tf_flags: str, layer: "Layer") -> None: kwargs = cls.insert_extra_env(layer) if quiet: @@ -68,6 +70,7 @@ def init(cls, quiet: Optional[bool] = False, *tf_flags: str, layer: "Layer") -> # Get outputs of the current terraform state @classmethod + @meister_time def get_outputs(cls, layer: "Layer") -> dict: state = cls.get_state(layer) outputs = state.get("outputs", {}) @@ -77,6 +80,7 @@ def get_outputs(cls, layer: "Layer") -> dict: return cleaned_outputs @classmethod + @meister_time def get_version(cls) -> str: try: out = nice_run( @@ -92,6 +96,7 @@ def get_version(cls) -> str: # Get the full terraform state. 
@classmethod + @meister_time def get_state(cls, layer: "Layer") -> dict: if layer.name in cls.downloaded_state: return cls.downloaded_state[layer.name] @@ -100,6 +105,7 @@ def get_state(cls, layer: "Layer") -> dict: raise MissingState(f"Unable to download state for layer {layer.name}") @classmethod + @meister_time def insert_extra_env(cls, layer: "Layer") -> dict: kwargs: Dict[str, Any] = {"env": {**os.environ.copy(), **EXTRA_ENV}} if layer and layer.cloud == "local": @@ -112,6 +118,7 @@ def insert_extra_env(cls, layer: "Layer") -> dict: return kwargs @classmethod + @meister_time def validate_version(cls) -> None: ensure_installed("terraform") @@ -128,6 +135,7 @@ def validate_version(cls) -> None: ) @classmethod + @meister_time def apply( cls, layer: "Layer", @@ -152,6 +160,7 @@ def apply( raise @classmethod + @meister_time def import_resource( cls, tf_resource_address: str, aws_resource_id: str, layer: "Layer" ) -> None: @@ -164,6 +173,7 @@ def import_resource( ) @classmethod + @meister_time def refresh(cls, layer: "Layer", *tf_flags: str) -> None: kwargs = cls.insert_extra_env(layer) nice_run( @@ -174,6 +184,7 @@ def refresh(cls, layer: "Layer", *tf_flags: str) -> None: ) @classmethod + @meister_time def destroy_resources( cls, layer: "Layer", target_resources: List[str], *tf_flags: str ) -> None: @@ -212,6 +223,7 @@ def destroy_resources( raise @classmethod + @meister_time def destroy_all(cls, layer: "Layer", *tf_flags: str) -> None: # Refreshing the state is necessary to update terraform outputs. @@ -271,6 +283,7 @@ def destroy_all(cls, layer: "Layer", *tf_flags: str) -> None: # Remove a resource from the terraform state, but does not destroy it. 
@classmethod + @meister_time def remove_from_state(cls, resource_address: str) -> None: kwargs: Dict[str, Any] = {"env": {**os.environ.copy(), **EXTRA_ENV}} nice_run( @@ -280,6 +293,7 @@ def remove_from_state(cls, resource_address: str) -> None: ) @classmethod + @meister_time def verify_storage(cls, layer: "Layer") -> bool: if layer.cloud == "aws": return cls._aws_verify_storage(layer) @@ -293,10 +307,12 @@ def verify_storage(cls, layer: "Layer") -> bool: raise Exception(f"Can not verify state storage for cloud {layer.cloud}") @classmethod + @meister_time def _local_verify_storage(cls, layer: "Layer") -> bool: return True @classmethod + @meister_time def _azure_verify_storage(cls, layer: "Layer") -> bool: credentials = Azure.get_credentials() providers = layer.gen_providers(0) @@ -320,6 +336,7 @@ def _azure_verify_storage(cls, layer: "Layer") -> bool: return False @classmethod + @meister_time def _gcp_verify_storage(cls, layer: "Layer") -> bool: credentials, project_id = GCP.get_credentials() bucket = layer.state_storage() @@ -331,6 +348,7 @@ def _gcp_verify_storage(cls, layer: "Layer") -> bool: return True @classmethod + @meister_time def _aws_verify_storage(cls, layer: "Layer") -> bool: bucket = layer.state_storage() region = layer.root().providers["aws"]["region"] @@ -344,6 +362,7 @@ def _aws_verify_storage(cls, layer: "Layer") -> bool: return True @classmethod + @meister_time def plan(cls, *tf_flags: str, quiet: Optional[bool] = False, layer: "Layer",) -> None: cls.init(quiet, layer=layer) kwargs = cls.insert_extra_env(layer) @@ -362,6 +381,7 @@ def plan(cls, *tf_flags: str, quiet: Optional[bool] = False, layer: "Layer",) -> raise @classmethod + @meister_time def show(cls, *tf_flags: str, capture_output: bool = False) -> Optional[str]: kwargs: Dict[str, Any] = {"env": {**os.environ.copy(), **EXTRA_ENV}} try: @@ -386,11 +406,13 @@ def show(cls, *tf_flags: str, capture_output: bool = False) -> Optional[str]: return None @classmethod + @meister_time def 
get_existing_modules(cls, layer: "Layer") -> Set[str]: existing_resources = cls.get_existing_module_resources(layer) return set(map(lambda r: r.split(".")[1], existing_resources)) @classmethod + @meister_time def get_existing_module_resources(cls, layer: "Layer") -> List[str]: try: state = cls.get_state(layer) @@ -420,6 +442,7 @@ def get_existing_module_resources(cls, layer: "Layer") -> List[str]: return module_resources @classmethod + @meister_time def download_state(cls, layer: "Layer") -> bool: if not cls.verify_storage(layer): logger.debug( @@ -517,6 +540,7 @@ def download_state(cls, layer: "Layer") -> bool: return False @classmethod + @meister_time def _aws_delete_state_storage(cls, layer: "Layer") -> None: providers = layer.gen_providers(0) if "s3" not in providers.get("terraform", {}).get("backend", {}): @@ -534,6 +558,7 @@ def _aws_delete_state_storage(cls, layer: "Layer") -> None: logger.info("Successfully deleted AWS state storage") @classmethod + @meister_time def _gcp_delete_state_storage(cls, layer: "Layer") -> None: providers = layer.gen_providers(0) if "gcs" not in providers.get("terraform", {}).get("backend", {}): @@ -549,6 +574,7 @@ def _gcp_delete_state_storage(cls, layer: "Layer") -> None: logger.warn("State bucket was already deleted") @classmethod + @meister_time def _local_delete_state_storage(cls, layer: "Layer") -> None: providers = layer.gen_providers(0) if "local" not in providers.get("terraform", {}).get("backend", {}): @@ -559,6 +585,7 @@ def _local_delete_state_storage(cls, layer: "Layer") -> None: logger.warn("Local state delete did not work?") @classmethod + @meister_time def _create_local_state_storage(cls, providers: dict) -> None: if not os.path.exists(cls.get_local_opta_dir()): try: @@ -571,6 +598,7 @@ def _create_local_state_storage(cls, providers: dict) -> None: ) @classmethod + @meister_time def _create_azure_state_storage(cls, providers: dict) -> None: resource_group_name = providers["terraform"]["backend"]["azurerm"][ 
"resource_group_name" @@ -704,6 +732,7 @@ def _create_azure_state_storage(cls, providers: dict) -> None: logger.debug(f"Provisioned container {container.name}") @classmethod + @meister_time def _create_gcp_state_storage(cls, providers: dict) -> None: bucket_name = providers["terraform"]["backend"]["gcs"]["bucket"] region = providers["provider"]["google"]["region"] @@ -797,6 +826,7 @@ def _create_gcp_state_storage(cls, providers: dict) -> None: ) @classmethod + @meister_time def _create_aws_state_storage(cls, providers: dict) -> None: bucket_name = providers["terraform"]["backend"]["s3"]["bucket"] dynamodb_table = providers["terraform"]["backend"]["s3"]["dynamodb_table"] @@ -926,6 +956,7 @@ def _create_aws_state_storage(cls, providers: dict) -> None: logger.debug("Load balancing service linked role present") @classmethod + @meister_time def create_state_storage(cls, layer: "Layer") -> None: """ Idempotently create remote storage for tf state @@ -941,10 +972,12 @@ def create_state_storage(cls, layer: "Layer") -> None: cls._create_local_state_storage(providers) @classmethod + @meister_time def get_local_opta_dir(cls) -> str: return os.path.join(str(Path.home()), ".opta", "local") @classmethod + @meister_time def force_unlock(cls, layer: "Layer", *tf_flags: str) -> None: tf_lock_exists, lock_id = cls.tf_lock_details(layer) @@ -972,6 +1005,7 @@ def force_delete_terraform_lock(cls, layer: "Layer", exception: Exception) -> No raise exception @classmethod + @meister_time def tf_lock_details(cls, layer: "Layer") -> Tuple[bool, str]: providers = layer.gen_providers(0, clean=False) lock_id: str = "" @@ -985,21 +1019,25 @@ def tf_lock_details(cls, layer: "Layer") -> Tuple[bool, str]: return "" != lock_id, lock_id @classmethod + @meister_time def _get_aws_lock_id(cls, layer: "Layer") -> str: aws = AWS(layer) return aws.get_terraform_lock_id() @classmethod + @meister_time def _get_gcp_lock_id(cls, layer: "Layer") -> str: gcp = GCP(layer) return gcp.get_terraform_lock_id() 
@classmethod + @meister_time def _get_azure_lock_id(cls, layer: "Layer") -> str: azure = Azure(layer) return azure.get_terraform_lock_id() +@meister_time def get_terraform_outputs(layer: "Layer") -> dict: """Fetch terraform outputs from existing TF file""" current_outputs = Terraform.get_outputs(layer) @@ -1007,6 +1045,7 @@ def get_terraform_outputs(layer: "Layer") -> dict: return deep_merge(current_outputs, parent_outputs) +@meister_time def _fetch_parent_outputs(layer: "Layer") -> dict: # Fetch the terraform state state = Terraform.get_state(layer) @@ -1036,6 +1075,7 @@ def _fetch_parent_outputs(layer: "Layer") -> dict: return parent_state_outputs +@meister_time def fetch_terraform_state_resources(layer: "Layer") -> dict: Terraform.download_state(layer) state = Terraform.get_state(layer) diff --git a/opta/core/validator.py b/opta/core/validator.py index 20fefb097..647323511 100644 --- a/opta/core/validator.py +++ b/opta/core/validator.py @@ -8,6 +8,7 @@ from opta.constants import REGISTRY, yaml from opta.exceptions import UserErrors +from opta.meister import time as meister_time class Module(Validator): @@ -18,6 +19,7 @@ class Module(Validator): cloud: Optional[str] = None # Yamale expects this function to return an array of errors + @meister_time def validate(self, value: Any) -> List[str]: if not isinstance(value, Mapping): return ["module is not a Map"] @@ -71,6 +73,7 @@ def _is_valid(self, value: Any) -> bool: return False return "org_name" in value or "name" in value + @meister_time def validate(self, value: Any) -> List[str]: if not isinstance(value, Mapping): return ["opta.yaml files should be a map"] @@ -189,6 +192,7 @@ def _print_errors(errors: List[str]) -> None: print(attr("reset"), end="") +@meister_time def validate_yaml( config_file_path: str, cloud: str, json_schema: bool = False ) -> Literal[True]: diff --git a/opta/layer.py b/opta/layer.py index ff4d2bbcd..0ef249c2d 100644 --- a/opta/layer.py +++ b/opta/layer.py @@ -29,6 +29,7 @@ from 
opta.core.validator import validate_yaml from opta.crash_reporter import CURRENT_CRASH_REPORTER from opta.exceptions import UserErrors +from opta.meister import time as meister_time from opta.module import Module from opta.plugins.derived_providers import DerivedProviders from opta.utils import check_opta_file_exists, deep_merge, hydrate, logger, yaml @@ -154,6 +155,7 @@ def __init__( ) @classmethod + @meister_time def load_from_yaml( cls, config: str, @@ -222,6 +224,7 @@ def structured_config(self) -> StructuredConfig: } @classmethod + @meister_time def load_from_dict( cls, conf: Dict[Any, Any], env: Optional[str], is_parent: bool = False ) -> Layer: @@ -301,6 +304,7 @@ def load_from_dict( ) @classmethod + @meister_time def validate_layer(cls, layer: "Layer") -> None: # Check for Uniqueness of Modules unique_modules: Set[str] = set() @@ -328,15 +332,18 @@ def validate_layer(cls, layer: "Layer") -> None: previous_modules.add(module.aliased_type or module.type) @staticmethod + @meister_time def valid_name(name: str) -> bool: pattern = "^[A-Za-z0-9-]*$" return bool(re.match(pattern, name)) + @meister_time def get_env(self) -> str: if self.parent is not None: return self.parent.get_env() return self.name + @meister_time def get_module( self, module_name: str, module_idx: Optional[int] = None ) -> Optional[Module]: @@ -346,6 +353,7 @@ def get_module( return module return None + @meister_time def get_required_path_dependencies(self) -> FrozenSet[str]: deps: Set[str] = set() for module in self.modules: @@ -354,10 +362,12 @@ def get_required_path_dependencies(self) -> FrozenSet[str]: return frozenset(deps) + @meister_time def validate_required_path_dependencies(self) -> None: deps = self.get_required_path_dependencies() validate_installed_path_executables(deps) + @meister_time def get_module_by_type( self, module_type: str, module_idx: Optional[int] = None ) -> list[Module]: @@ -368,6 +378,7 @@ def get_module_by_type( modules.append(module) return modules + @meister_time 
def outputs(self, module_idx: Optional[int] = None) -> Iterable[str]: ret: List[str] = [] module_idx = len(self.modules) - 1 if module_idx is None else module_idx @@ -375,6 +386,7 @@ def outputs(self, module_idx: Optional[int] = None) -> Iterable[str]: ret += module.outputs() return ret + @meister_time def gen_tf( self, module_idx: int, existing_config: Optional[StructuredConfig] = None ) -> Dict[Any, Any]: @@ -413,6 +425,7 @@ def gen_tf( return hydrate(ret, self.metadata_hydration()) + @meister_time def pre_hook(self, module_idx: int) -> None: for module in self.modules[0 : module_idx + 1]: self.processor_for(module).pre_hook(module_idx) @@ -422,6 +435,7 @@ def pre_hook(self, module_idx: int) -> None: module_idx ) + @meister_time def post_hook(self, module_idx: int, exception: Optional[Exception]) -> None: for module in self.modules[0 : module_idx + 1]: self.processor_for(module).post_hook(module_idx, exception) @@ -431,16 +445,19 @@ def post_hook(self, module_idx: int, exception: Optional[Exception]) -> None: module_idx, exception ) + @meister_time def post_delete(self, module_idx: int) -> None: module = self.modules[module_idx] logger.debug(f"Running post delete for module {module.name}") self.processor_for(module).post_delete(module_idx) + @meister_time def processor_for(self, module: Module) -> ModuleProcessor: module_type = module.aliased_type or module.type processor_class = get_processor_class(module_type) return processor_class(module, self) + @meister_time def metadata_hydration(self) -> Dict[Any, Any]: parent_name = self.parent.name if self.parent is not None else "nil" parent = None @@ -469,6 +486,7 @@ def metadata_hydration(self) -> Dict[Any, Any]: **provider_hydration, } + @meister_time def get_event_properties(self) -> Dict[str, Any]: current_keys: Dict[str, Any] = {} for module in self.modules: @@ -481,6 +499,7 @@ def get_event_properties(self) -> Dict[str, Any]: current_keys["parent_name"] = self.parent.name if self.parent is not None else "" 
import atexit
import inspect
import os
import sys
import textwrap
from asyncio import Lock, iscoroutinefunction
from functools import wraps
from math import ceil, floor, log10
from time import time as unix_time
from typing import Any, Callable, Dict, Final, List, Tuple, TypedDict
from uuid import UUID, uuid4

from colored import attr, fg


def round_sig(x: float, sig: int = 3) -> float:
    """Round *x* to *sig* significant figures.

    Returns 0.0 unchanged: log10 is undefined at 0 and would raise
    ValueError otherwise.
    """
    if x == 0:
        return 0.0
    return round(x, sig - int(floor(log10(abs(x)))) - 1)


class TimedCall(TypedDict):
    """Metadata captured for one timed function call."""

    id: UUID  # unique per call; used to key the bar-graph color map
    function_name: str
    positional_args: Tuple[Any, ...]
    keyword_args: Dict[str, Any]
    start_time: float  # unix timestamps, seconds
    end_time: float
    caller_file: str  # last two path components of the calling file
    caller_line: int


# Module-wide accumulator of calls slower than TIME_THRESHOLD.
_records: List[TimedCall] = []
# Guards _records against interleaved appends from concurrent coroutines.
ASYNC_LOCK: Final = Lock()
# How many of the slowest calls to list in the final report.
REPORT_SIZE: Final = 20
# Calls faster than this (seconds) are not recorded at all.
TIME_THRESHOLD: Final = 0.1
# Flip to False to turn the profiler into a no-op.
REPORT_ENABLED: bool = True
BLOCK_SYMBOL: Final = "\u2588"  # full block, used to draw the bar graph
START_TIME: Final = unix_time()


def _record_call(
    func: Callable,
    args: Tuple[Any, ...],
    kwds: Dict[str, Any],
    start_time: float,
    end_time: float,
    frame_info: inspect.FrameInfo,
) -> None:
    """Append one finished call to ``_records`` and print a one-line summary.

    Shared by the sync and async wrappers so the record layout and the
    printed message stay identical between the two.
    """
    _records.append(
        {
            "id": uuid4(),
            "function_name": func.__name__,
            "positional_args": args,
            "keyword_args": kwds,
            "start_time": start_time,
            "end_time": end_time,
            # Keep only the last two path components so report lines stay short.
            "caller_file": "/".join(frame_info.filename.split("/")[-2:]),
            "caller_line": frame_info.lineno,
        }
    )
    print(
        f"{fg('green')}Meister{attr(0)}: Function {func.__name__} took "
        f"{round_sig(end_time - start_time)} seconds"
    )


def time(func: Callable) -> Callable:
    """Decorator that times *func* (sync or async) for the meister report.

    When a decorated call takes longer than TIME_THRESHOLD seconds, its
    metadata (caller, args, start/end time) is recorded for the atexit
    report and a one-line duration message is printed immediately.
    Does nothing when REPORT_ENABLED is False.
    """
    if iscoroutinefunction(func):

        @wraps(func)
        async def wrapper(*args: Any, **kwds: Any) -> Any:
            if not REPORT_ENABLED:
                return await func(*args, **kwds)
            # stack()[1] is the frame that invoked the decorated function.
            frame_info = inspect.stack()[1]
            start_time = unix_time()
            output = await func(*args, **kwds)
            end_time = unix_time()
            if end_time - start_time <= TIME_THRESHOLD:
                return output
            # Serialize appends to the shared record list across coroutines.
            async with ASYNC_LOCK:
                _record_call(func, args, kwds, start_time, end_time, frame_info)
            return output

    else:

        @wraps(func)
        def wrapper(*args: Any, **kwds: Any) -> Any:
            if not REPORT_ENABLED:
                return func(*args, **kwds)
            frame_info = inspect.stack()[1]
            start_time = unix_time()
            output = func(*args, **kwds)
            end_time = unix_time()
            if end_time - start_time <= TIME_THRESHOLD:
                return output
            _record_call(func, args, kwds, start_time, end_time, frame_info)
            return output

    return wrapper


def make_report() -> None:
    """Print the end-of-process profiling report.

    Lists the REPORT_SIZE slowest recorded calls (with caller location and
    truncated args), then draws a color-coded timeline bar graph where each
    call's bar position/width is proportional to its share of total runtime.
    Skipped entirely when profiling is disabled or when running under
    pytest (which sets ``sys._called_from_test``).
    """
    if not REPORT_ENABLED or hasattr(sys, "_called_from_test"):
        return
    end_time = unix_time()
    total_time = end_time - START_TIME
    sorted_records = sorted(
        _records, key=lambda x: x["end_time"] - x["start_time"], reverse=True
    )
    sorted_records = sorted_records[:REPORT_SIZE]
    print(
        f"{fg('green')}Meister Report{attr(0)}: Total time: {round_sig(total_time)} seconds. Here are the top "
        f"{min(REPORT_SIZE, len(sorted_records))} longest function calls"
    )
    color_map: Dict[UUID, int] = {}
    for idx, record in enumerate(sorted_records):
        # Cycle through terminal colors 2..255 (0/1 are too dark/reserved).
        color_idx = 2 + (idx % 254)
        positional_args_str = str(record["positional_args"])
        positional_args_str = textwrap.shorten(
            positional_args_str, width=40, placeholder="..."
        )
        keyword_args_str = str(record["keyword_args"])
        keyword_args_str = textwrap.shorten(keyword_args_str, width=40, placeholder="...")
        print(
            f"{fg(color_idx)}{record['function_name']}{attr(0)} called in file {record['caller_file']}, line {record['caller_line']} with args {positional_args_str}, "
            f"kwargs {keyword_args_str}: {round_sig(record['end_time'] - record['start_time'])} seconds"
        )
        color_map[record["id"]] = color_idx
    try:
        max_column = os.get_terminal_size()[0]
    except OSError:
        max_column = 80  # Assume 80 columns if we aren't in a terminal
    # Lay the bars out chronologically; the lane printer requires entries
    # to be added in ascending start order to pack them greedily.
    sorted_records = sorted(sorted_records, key=lambda x: x["start_time"])
    lane_printer = GreedyLanePrinter()
    for record in sorted_records:
        lane_object: LaneObject = {
            "start": floor((record["start_time"] - START_TIME) / total_time * max_column),
            "end": ceil((record["end_time"] - START_TIME) / total_time * max_column),
            "color_idx": color_map[record["id"]],
        }
        lane_printer.add_entry(lane_object)
    lane_printer.render()


class LaneObject(TypedDict):
    """One bar in the timeline graph, in terminal-column units."""

    start: int
    end: int
    color_idx: int


class GreedyLanePrinter:
    """Packs non-overlapping bars into horizontal lanes and prints them.

    Only works nicely if you add from smallest number to largest: each new
    entry goes into the first lane whose last bar ends at or before the new
    bar's start, otherwise a fresh lane is opened.
    """

    def __init__(self) -> None:
        self.lanes: List[List[LaneObject]] = []

    def add_entry(self, lane_object: LaneObject) -> None:
        """Place *lane_object* in the first lane where it fits (greedy)."""
        for lane in self.lanes:
            last_object = lane[-1]
            if last_object["end"] <= lane_object["start"]:
                lane.append(lane_object)
                return
        self.lanes.append([lane_object])

    def render(self) -> None:
        """Print one line per lane, padding the left edge in black blocks."""
        for lane in self.lanes:
            lane_string = f"{fg(0)}{BLOCK_SYMBOL * lane[0]['start']}{attr(0)}"
            for lane_object in lane:
                blocks = BLOCK_SYMBOL * (lane_object["end"] - lane_object["start"])
                lane_string += f"{fg(lane_object['color_idx'])}{blocks}{attr(0)}"
            print(lane_string)


# Emit the report when the interpreter exits, however the CLI terminates.
atexit.register(make_report)
+ @meister_time def gen_tags_override(self) -> None: override_config: Any = {"resource": []} @@ -171,6 +177,7 @@ def gen_tags_override(self) -> None: with open(f"{self.module_dir_path}/{TAGS_OVERRIDE_FILE}", "w") as f: json.dump(override_config, f, ensure_ascii=False, indent=2) + @meister_time def translate_location(self, loc: str) -> str: if loc == "": return "" @@ -190,6 +197,7 @@ def translate_location(self, loc: str) -> str: return relative_path # Get the list of resources created by the current module. + @meister_time def get_terraform_resources(self) -> List[Resource]: if self.module_dir_path == "": return [] @@ -207,6 +215,7 @@ def get_terraform_resources(self) -> List[Resource]: return terraform_resources # Read all terraform files in the module and return its contents as a single dict. + @meister_time def _read_tf_module_config(self) -> dict: tf_module_config = {} diff --git a/opta/plugins/derived_providers.py b/opta/plugins/derived_providers.py index e0fe5ac03..68c49e044 100644 --- a/opta/plugins/derived_providers.py +++ b/opta/plugins/derived_providers.py @@ -1,5 +1,6 @@ from typing import Any, Dict, Optional +from opta.meister import time as meister_time from opta.utils import deep_merge, hydrate @@ -8,6 +9,7 @@ def __init__(self, layer: Any, is_parent: bool): self.layer = layer self.is_parent = is_parent + @meister_time def gen_tf( self, base_hydration: dict, module_idx: Optional[int] = None ) -> Dict[Any, Any]: diff --git a/opta/utils/__init__.py b/opta/utils/__init__.py index c6b5bba68..01ada5fe8 100644 --- a/opta/utils/__init__.py +++ b/opta/utils/__init__.py @@ -4,6 +4,7 @@ import sys from logging import Formatter, Logger, LogRecord from logging.handlers import QueueHandler, QueueListener +from os.path import exists from queue import Queue from shutil import which from textwrap import dedent @@ -16,6 +17,7 @@ from opta.constants import DEV_VERSION, VERSION from opta.datadog_logging import DatadogLogHandler from opta.exceptions import UserErrors 
+from opta.meister import time as meister_time from opta.special_formatter import PartialFormatter yaml = YAML(typ="safe") @@ -48,11 +50,13 @@ def format(self, record: LogRecord) -> str: ).format(record) +@meister_time def ansi_scrub(text: str) -> str: ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") return ansi_escape.sub("", text) +@meister_time def initialize_logger() -> Tuple[Logger, QueueListener, DatadogLogHandler]: logger = logging.getLogger("opta") logger.setLevel(logging.DEBUG) @@ -92,6 +96,7 @@ class RawString(str): pass +@meister_time def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]: dict2 = dict2.copy() for key, value in dict1.items(): @@ -106,6 +111,7 @@ def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]: return dict2 +@meister_time def hydrate(target: Any, hydration: Dict[Any, Any]) -> Dict[Any, Any]: if isinstance(target, dict): target = target.copy() @@ -121,11 +127,13 @@ def hydrate(target: Any, hydration: Dict[Any, Any]) -> Dict[Any, Any]: return target +@meister_time def is_tool(name: str) -> bool: """Check whether `name` is on PATH and marked as executable.""" return which(name) is not None +@meister_time def safe_run(func): # type: ignore def func_wrapper(*args, **kwargs): # type: ignore try: @@ -140,6 +148,7 @@ def func_wrapper(*args, **kwargs): # type: ignore return func_wrapper +@meister_time def fmt_msg(message: str) -> str: """Format triple quote python strings""" # TODO: Replace with better message formatting @@ -151,6 +160,7 @@ def fmt_msg(message: str) -> str: # TODO: Support max-width. # The data should be a 2D array of the shape rows x columns. +@meister_time def column_print(data: List[Any]) -> None: # Determine the width of each column (the length of the longest word + 1) longest_char_len_by_column = [0] * len(data[0]) @@ -175,6 +185,7 @@ def column_print(data: List[Any]) -> None: # Get all substrings separated by the delimiter. 
# Ex: "foo.bar.baz", delimiter = "." # -> ['foo', 'foo.bar', 'foo.bar.baz', 'bar.baz', 'bar', 'bar.baz', 'baz'] +@meister_time def all_substrings(string: str, delimiter: str = "") -> List[str]: all_substrings = [] words = string.split(delimiter) if len(delimiter) else list(string) @@ -193,6 +204,7 @@ def add_words(i: int, j: int) -> None: # Exponential backoff for some external requests that may not work 100% on the # first try. +@meister_time def exp_backoff(num_tries: int = 3) -> Generator: seconds = 2 @@ -207,6 +219,7 @@ def exp_backoff(num_tries: int = 3) -> Generator: # opta.yaml -> opta.yml, True # opta.yml -> opta.yaml, True # test.txt -> test.txt, False +@meister_time def alternate_yaml_extension(config_path: str) -> Tuple[str, bool]: pre, ext = os.path.splitext(config_path) if ext.lower() == ".yaml": @@ -218,11 +231,12 @@ def alternate_yaml_extension(config_path: str) -> Tuple[str, bool]: # Return the existing opta file, if not found it checks the alternate y(a)ml extension # If still not found, it prompts the user or raise a UserErrors +@meister_time def check_opta_file_exists(config_path: str, prompt: bool = True) -> str: - if not os.path.exists(config_path): + if not exists(config_path): # try alternate y(a)ml extension alternate_yaml, changed = alternate_yaml_extension(config_path) - if changed and os.path.exists(alternate_yaml): + if changed and exists(alternate_yaml): logger.warning( "Could not find file %s, but loaded %s.", config_path, alternate_yaml, ) @@ -249,7 +263,7 @@ def check_opta_file_exists(config_path: str, prompt: bool = True) -> str: if not prompt_config_path: logger.info("Exiting...") sys.exit(0) - elif not os.path.exists(prompt_config_path): + elif not exists(prompt_config_path): raise UserErrors("Invalid Configuration Path provided.") config_path = prompt_config_path diff --git a/tests/commands/test_apply.py b/tests/commands/test_apply.py index f2626365e..2f88c0569 100644 --- a/tests/commands/test_apply.py +++ 
b/tests/commands/test_apply.py @@ -58,7 +58,7 @@ def mocked_layer(mocker: MockFixture) -> Any: def test_apply(mocker: MockFixture, mocked_layer: Any, basic_mocks: Any) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocker.patch( "opta.commands.apply.Terraform.tf_lock_details", @@ -121,7 +121,7 @@ def test_apply(mocker: MockFixture, mocked_layer: Any, basic_mocks: Any) -> None def test_auto_approve(mocker: MockFixture, mocked_layer: Any, basic_mocks: Any) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocker.patch( "opta.commands.apply.Terraform.tf_lock_details", @@ -180,7 +180,7 @@ def test_auto_approve(mocker: MockFixture, mocked_layer: Any, basic_mocks: Any) def test_fail_on_2_azs(mocker: MockFixture, mocked_layer: Any) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mock_tf_download_state = mocker.patch( "opta.commands.apply.Terraform.download_state", return_value=True diff --git a/tests/commands/test_deploy.py b/tests/commands/test_deploy.py index 5032730c1..cbc67f2a8 100644 --- a/tests/commands/test_deploy.py +++ b/tests/commands/test_deploy.py @@ -12,7 +12,7 @@ def mock_is_service_config(module_mocker: MockFixture) -> None: def test_deploy_basic(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mock_tf_download_state = mocker.patch( "opta.commands.deploy.Terraform.download_state", return_value=True @@ -59,7 +59,7 @@ def test_deploy_basic(mocker: MockFixture) -> None: def test_deploy_auto_approve(mocker: MockFixture) -> None: 
- mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mock_tf_download_state = mocker.patch( "opta.commands.deploy.Terraform.download_state", return_value=True @@ -107,7 +107,7 @@ def test_deploy_auto_approve(mocker: MockFixture) -> None: def test_deploy_all_flags(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mock_tf_download_state = mocker.patch( "opta.commands.deploy.Terraform.download_state", return_value=True @@ -165,7 +165,7 @@ def test_deploy_all_flags(mocker: MockFixture) -> None: def test_deploy_ecr_apply(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mock_tf_download_state = mocker.patch( "opta.commands.deploy.Terraform.download_state", return_value=True diff --git a/tests/commands/test_force_unlock.py b/tests/commands/test_force_unlock.py index 7e7f66c14..c448c7542 100644 --- a/tests/commands/test_force_unlock.py +++ b/tests/commands/test_force_unlock.py @@ -18,7 +18,7 @@ def test_force_unlock_env(mocker: MockFixture) -> None: mocked_click_confirm = mocker.patch( "opta.commands.apply.click.confirm", return_value="y" ) - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer = mocker.Mock(spec=Layer) mocked_layer.parent = None @@ -93,7 +93,7 @@ def test_force_unlock_env_no_cluster(mocker: MockFixture) -> None: mocked_click_confirm = mocker.patch( "opta.commands.apply.click.confirm", return_value="y" ) - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = 
mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer = mocker.Mock(spec=Layer) mocked_layer.parent = None @@ -157,7 +157,7 @@ def test_force_unlock_env_no_rollback(mocker: MockFixture) -> None: mocked_click_confirm = mocker.patch( "opta.commands.apply.click.confirm", return_value="y" ) - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer = mocker.Mock(spec=Layer) mocked_layer.parent = None diff --git a/tests/commands/test_inspect.py b/tests/commands/test_inspect.py index f34399afc..dd820a586 100644 --- a/tests/commands/test_inspect.py +++ b/tests/commands/test_inspect.py @@ -36,7 +36,7 @@ def test_inspect(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True # Mock tf file generation diff --git a/tests/commands/test_logs.py b/tests/commands/test_logs.py index f5fbaed93..bc1992fec 100644 --- a/tests/commands/test_logs.py +++ b/tests/commands/test_logs.py @@ -8,7 +8,7 @@ def test_logs(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer_class = mocker.patch("opta.commands.logs.Layer") diff --git a/tests/commands/test_output.py b/tests/commands/test_output.py index fbb3afc69..4e7665390 100644 --- a/tests/commands/test_output.py +++ b/tests/commands/test_output.py @@ -43,7 +43,7 @@ def test_output_aws(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocker.patch("opta.cli.os.remove") @@ -76,7 +76,7 @@ def 
test_output_aws(mocker: MockFixture) -> None: def test_output_gcp(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocker.patch("opta.cli.os.remove") diff --git a/tests/commands/test_push.py b/tests/commands/test_push.py index bc3323a64..d59f8982b 100644 --- a/tests/commands/test_push.py +++ b/tests/commands/test_push.py @@ -28,7 +28,7 @@ def mock_is_service_config(module_mocker: MockFixture) -> None: def test_is_env_config(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocker.patch("opta.commands.push.is_service_config", return_value=False) @@ -184,7 +184,7 @@ def test_no_tag(mocker: MockFixture) -> None: def test_no_docker(mocker: MockFixture) -> None: mocker.patch( - "opta.utils.os.path.exists", return_value=True + "opta.utils.exists", return_value=True ) # Make check_opta_file_exists succeed mocker.patch("opta.commands.push.ensure_installed", side_effect=UserErrors("foobar")) @@ -195,7 +195,7 @@ def test_no_docker(mocker: MockFixture) -> None: def test_no_tag_override(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True nice_run_mock = mocker.patch("opta.commands.push.nice_run") @@ -251,7 +251,7 @@ def test_no_tag_override(mocker: MockFixture) -> None: def test_with_tag_override(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True nice_run_mock = mocker.patch("opta.commands.push.nice_run") @@ -317,7 
+317,7 @@ def test_with_tag_override(mocker: MockFixture) -> None: def test_bad_image_name(mocker: MockFixture) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True gen_mock = mocker.patch("opta.commands.push.gen_all") diff --git a/tests/commands/test_secret.py b/tests/commands/test_secret.py index c0d0d0709..e96d222e3 100644 --- a/tests/commands/test_secret.py +++ b/tests/commands/test_secret.py @@ -23,7 +23,7 @@ def mocked_layer(self, mocker: MockFixture) -> Any: def test_view(self, mocker: MockFixture, mocked_layer: Any) -> None: # noqa # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = os.path.join( os.getcwd(), "tests", "fixtures", "dummy_data", "dummy_config1.yaml" ) @@ -56,7 +56,7 @@ def test_view(self, mocker: MockFixture, mocked_layer: Any) -> None: # noqa ) def test_list_secrets(self, mocker: MockFixture, mocked_layer: Any) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = os.path.join( os.getcwd(), "tests", "fixtures", "dummy_data", "dummy_config1.yaml" ) @@ -92,7 +92,7 @@ def test_list_secrets(self, mocker: MockFixture, mocked_layer: Any) -> None: def test_update(self, mocker: MockFixture, mocked_layer: Any) -> None: # Opta file check - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = os.path.join( os.getcwd(), "tests", "fixtures", "dummy_data", "dummy_config1.yaml" ) diff --git a/tests/commands/test_shell.py b/tests/commands/test_shell.py index 64fe77016..4e231e2d1 100644 --- a/tests/commands/test_shell.py +++ b/tests/commands/test_shell.py @@ -7,7 
+7,7 @@ def test_shell(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer_class = mocker.patch("opta.commands.shell.Layer") @@ -58,7 +58,7 @@ def test_shell(mocker: MockFixture) -> None: def test_shell_with_sh(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer_class = mocker.patch("opta.commands.shell.Layer") @@ -109,7 +109,7 @@ def test_shell_with_sh(mocker: MockFixture) -> None: def test_shell_with_invalid_shell(mocker: MockFixture) -> None: - mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists") + mocked_os_path_exists = mocker.patch("opta.utils.exists") mocked_os_path_exists.return_value = True mocked_layer_class = mocker.patch("opta.commands.shell.Layer") diff --git a/tests/core/test_generator.py b/tests/core/test_generator.py index 9ee30dbc8..3963da0ae 100644 --- a/tests/core/test_generator.py +++ b/tests/core/test_generator.py @@ -14,7 +14,7 @@ class TestGenerator: def test_gen(self, mocker: MockFixture) -> None: - mocker.patch("opta.layer.os.path.exists") + mocker.patch("opta.utils.exists") mocker.patch("opta.layer.validate_yaml") test_gen_file_path = "pytest_main.tf.json" @@ -52,7 +52,7 @@ def test_gen_resource_tags(self, mocker: MockFixture) -> None: ) mocker.patch("opta.layer.open") - mocker.patch("opta.layer.os.path.exists") + mocker.patch("opta.utils.exists") mocker.patch("opta.layer.validate_yaml") opta_config, gen_tf_file = BASIC_APPLY diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py index 14ab8cc35..d6f4597d1 100644 --- a/tests/utils/test_utils.py +++ b/tests/utils/test_utils.py @@ -25,14 +25,14 @@ def test_exp_backoff(mocker: MockFixture) -> None: def test_check_opta_file_exists_file_exists(mocker: 
MockFixture) -> None: mock_config_path = "mock_config_path" - mock_os_path_exists = mocker.patch("opta.utils.os.path.exists", return_value=True) + mock_os_path_exists = mocker.patch("opta.utils.exists", return_value=True) mock_click_prompt = mocker.patch("opta.utils.click.prompt") mock_system_exit = mocker.patch("opta.utils.sys.exit") config_path = check_opta_file_exists(mock_config_path) assert config_path == mock_config_path - mock_os_path_exists.assert_called_once_with(mock_config_path) + mock_os_path_exists.assert_has_calls([mocker.call(mock_config_path)]) mock_click_prompt.assert_not_called() mock_system_exit.assert_not_called() @@ -42,9 +42,7 @@ def test_check_opta_file_exists_file_does_not_exists_user_input( ) -> None: mock_config_path = "mock_config_path" mock_user_config_path = "mock_user_config_path" - mock_os_path_exists = mocker.patch( - "opta.utils.os.path.exists", side_effect=[False, True] - ) + mock_os_path_exists = mocker.patch("opta.utils.exists", side_effect=[False, True]) mock_click_prompt = mocker.patch( "opta.utils.click.prompt", return_value=mock_user_config_path ) @@ -70,9 +68,7 @@ def test_check_opta_file_exists_file_does_not_exists_no_user_input( ) -> None: mock_config_path = "mock_config_path" mock_no_user_config_path = "" - mock_os_path_exists = mocker.patch( - "opta.utils.os.path.exists", side_effect=[False, False] - ) + mock_os_path_exists = mocker.patch("opta.utils.exists", side_effect=[False, False]) mock_click_prompt = mocker.patch( "opta.utils.click.prompt", return_value=mock_no_user_config_path ) @@ -96,9 +92,7 @@ def test_check_opta_file_exists_file_does_not_exists_invalid_user_input( ) -> None: mock_config_path = "mock_config_path" mock_invalid_user_config_path = "mock_invalid_user_config_path" - mock_os_path_exists = mocker.patch( - "opta.utils.os.path.exists", side_effect=[False, False] - ) + mock_os_path_exists = mocker.patch("opta.utils.exists", side_effect=[False, False]) mock_click_prompt = mocker.patch( 
"opta.utils.click.prompt", return_value=mock_invalid_user_config_path )