From 613d49be206f6ee4119509ea5da6812aa69cef7b Mon Sep 17 00:00:00 2001 From: Himawan Winarto Date: Fri, 3 Apr 2026 20:43:02 -0400 Subject: [PATCH 1/5] chore: format tox.ini Signed-off-by: Himawan Winarto --- sunbeam-python/tox.ini | 48 ++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/sunbeam-python/tox.ini b/sunbeam-python/tox.ini index 8dc81acfa..6170c1c39 100644 --- a/sunbeam-python/tox.ini +++ b/sunbeam-python/tox.ini @@ -37,33 +37,34 @@ passenv = USER LOGNAME USERNAME HOME -commands = uv run {[vars]uv_flags} \ - python -m pytest -s -vv tests/functional/local \ - --basetemp={env:HOME}/.local/share/openstack/tmp \ - {posargs} +commands = + uv run {[vars]uv_flags} \ + python -m pytest -s -vv tests/functional/local \ + --basetemp={env:HOME}/.local/share/openstack/tmp \ + {posargs} [testenv:fmt] description = Apply coding style standards to code deps = ruff commands = - ruff check --select I --fix {[vars]all_path} # run isort - ruff format {[vars]all_path} + ruff check --select I --fix {posargs:{[vars]all_path}} # run isort + ruff format {posargs:{[vars]all_path}} [testenv:pep8] deps = - ruff + ruff commands = - ruff format --diff {[vars]all_path} - ruff check {[vars]all_path} + ruff format --diff {posargs:{[vars]all_path}} + ruff check {posargs:{[vars]all_path}} [testenv:mypy] commands = - uv run {[vars]uv_flags} mypy {[vars]src_path}/sunbeam - # TODO: consider uncommenting the following line once - # the unit tests pass the mypy check. - # uv run {[vars]uv_flags} mypy {[vars]tst_path}/unit - uv run {[vars]uv_flags} mypy {[vars]tst_path}/functional + uv run {[vars]uv_flags} mypy {[vars]src_path}/sunbeam + # TODO: consider uncommenting the following line once + # the unit tests pass the mypy check. 
+ # uv run {[vars]uv_flags} mypy {[vars]tst_path}/unit + uv run {[vars]uv_flags} mypy {[vars]tst_path}/functional [testenv:cover] setenv = @@ -82,20 +83,21 @@ commands = [testenv:docs] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/doc/requirements.txt + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt commands = - sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/build/html - sphinx-build -a -E -W -d doc/build/doctrees -b man doc/source doc/build/man - # Validate redirects (must be done after the docs build - whereto doc/build/html/.htaccess doc/test/redirect-tests.txt + sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/build/html + sphinx-build -a -E -W -d doc/build/doctrees -b man doc/source doc/build/man + # Validate redirects (must be done after the docs build + whereto doc/build/html/.htaccess doc/test/redirect-tests.txt [testenv:releasenotes] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/doc/requirements.txt + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + sphinx-build -a -E -W -d releasenotes/build/doctrees \ + -b html releasenotes/source releasenotes/build/html [testenv:lock] description = Update lock file From d44208cc3292a476e07ba173d9280e315e54944b Mon Sep 17 00:00:00 2001 From: Himawan Winarto Date: Mon, 6 Apr 2026 07:19:23 -0400 Subject: [PATCH 2/5] chore: add linting for logging function Refactor logging calls on the files that break the linter Signed-off-by: Himawan Winarto --- sunbeam-python/pyproject.toml | 2 + sunbeam-python/sunbeam/commands/configure.py | 12 +- .../sunbeam/commands/generate_cloud_config.py | 8 +- 
sunbeam-python/sunbeam/commands/launch.py | 12 +- sunbeam-python/sunbeam/commands/manifest.py | 10 +- sunbeam-python/sunbeam/commands/proxy.py | 10 +- sunbeam-python/sunbeam/commands/refresh.py | 2 +- sunbeam-python/sunbeam/core/checks.py | 2 +- sunbeam-python/sunbeam/core/common.py | 10 +- sunbeam-python/sunbeam/core/deployment.py | 22 +- sunbeam-python/sunbeam/core/deployments.py | 4 +- sunbeam-python/sunbeam/core/juju.py | 80 +++--- sunbeam-python/sunbeam/core/k8s.py | 2 +- sunbeam-python/sunbeam/core/openstack_api.py | 4 +- sunbeam-python/sunbeam/core/questions.py | 8 +- sunbeam-python/sunbeam/core/steps.py | 42 +-- sunbeam-python/sunbeam/core/terraform.py | 47 ++-- sunbeam-python/sunbeam/core/watcher.py | 18 +- sunbeam-python/sunbeam/feature_gates.py | 8 +- sunbeam-python/sunbeam/feature_manager.py | 10 +- .../sunbeam/features/baremetal/steps.py | 16 +- .../sunbeam/features/caas/feature.py | 39 +-- .../sunbeam/features/dns/feature.py | 4 +- .../features/instance_recovery/consul.py | 6 +- .../sunbeam/features/interface/utils.py | 14 +- .../sunbeam/features/interface/v1/base.py | 64 +++-- .../features/interface/v1/openstack.py | 23 +- .../sunbeam/features/ldap/feature.py | 10 +- .../sunbeam/features/maintenance/checks.py | 24 +- .../sunbeam/features/observability/feature.py | 10 +- .../features/shared_filesystem/manila_data.py | 4 +- .../sunbeam/features/telemetry/feature.py | 22 +- sunbeam-python/sunbeam/features/tls/common.py | 50 ++-- .../sunbeam/features/validation/feature.py | 4 +- .../sunbeam/features/vault/feature.py | 41 +-- sunbeam-python/sunbeam/hooks.py | 24 +- sunbeam-python/sunbeam/log.py | 2 +- .../sunbeam/provider/common/multiregion.py | 2 +- .../sunbeam/provider/local/commands.py | 22 +- .../sunbeam/provider/local/deployment.py | 2 +- .../sunbeam/provider/local/steps.py | 44 +-- .../sunbeam/provider/maas/commands.py | 31 ++- sunbeam-python/sunbeam/provider/maas/steps.py | 77 ++++-- sunbeam-python/sunbeam/steps/cinder_volume.py | 4 +- 
.../sunbeam/steps/cluster_status.py | 2 +- sunbeam-python/sunbeam/steps/clusterd.py | 65 ++--- sunbeam-python/sunbeam/steps/configure.py | 2 +- sunbeam-python/sunbeam/steps/hypervisor.py | 40 +-- sunbeam-python/sunbeam/steps/juju.py | 253 ++++++++++-------- sunbeam-python/sunbeam/steps/k8s.py | 28 +- sunbeam-python/sunbeam/steps/microceph.py | 28 +- sunbeam-python/sunbeam/steps/microovn.py | 16 +- sunbeam-python/sunbeam/steps/mysql.py | 30 ++- sunbeam-python/sunbeam/steps/openstack.py | 43 +-- .../sunbeam/steps/sync_feature_gates.py | 8 +- .../sunbeam/steps/upgrades/inter_channel.py | 6 +- .../sunbeam/steps/upgrades/intra_channel.py | 40 +-- sunbeam-python/sunbeam/steps/vault.py | 16 +- sunbeam-python/sunbeam/storage/base.py | 5 +- sunbeam-python/sunbeam/storage/manager.py | 14 +- sunbeam-python/sunbeam/storage/service.py | 14 +- sunbeam-python/sunbeam/storage/steps.py | 70 +++-- sunbeam-python/sunbeam/utils.py | 9 +- .../tests/functional/local/test_dpdk.py | 10 +- .../functional/local/test_network_node.py | 12 +- .../tests/functional/local/test_sriov.py | 10 +- .../tests/functional/local/utils.py | 12 +- .../tests/unit/sunbeam/test_clusterd.py | 14 +- 68 files changed, 893 insertions(+), 706 deletions(-) diff --git a/sunbeam-python/pyproject.toml b/sunbeam-python/pyproject.toml index 41ef70f26..f01c5d02a 100644 --- a/sunbeam-python/pyproject.toml +++ b/sunbeam-python/pyproject.toml @@ -778,6 +778,8 @@ select = [ "S", # bandit "I", # isort "CPY", # flake8-copyright + "LOG", # flake8-logging + "G", # flake8-logging-format ] ignore = [ "F821", diff --git a/sunbeam-python/sunbeam/commands/configure.py b/sunbeam-python/sunbeam/commands/configure.py index 40699ccf7..c3726fe99 100644 --- a/sunbeam-python/sunbeam/commands/configure.py +++ b/sunbeam-python/sunbeam/commands/configure.py @@ -103,7 +103,7 @@ def retrieve_admin_credentials( try: action_result = jhelper.run_action(unit, model, action_cmd) except (ActionFailedException, TimeoutError) as e: - LOG.debug(f"Running 
action {action_cmd} on {unit} failed: {str(e)}") + LOG.debug("Running action %s on %s failed: %r", action_cmd, unit, e) raise click.ClickException("Unable to retrieve openrc from Keystone service") region_name = deployment.get_region_name() or DEFAULT_REGION @@ -124,7 +124,7 @@ def retrieve_admin_credentials( try: action_result = jhelper.run_action(unit, model, action_cmd) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed: {str(e)}") + LOG.debug("Running action %s on %s failed: %r", action_cmd, unit, e) raise click.ClickException("Unable to retrieve CA certs from Keystone service") ca_bundle = [] @@ -142,7 +142,7 @@ def retrieve_admin_credentials( if bundle: home = os.environ["SNAP_REAL_HOME"] cafile = Path(home) / ".config" / "openstack" / "ca_bundle.pem" - LOG.debug("Writing CA bundle to {str(cafile)}") + LOG.debug("Writing CA bundle to %s", cafile) cafile.parent.mkdir(mode=0o775, parents=True, exist_ok=True) if not cafile.exists(): @@ -210,7 +210,7 @@ def is_skip(self, context: StepContext) -> Result: self.client, CLOUD_CONFIG_SECTION ) if "user" not in self.variables: - LOG.debug("Demo setup not yet done") + LOG.debug("Demo setup is not yet done") return Result(ResultType.SKIPPED) if self.variables["user"]["run_demo_setup"]: return Result(ResultType.COMPLETED) @@ -225,7 +225,7 @@ def run(self, context: StepContext) -> Result: self._print_openrc(tf_output) return Result(ResultType.COMPLETED) except TerraformException as e: - LOG.exception("Error getting terraform output") + LOG.exception("Error getting Terraform output") return Result(ResultType.FAILED, str(e)) def _print_openrc(self, tf_output: dict) -> None: @@ -393,7 +393,7 @@ def prompt( self.variables["enabled"] = dpdk_bank.enabled.ask() if not self.variables["enabled"]: - LOG.debug("DPDK disabled.") + LOG.debug("DPDK disabled") else: self._prompt_nics(console, show_hint) diff --git a/sunbeam-python/sunbeam/commands/generate_cloud_config.py 
b/sunbeam-python/sunbeam/commands/generate_cloud_config.py index fa6778171..1edce12da 100644 --- a/sunbeam-python/sunbeam/commands/generate_cloud_config.py +++ b/sunbeam-python/sunbeam/commands/generate_cloud_config.py @@ -75,7 +75,7 @@ def is_skip(self, context: StepContext) -> Result: self.client, CLOUD_CONFIG_SECTION ) if "user" not in self.variables: - LOG.debug("Demo setup not yet done") + LOG.debug("Demo setup is not yet done") return Result(ResultType.SKIPPED) if self.variables["user"]["run_demo_setup"]: return Result(ResultType.COMPLETED) @@ -146,7 +146,7 @@ def _get_cloud_config_from_file(self, clouds_yaml: Path) -> dict: If cloud yaml is not present, create a file along with parent directories. """ - LOG.debug(f"Creating {clouds_yaml} if it does not exist") + LOG.debug("Creating %s if it does not exist", clouds_yaml) clouds_yaml.parent.mkdir(mode=0o775, parents=True, exist_ok=True) if not clouds_yaml.exists(): clouds_yaml.touch() @@ -166,7 +166,7 @@ def _create_backup_file(self, clouds_yaml: Path) -> None: """ timestamp = datetime.now().strftime("%Y%m%d%H%M%S") clouds_yaml_backup = Path(clouds_yaml.parent) / f"clouds.yaml.bk.{timestamp}" - LOG.debug(f"Backing up clouds.yaml to {clouds_yaml_backup}") + LOG.debug("Backing up clouds.yaml to %s", clouds_yaml_backup) shutil.copy(clouds_yaml, clouds_yaml_backup) clouds_yaml_backup.chmod(0o660) @@ -255,7 +255,7 @@ def cloud_config( run_preflight_checks(preflight_checks, console) jhelper_keystone = deployment.get_juju_helper(keystone=True) if not jhelper_keystone.model_exists(OPENSTACK_MODEL): - LOG.error(f"Expected model {OPENSTACK_MODEL} missing") + LOG.error("Expected model %s is missing", OPENSTACK_MODEL) raise click.ClickException("Please run `sunbeam cluster bootstrap` first") admin_credentials = retrieve_admin_credentials( jhelper_keystone, deployment, OPENSTACK_MODEL diff --git a/sunbeam-python/sunbeam/commands/launch.py b/sunbeam-python/sunbeam/commands/launch.py index c501fdde6..294fc68a3 100644 --- 
a/sunbeam-python/sunbeam/commands/launch.py +++ b/sunbeam-python/sunbeam/commands/launch.py @@ -67,7 +67,7 @@ def launch( with console.status("Fetching user credentials ... "): if not jhelper.model_exists(OPENSTACK_MODEL): - LOG.error(f"Expected model {OPENSTACK_MODEL} missing") + LOG.error("Expected model %s is missing", OPENSTACK_MODEL) raise click.ClickException( f"Cannot find {OPENSTACK_MODEL}. Please destroy and re-bootstrap." ) @@ -98,7 +98,7 @@ def launch( cacert=admin_auth_info.get("OS_CACERT"), ) except openstack.exceptions.SDKException: - LOG.error("Could not authenticate to Keystone.") + LOG.exception("Could not authenticate to Keystone") raise click.ClickException("Unable to connect to OpenStack") with console.status("Checking for SSH key pair ... ") as status: @@ -134,8 +134,8 @@ def launch( ) server = conn.compute.wait_for_server(server, wait=INSTANCE_WAIT_TIMEOUT) - except openstack.exceptions.SDKException as e: - LOG.error(f"Instance creation request failed: {e}") + except openstack.exceptions.SDKException: + LOG.exception("Instance creation request failed") raise click.ClickException( "Unable to request new instance. Please run `sunbeam configure` first." ) @@ -149,8 +149,8 @@ def launch( console.print( f"`ssh -i {key_path} ubuntu@{ip_.floating_ip_address}`", ) - except openstack.exceptions.SDKException as e: - LOG.error(f"Error allocating IP address: {e}") + except openstack.exceptions.SDKException: + LOG.exception("Error allocating IP address") raise click.ClickException( "Could not allocate IP address. Check your configuration." 
) diff --git a/sunbeam-python/sunbeam/commands/manifest.py b/sunbeam-python/sunbeam/commands/manifest.py index 0c0ff26c2..2e6d7ed07 100644 --- a/sunbeam-python/sunbeam/commands/manifest.py +++ b/sunbeam-python/sunbeam/commands/manifest.py @@ -35,7 +35,7 @@ def generate_software_manifest( try: software_dict = software_config.model_dump(by_alias=True) - LOG.debug(f"Manifest software dict with extra fields: {software_dict}") + LOG.debug("Manifest software dict with extra fields: %s", software_dict) # Remove terraform default sources manifest_terraform_dict = software_dict.get("terraform", {}) @@ -60,8 +60,8 @@ def generate_software_manifest( software_content = f"{indent}{comment}software:\n{software_yaml_commented}" return software_content except Exception as e: - LOG.debug(e) - raise click.ClickException(f"Manifest generation failed: {str(e)}") + LOG.exception("Software manifest generation failed") + raise click.ClickException(f"Software manifest generation failed: {str(e)}") def _dump_feature( @@ -167,7 +167,7 @@ def generate( home = os.environ["SNAP_REAL_HOME"] manifest_file = Path(home) / ".config" / "openstack" / "manifest.yaml" - LOG.debug(f"Creating {manifest_file} parent directory if it does not exist") + LOG.debug("Creating %s parent directory if it does not exist", manifest_file) manifest_file.parent.mkdir(mode=0o775, parents=True, exist_ok=True) manifest = deployment.get_manifest() @@ -197,7 +197,7 @@ def generate( file.write("# Generated Sunbeam Deployment Manifest\n\n") file.write(manifest_content) except IOError as e: - LOG.debug(e) + LOG.exception("Manifest generation failed") raise click.ClickException(f"Manifest generation failed: {str(e)}") click.echo(f"Generated manifest is at {str(manifest_file)}") diff --git a/sunbeam-python/sunbeam/commands/proxy.py b/sunbeam-python/sunbeam/commands/proxy.py index cc39d2942..affda3145 100644 --- a/sunbeam-python/sunbeam/commands/proxy.py +++ b/sunbeam-python/sunbeam/commands/proxy.py @@ -177,8 +177,8 @@ def set( 
try: _update_proxy(variables, deployment, show_hints) except (ClusterServiceUnavailableException, ConfigItemNotFoundException) as e: - LOG.debug(f"Exception in updating config {str(e)}") - click.echo("ERROR: Not able to update proxy config: str(e)") + LOG.debug("Exception in updating proxy config: %r", e) + click.echo(f"ERROR: Not able to update proxy config: {str(e)}") return @@ -197,8 +197,8 @@ def clear(ctx: click.Context, show_hints: bool) -> None: try: _update_proxy(variables, deployment, show_hints) except (ClusterServiceUnavailableException, ConfigItemNotFoundException) as e: - LOG.debug(f"Exception in updating config {str(e)}") - click.echo("ERROR: Not able to clear proxy config: str(e)") + LOG.debug("Exception in clearing proxy config: %r", e) + click.echo(f"ERROR: Not able to clear proxy config: {str(e)}") return @@ -281,7 +281,7 @@ def prompt( self.variables.setdefault("proxy", {}) previous_answers = self.variables.get("proxy", {}) - LOG.debug(f"Previous answers: {previous_answers}") + LOG.debug("Previous answers: %s", previous_answers) if not ( previous_answers.get("http_proxy") and previous_answers.get("https_proxy") diff --git a/sunbeam-python/sunbeam/commands/refresh.py b/sunbeam-python/sunbeam/commands/refresh.py index 41069de32..d7e3dfdca 100644 --- a/sunbeam-python/sunbeam/commands/refresh.py +++ b/sunbeam-python/sunbeam/commands/refresh.py @@ -162,7 +162,7 @@ def refresh( LOG.debug("Getting latest manifest from cluster db") manifest = deployment.get_manifest() - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) jhelper = JujuHelper(deployment.juju_controller) upgrade_coordinator: UpgradeCoordinator if upgrade_release: diff --git a/sunbeam-python/sunbeam/core/checks.py b/sunbeam-python/sunbeam/core/checks.py index caa63288a..7da735b8f 100644 --- a/sunbeam-python/sunbeam/core/checks.py +++ b/sunbeam-python/sunbeam/core/checks.py @@ -38,7 +38,7 @@ def 
run_preflight_checks(checks: Sequence["Check"], console: Console): Raise ClickException in case of Result Failures. """ for check in checks: - LOG.debug(f"Starting pre-flight check {check.name}") + LOG.debug("Starting pre-flight check %s", check.name) message = f"{check.description} ... " with console.status(message): if not check.run(): diff --git a/sunbeam-python/sunbeam/core/common.py b/sunbeam-python/sunbeam/core/common.py index 92b4218d1..a881203e3 100644 --- a/sunbeam-python/sunbeam/core/common.py +++ b/sunbeam-python/sunbeam/core/common.py @@ -169,7 +169,7 @@ def _is_role_enabled(role: Role) -> bool: # Check feature gate (will return True if GA or snap config enabled) enabled = is_feature_gate_enabled(gate_key) if not enabled: - LOG.debug(f"Role {role.name} is gated by {gate_key} (disabled)") + LOG.debug("Role %s is gated by %s (disabled)", role.name, gate_key) return enabled @@ -323,7 +323,7 @@ def run_plan( results = {} for step in plan: - LOG.debug(f"Starting step {step.name!r}") + LOG.debug("Starting step %r", step.name) with console.status(step.status) as status: rich_reporter = RichProgressReporter(status, step.status) logging_reporter = LoggingProgressReporter() @@ -338,7 +338,7 @@ def run_plan( skip_result = step.is_skip(context) if skip_result.result_type == ResultType.SKIPPED: results[step.__class__.__name__] = skip_result - LOG.debug(f"Skipping step {step.name}") + LOG.debug("Skipping step %r", step.name) continue if skip_result.result_type == ResultType.FAILED: @@ -347,11 +347,11 @@ def run_plan( break raise click.ClickException(skip_result.message) - LOG.debug(f"Running step {step.name}") + LOG.debug("Running step %r", step.name) result = step.run(context) results[step.__class__.__name__] = result LOG.debug( - f"Finished running step {step.name!r}. Result: {result.result_type}" + "Finished running step %r. 
Result: %r", step.name, result.result_type ) if result.result_type == ResultType.FAILED: diff --git a/sunbeam-python/sunbeam/core/deployment.py b/sunbeam-python/sunbeam/core/deployment.py index 62a00a70f..e92848e0d 100644 --- a/sunbeam-python/sunbeam/core/deployment.py +++ b/sunbeam-python/sunbeam/core/deployment.py @@ -236,7 +236,7 @@ def get_proxy_settings(self) -> dict: ConfigItemNotFoundException, ValueError, ) as e: - LOG.debug(f"Using default Proxy settings from provider due to {str(e)}") + LOG.warning("Using default proxy settings from provider due to %r", e) proxy = self.get_default_proxy_settings() if "NO_PROXY" in proxy: @@ -273,7 +273,7 @@ def _parse_feature( feature = features.get(name) group = groups.get(name) if not feature and not group: - LOG.warning(f"Feature {name} not found in feature manager.") + LOG.warning("Feature %s is not found in feature manager", name) continue if feature and feature_or_group_manifest_dict: feature_manifests[name] = _parse_feature( @@ -287,7 +287,9 @@ def _parse_feature( ) in feature_or_group_manifest_dict.items(): feature = features.get(group.name + "." 
+ name) if not feature: - LOG.warning(f"Feature {name} not found in group {group.name}.") + LOG.warning( + "Feature %s is not found in group %s", name, group.name + ) continue if not feature_manifest_dict: continue @@ -351,28 +353,28 @@ def get_manifest(self, manifest_file: pathlib.Path | None = None) -> Manifest: if manifest_file is not None: manifest_dict = yaml.safe_load(manifest_file.read_text("utf-8")) override_manifest = self.parse_manifest(manifest_dict) - LOG.debug("Manifest loaded from file.") + LOG.debug("Manifest loaded from file") else: try: client = self.get_client() override_manifest = self.parse_manifest( yaml.safe_load(client.cluster.get_latest_manifest()["data"]) ) - LOG.debug("Manifest loaded from clusterd.") + LOG.debug("Manifest loaded from clusterd") except ClusterServiceUnavailableException: LOG.debug( "Failed to get manifest from clusterd, might not be bootstrapped," - " consider default manifest." + " consider default manifest" ) except ConfigItemNotFoundException: LOG.debug( "No manifest found in clusterd, consider default" - " manifest from database." + " manifest from database" ) except ValueError: LOG.debug( "Failed to get clusterd client, might no be bootstrapped," - " consider empty manifest from database." 
+ " consider empty manifest from database" ) if override_manifest is None: # Only get manifest from embedded if manifest not present in clusterd @@ -384,7 +386,7 @@ def get_manifest(self, manifest_file: pathlib.Path | None = None) -> Manifest: override_manifest = self.parse_manifest( yaml.safe_load(manifest_file.read_text()) ) - LOG.debug("Manifest loaded from embedded manifest.") + LOG.debug("Manifest loaded from embedded manifest") if override_manifest is not None: override_manifest.validate_against_default(manifest) @@ -438,7 +440,7 @@ def _load_tfhelpers(self): tfplan_dir = TERRAFORM_DIR_NAMES.get(tfplan, tfplan) src = tf_manifest.source dst = self.plans_directory / tfplan_dir - LOG.debug(f"Updating {dst} from {src}...") + LOG.debug("Updating %s from %s...", dst, src) shutil.copytree(src, dst, dirs_exist_ok=True) self._tfhelpers[tfplan] = TerraformHelper( diff --git a/sunbeam-python/sunbeam/core/deployments.py b/sunbeam-python/sunbeam/core/deployments.py index 18611d394..fb422dd7c 100644 --- a/sunbeam-python/sunbeam/core/deployments.py +++ b/sunbeam-python/sunbeam/core/deployments.py @@ -37,7 +37,7 @@ def _validate_deployments(cls, deployment: dict | Deployment) -> Deployment: # @classmethod def load(cls, path: Path) -> "DeploymentsConfig": """Load deployment configuration from file.""" - LOG.debug(f"Loading deployment configuration from {str(path)!r}") + LOG.debug("Loading deployment configuration from %r", str(path)) with path.open() as fd: data = yaml.safe_load(fd) if data is None: @@ -62,7 +62,7 @@ def write(self): # so workaround to add each deployment based on provider deployments = [d.model_dump(by_alias=True) for d in self.deployments] self_dict["deployments"] = deployments - LOG.debug(f"Writing deployment configuration to {str(self.path)!r}") + LOG.debug("Writing deployment configuration to %r", str(self.path)) yaml.SafeDumper.add_representer(str, str_presenter) with tempfile.NamedTemporaryFile("w") as tmp: yaml.safe_dump(self_dict, tmp) diff --git 
a/sunbeam-python/sunbeam/core/juju.py b/sunbeam-python/sunbeam/core/juju.py index 9653199b6..ca5c74080 100644 --- a/sunbeam-python/sunbeam/core/juju.py +++ b/sunbeam-python/sunbeam/core/juju.py @@ -1193,7 +1193,7 @@ def _wait( ) break except jubilant.CLIError as e: - LOG.error(f"Error occurred while waiting: {e}") + LOG.error("Error occurred while waiting: %r", e) else: raise TimeoutError( f"Timed out after {timeout} seconds while waiting for status" @@ -1227,11 +1227,11 @@ def _ready_callback(status: "jubilant.statustypes.Status") -> bool: app = juju.status().apps.get(name) if not app: return - LOG.debug(f"Application {name!r} is in status: {app.app_status.current!r}") + LOG.debug("Application %r is in status: %r", name, app.app_status.current) LOG.debug( - "Waiting for app status to be: {} {}".format( - app.app_status.current, accepted_status - ) + "Waiting for app status %r to be %r", + app.app_status.current, + accepted_status, ) self._wait(_ready_callback, juju, delay=MODEL_DELAY, timeout=timeout) @@ -1789,8 +1789,11 @@ def get_relation_map( relation_map = {} relation_ids = result.stdout.strip().splitlines() LOG.debug( - f"Relation IDs for interface {interface!r} on provider application " - f"{provider_app!r} in model {model!r}: {relation_ids}" + "Relation IDs for interface %r on provider application %r in model %r: %r", + interface, + provider_app, + model, + relation_ids, ) for relation_id in relation_ids: cmd = f"relation-list -r {relation_id} --app" @@ -1813,8 +1816,11 @@ def get_relation_map( relation_map[relation_id] = app_name LOG.debug( - f"Relation map for interface {interface!r} on provider application " - f"{provider_app!r} in model {model!r}: {relation_map}" + "Relation map for interface %r on provider application %r in model %r: %r", + interface, + provider_app, + model, + relation_map, ) return relation_map @@ -1850,9 +1856,11 @@ def _juju_cmd(self, *args): cmd.extend(args) cmd.extend(["--format", "json"]) - LOG.debug(f"Running command {' 
'.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr + ) return json.loads(process.stdout.strip()) @@ -1874,13 +1882,15 @@ def get_clouds( if controller: cmd.extend(["--controller", controller]) clouds_from_juju_cmd = self._juju_cmd(*cmd) - LOG.debug(f"Available clouds in juju are {clouds_from_juju_cmd.keys()}") + LOG.debug("Available clouds in juju are %s", list(clouds_from_juju_cmd.keys())) for name, details in clouds_from_juju_cmd.items(): if details["type"] == cloud_type: clouds.append(name) - LOG.debug(f"There are {len(clouds)} {cloud_type} clouds available: {clouds}") + LOG.debug( + "There are %d %s clouds available: %s", len(clouds), cloud_type, clouds + ) return clouds @@ -1909,8 +1919,10 @@ def get_controllers(self, clouds: list | None = None) -> list: name for name, details in controllers.items() if details["cloud"] in clouds ] LOG.debug( - f"There are {len(existing_controllers)} existing {clouds} " - f"controllers running: {existing_controllers}" + "There are %d existing %s controllers running: %s", + len(existing_controllers), + clouds, + existing_controllers, ) return existing_controllers @@ -1933,7 +1945,7 @@ def get_controller(self, controller: str) -> dict: try: return self._juju_cmd("show-controller", controller)[controller] except subprocess.CalledProcessError as e: - LOG.debug(e) + LOG.debug("%s: %s", e, e.stderr) raise ControllerNotFoundException() from e def get_controller_ip(self, controller: str) -> str: @@ -1975,10 +1987,10 @@ def add_cloud(self, name: str, cloud: dict, controller: str | None) -> bool: ] if controller: cmd.extend(["--controller", controller, "--force"]) - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = 
subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) return True @@ -1998,12 +2010,12 @@ def add_k8s_cloud_in_client(self, name: str, kubeconfig: dict): env = os.environ.copy() env.update({"KUBECONFIG": temp.name}) - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, text=True, check=True, env=env ) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) def add_credential(self, cloud: str, credential: dict, controller: str | None): @@ -2026,10 +2038,10 @@ def add_credential(self, cloud: str, credential: dict, controller: str | None): cmd.extend(["--controller", controller]) else: cmd.extend(["--client"]) - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) def integrate( @@ -2049,13 +2061,13 @@ def integrate( requirer, ] try: - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) except subprocess.CalledProcessError as e: - LOG.debug(e.stderr) + LOG.debug("%s: %s", e, e.stderr) if ignore_error_if_exists and "already exists" not in e.stderr: raise e @@ -2069,9 +2081,11 @@ def remove_relation(self, model: str, provider: str, requirer: str): provider, requirer, ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr + ) def get_charm_deployed_versions(self, model: str) -> dict: """Return charm deployed info for all the applications in model. @@ -2129,7 +2143,11 @@ def channel_update_needed(self, channel: str, new_channel: str) -> bool: try: return version.parse(current_track) < version.parse(new_track) except version.InvalidVersion: - LOG.error("Error: Could not compare tracks") + LOG.error( + "Could not compare tracks between %r and %r channels", + current_track, + new_track, + ) return False if risks.index(current_risk) < risks.index(new_risk): return True @@ -2241,7 +2259,7 @@ def run_action( try: unit = JujuActionHelper.get_unit(client, jhelper, model, node, app) LOG.debug( - "Running action '%s' on unit '%s', params: %s", + "Running action %r on unit %r, params: %s", action_name, unit, action_params, @@ -2255,8 +2273,8 @@ def run_action( ) return action_result except UnitNotFoundException as e: - LOG.debug(f"Application {app} not found on node {node}") + LOG.debug("Application %r is not found on node %r: %r", app, node, e) raise e except ActionFailedException as e: - LOG.debug("Action '%s' failed on node '%s': %s", action_name, node, e) + LOG.debug("Action %r failed on node %r: %r", action_name, node, e) raise e diff --git a/sunbeam-python/sunbeam/core/k8s.py b/sunbeam-python/sunbeam/core/k8s.py index 
6240c2cca..239125e8c 100644 --- a/sunbeam-python/sunbeam/core/k8s.py +++ b/sunbeam-python/sunbeam/core/k8s.py @@ -264,7 +264,7 @@ def evict_pods(client: "l_client.Client", pods: list["core_v1.Pod"]) -> None: for pod in pods: if pod.metadata is None: continue - LOG.debug(f"Evicting pod {pod.metadata.name}") + LOG.debug("Evicting pod %s", pod.metadata.name) evict = core_v1.Pod.Eviction( metadata=meta_v1.ObjectMeta( name=pod.metadata.name, namespace=pod.metadata.namespace diff --git a/sunbeam-python/sunbeam/core/openstack_api.py b/sunbeam-python/sunbeam/core/openstack_api.py index 0cb1d9975..6479e9b2f 100644 --- a/sunbeam-python/sunbeam/core/openstack_api.py +++ b/sunbeam-python/sunbeam/core/openstack_api.py @@ -66,7 +66,7 @@ def remove_compute_service( :param conn: Admin connection """ for service in conn.compute.services(host=hypervisor_name): - LOG.info(f"Disabling {service.binary} on {service.host}") + LOG.info("Disabling %s on %s", service.binary, service.host) conn.compute.disable_service(service) conn.compute.delete_service(service) @@ -80,7 +80,7 @@ def remove_network_service( :param conn: Admin connection """ for service in conn.network.agents(host=hypervisor_name): - LOG.info(f"Disabling {service.binary} on {service.host}") + LOG.info("Disabling %s on %s", service.binary, service.host) conn.network.delete_agent(service) diff --git a/sunbeam-python/sunbeam/core/questions.py b/sunbeam-python/sunbeam/core/questions.py index f6e03399d..a8eea8a59 100644 --- a/sunbeam-python/sunbeam/core/questions.py +++ b/sunbeam-python/sunbeam/core/questions.py @@ -74,7 +74,7 @@ def get_stdin_reopen_tty() -> str: sys.stdin.close() sys.stdin = open("/dev/tty", "r") except OSError as e: - LOG.debug("Failed to reopen stdin to /dev/tty: %s", e) + LOG.debug("Failed to reopen stdin to /dev/tty: %r", e) raise SunbeamException("Failed to open terminal for input") from e # note(gboutry): Reassign stream wrapper read_stream # to the new stdin. 
@@ -167,7 +167,7 @@ def calculate_default(self, new_default: T | None = None) -> T | None: elif self.default_function: default = self.default_function() if not self.password: - LOG.debug("Value from default function {}".format(default)) + LOG.debug("Value from default function %s", default) elif self.default_value is not None: default = self.default_value return default @@ -209,7 +209,7 @@ def ask( try: self.validation_function(self.answer) # type: ignore except ValueError as e: - message = f"Invalid value for {self.question!r}: {e}" + message = f"Invalid value for {self.question!r}: {e!r}" if self.preseed is not None: LOG.error(message) raise @@ -331,7 +331,7 @@ def load_answers(client: Client, key: str) -> dict: try: variables = json.loads(client.cluster.get_config(key)) except ConfigItemNotFoundException as e: - LOG.debug(f"{key}: " + str(e)) + LOG.debug("%s: %r", key, e) return variables diff --git a/sunbeam-python/sunbeam/core/steps.py b/sunbeam-python/sunbeam/core/steps.py index cecf50661..086bdfcea 100644 --- a/sunbeam-python/sunbeam/core/steps.py +++ b/sunbeam-python/sunbeam/core/steps.py @@ -155,7 +155,7 @@ def run(self, context: StepContext) -> Result: timeout=self.get_application_timeout(), ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Application %r is not ready: %r", self.application, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -209,7 +209,7 @@ def is_skip(self, context: StepContext) -> Result: filtered_node_names = [node["name"] for node in filtered_nodes] missing_nodes = set(self.names) - set(filtered_node_names) LOG.debug( - f"Nodes '{','.join(missing_nodes)}' do not exist in cluster database" + "Nodes do not exist in cluster database: %s", ",".join(missing_nodes) ) try: @@ -225,7 +225,7 @@ def is_skip(self, context: StepContext) -> Result: for name, unit in app.units.items(): if unit.machine in to_remove_node_ids: - LOG.debug(f"Unit {name} is deployed on machine: {self.machine_id}") + 
LOG.debug("Unit %s is deployed on machine: %s", name, self.machine_id) self.units_to_remove.add(name) if len(self.units_to_remove) == 0: @@ -238,7 +238,9 @@ def run(self, context: StepContext) -> Result: try: self.update_status(context, "Removing units") for unit in self.units_to_remove: - LOG.debug(f"Removing unit {unit} from application {self.application}") + LOG.debug( + "Removing unit %s from application %s", unit, self.application + ) self.jhelper.remove_unit(self.application, unit, self.model) self.update_status(context, "Waiting for units to be removed") self.jhelper.wait_units_gone( @@ -251,7 +253,7 @@ def run(self, context: StepContext) -> Result: timeout=self.get_unit_timeout(), ) except (ApplicationNotFoundException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Failed to remove application %s: %r", self.application, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -410,7 +412,7 @@ def _check_ippool_exists(self, kube_client: "l_client.Client") -> Result: pools = kube_client.list(lbpool_resource, namespace=lbpool_model) pool_names = [pool.metadata.name for pool in pools if pool.metadata] if self.pool_name not in pool_names: - LOG.debug(f"IPAddresspool {self.pool_name} does not exist, skipping") + LOG.debug("IPAddresspool %s does not exist, skipping", self.pool_name) return Result(ResultType.SKIPPED) except l_exceptions.ApiError as e: LOG.debug("Error listing load balancer pools", exc_info=True) @@ -669,7 +671,7 @@ def _check_ippool_exists(self, kube_client: "l_client.Client") -> Result: pools = kube_client.list(lbpool_resource, namespace=lbpool_model) pool_names = [pool.metadata.name for pool in pools if pool.metadata] if self.pool_name not in pool_names: - LOG.debug(f"IPAddresspool {self.pool_name} does not exist, skipping") + LOG.debug("IPAddresspool %s does not exist, skipping", self.pool_name) return Result(ResultType.SKIPPED) except l_exceptions.ApiError as e: LOG.debug("Error listing load balancer pools", 
exc_info=True) @@ -735,7 +737,7 @@ def _wait_for_ip_allocated_from_pool_annotation_update( Raises ApiError from lightkube if not connected to k8s """ service = self._get_service(service_name, find_lb=False) - LOG.debug(f"Waiting for service {service} annotations to get updated") + LOG.debug("Waiting for service %s annotations to get updated", service) if not service.metadata: raise ValueError(f"Service {service_name} has no metadata") @@ -833,19 +835,23 @@ def run(self, context: StepContext) -> Result: # noqa: C901 service_annotations[self.lb_pool_annotation] = self.pool_name if self.lb_ip_annotation in service_annotations: LOG.debug( - f"Removing {self.lb_ip_annotation!r} for service " - f"{service_name!r}" + "Removing %r for service %r", + self.lb_ip_annotation, + service_name, ) service_annotations.pop(self.lb_ip_annotation) if self.lb_allocated_pool_annotation in service_annotations: LOG.debug( - f"Removing {self.lb_allocated_pool_annotation!r} for service" - f" {service_name!r}" + "Removing %r for service %r", + self.lb_allocated_pool_annotation, + service_name, ) service_annotations.pop(self.lb_allocated_pool_annotation) LOG.debug( - f"Updating {service_name!r} to use annotation " - f"{self.lb_pool_annotation!r} with value {self.pool_name!r}" + "Updating %r to use annotation %r with value %r", + service_name, + self.lb_pool_annotation, + self.pool_name, ) # Some services like consul have Nodeport for protocol TCP and UDP # defined with same port number and so kubernetes cannot patch the @@ -903,11 +909,15 @@ def handle_lb_pools(self, name: str, addresses: list[str]): # Pool already exists in k8s, replace the pool if addresses vary if pool: if pool.spec["addresses"] != addresses: - LOG.debug(f"Update IP Address pool {name} addresses with {addresses}") + LOG.debug( + "Update IP Address pool %s addresses with %s", name, addresses + ) pool.spec["addresses"] = addresses self.kube.replace(pool) else: - LOG.debug(f"Create new IP Address Pool {name} with addresses 
{addresses}") + LOG.debug( + "Create new IP Address Pool %s with addresses %s", name, addresses + ) new_ippool = self.lbpool_resource( metadata=meta_v1.ObjectMeta(name=name), spec={"addresses": addresses, "autoAssign": False}, diff --git a/sunbeam-python/sunbeam/core/terraform.py b/sunbeam-python/sunbeam/core/terraform.py index 27839ec8b..ce679c461 100644 --- a/sunbeam-python/sunbeam/core/terraform.py +++ b/sunbeam-python/sunbeam/core/terraform.py @@ -185,7 +185,7 @@ def init(self) -> None: if backend_updated: LOG.debug("Backend updated, running terraform init -reconfigure") cmd.append("-reconfigure") - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, @@ -195,11 +195,10 @@ def init(self) -> None: env=os_env, ) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) except subprocess.CalledProcessError as e: - LOG.error(f"terraform init failed: {e.output}") - LOG.warning(e.stderr) + LOG.exception("terraform init failed: %s", e.stderr) raise TerraformException(str(e)) def apply( @@ -258,7 +257,7 @@ def output(self, hide_output: bool = False) -> dict: try: cmd = [self.terraform, "output", "-json", "-no-color"] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, @@ -270,16 +269,15 @@ def output(self, hide_output: bool = False) -> dict: stdout = process.stdout logged_output = "" if not hide_output: - logged_output = f" stdout={stdout}, stderr={process.stderr}" - LOG.debug("Command finished." + logged_output) + logged_output = f" stdout={stdout!r}, stderr={process.stderr!r}" + LOG.debug("Command finished. 
%r", logged_output) tf_output = json.loads(stdout) output = {} for key, value in tf_output.items(): output[key] = value["value"] return output except subprocess.CalledProcessError as e: - LOG.error(f"terraform output failed: {e.output}") - LOG.warning(e.stderr) + LOG.exception("terraform output failed: %s", e.stderr) raise TerraformException(str(e)) def pull_state(self) -> dict: @@ -294,7 +292,7 @@ def pull_state(self) -> dict: try: cmd = [self.terraform, "state", "pull"] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, @@ -304,11 +302,10 @@ def pull_state(self) -> dict: env=os_env, ) # don't log the state as it can be large and contain sensitive data - LOG.debug(f"Command finished. stderr={process.stderr}") + LOG.debug("Command finished. stderr=%r", process.stderr) return json.loads(process.stdout) except subprocess.CalledProcessError as e: - LOG.error(f"terraform state pull failed: {e.output}") - LOG.error(e.stderr) + LOG.exception("terraform state pull failed: %s", e.stderr) raise TerraformException(str(e)) def state_list(self) -> list: @@ -323,7 +320,7 @@ def state_list(self) -> list: try: cmd = [self.terraform, "state", "list"] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, @@ -333,12 +330,11 @@ def state_list(self) -> list: env=os_env, ) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) return process.stdout.splitlines() except subprocess.CalledProcessError as e: - LOG.error(f"terraform state list failed: {e.output}") - LOG.error(e.stderr) + LOG.exception("terraform state list failed: %s", e.stderr) raise TerraformException(str(e)) def state_rm(self, resource: str) -> None: @@ -353,7 +349,7 @@ def state_rm(self, resource: str) -> None: try: cmd = [self.terraform, "state", "rm", resource] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, @@ -363,11 +359,10 @@ def state_rm(self, resource: str) -> None: env=os_env, ) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) except subprocess.CalledProcessError as e: - LOG.error(f"terraform state rm failed: {e.output}") - LOG.error(e.stderr) + LOG.exception("terraform state rm failed: %s", e.stderr) raise TerraformException(str(e)) def sync(self, reporter: ProgressReporter | None = None) -> None: @@ -413,7 +408,7 @@ def update_partial_tfvars_and_apply_tf( update_config(client, tfvar_config, data_to_save) self.write_tfvars(updated_tfvars) - LOG.debug(f"Applying plan {self.plan} with tfvars {updated_tfvars}") + LOG.debug("Applying plan %s with tfvars %s", self.plan, updated_tfvars) self.apply(tf_apply_extra_args, reporter=reporter) def update_tfvars_and_apply_tf( @@ -464,7 +459,7 @@ def update_tfvars_and_apply_tf( update_config(client, tfvar_config, data_to_save) self.write_tfvars(updated_tfvars) - LOG.debug(f"Applying plan {self.plan} with tfvars {updated_tfvars}") + LOG.debug("Applying plan %s with tfvars %s", self.plan, updated_tfvars) self.apply(tf_apply_extra_args, reporter=reporter) def _load_and_filter_db_tfvars( @@ -725,7 +720,7 @@ def _run_terraform_command( Reads stdout line-by-line, parses JSON events, and reports them. 
Reads stderr in a separate thread to avoid pipe buffer deadlock. """ - LOG.debug(f"Running command {' '.join(cmd)}, cwd: {self.path}") + LOG.debug("Running command %s with cwd: %s", " ".join(cmd), self.path) process = subprocess.Popen( cmd, @@ -764,9 +759,9 @@ def _read_stderr(): stderr_thread.join(timeout=10) stderr_output = "".join(stderr_lines) - LOG.debug(f"Command finished. returncode={process.returncode}") + LOG.debug("Command finished. returncode=%s", process.returncode) if stderr_output: - LOG.debug(f"stderr: {stderr_output}") + LOG.debug("stderr: %s", stderr_output) if process.returncode != 0: if state_lock_flag[0] or "remote state already locked" in stderr_output: diff --git a/sunbeam-python/sunbeam/core/watcher.py b/sunbeam-python/sunbeam/core/watcher.py index b838fe9cc..7210b9e35 100644 --- a/sunbeam-python/sunbeam/core/watcher.py +++ b/sunbeam-python/sunbeam/core/watcher.py @@ -134,9 +134,9 @@ def create_audit( ) if audit_details.state == "SUCCEEDED": - LOG.debug(f"Create Watcher audit {audit.uuid} successfully") + LOG.debug("Create Watcher audit %s succeeded", audit.uuid) else: - LOG.debug(f"Create Watcher audit {audit.uuid} failed") + LOG.debug("Create Watcher audit %s failed", audit.uuid) raise SunbeamException( f"Create watcher audit failed, template: {template.name}" ) @@ -176,16 +176,16 @@ def exec_audit(client: "watcher_client.Client", audit: "watcher.Audit"): action_plans = client.action_plan.list(audit=audit.uuid) for action_plan in action_plans: _exec_plan(client=client, action_plan=action_plan) - LOG.debug(f"All Action plans for Audit {audit.uuid} started") + LOG.debug("All Action plans for Audit %s started", audit.uuid) def _exec_plan(client: "watcher_client.Client", action_plan: "watcher.ActionPlan"): """Run action plan.""" if action_plan.state == "SUCCEEDED": - LOG.debug(f"action plan {action_plan.uuid} state is SUCCEEDED, skip execution") + LOG.debug("action plan %s state is SUCCEEDED, skip execution", action_plan.uuid) return 
client.action_plan.start(action_plan_id=action_plan.uuid) - LOG.debug(f"Start Watcher action plan {action_plan.uuid}") + LOG.debug("Start Watcher action plan %s", action_plan.uuid) def wait_until_action_state( @@ -218,7 +218,9 @@ def wait_until_action_state( for action in actions: if completed_actions.get(action.uuid) is True: continue - LOG.debug(f"Watcher action {action.uuid} is in {action.state} state.") + LOG.debug( + "Watcher action %s is in %s state.", action.uuid, action.state + ) if action.state == "FAILED": raise WatcherActionFailedException if action.state in expected_state: @@ -244,6 +246,6 @@ def wait_until_action_state( ) ) LOG.debug( - f"All actions for Watcher audit {audit.uuid}" - " have been successfully completed." + "All actions for Watcher audit %s have been successfully completed.", + audit.uuid, ) diff --git a/sunbeam-python/sunbeam/feature_gates.py b/sunbeam-python/sunbeam/feature_gates.py index 227bd4e7e..a7df49c09 100644 --- a/sunbeam-python/sunbeam/feature_gates.py +++ b/sunbeam-python/sunbeam/feature_gates.py @@ -759,8 +759,10 @@ def log_gated_feature(feature_name: str, gate_key: str) -> None: gate_key: The snap config key for the gate """ LOG.debug( - f"Feature '{feature_name}' is gated via '{gate_key}'. " - f"Enable with: snap set openstack {gate_key}=true" + "Feature %r is gated via %r. 
Enable with: snap set openstack %s=true", + feature_name, + gate_key, + gate_key, ) @@ -794,5 +796,5 @@ def get_feature_gate_from_cluster( gate = client.cluster.get_feature_gate(gate_key) return gate.enabled except Exception as e: - LOG.debug(f"Feature gate '{gate_key}' not found in cluster DB: {e}") + LOG.debug("Feature gate %r not found in cluster DB: %r", gate_key, e) return None diff --git a/sunbeam-python/sunbeam/feature_manager.py b/sunbeam-python/sunbeam/feature_manager.py index 1163af54f..c9019cdc6 100644 --- a/sunbeam-python/sunbeam/feature_manager.py +++ b/sunbeam-python/sunbeam/feature_manager.py @@ -112,7 +112,7 @@ def _check_gate_enabled( if gate and gate.enabled: return True except Exception: - LOG.debug(f"Failed to get feature gate {gate_key} from cluster") + LOG.debug("Failed to get feature gate %r from cluster", gate_key) # Fall through to snap config check # Check snap config @@ -412,7 +412,7 @@ def register(self, cli: click.Group, deployment: Deployment) -> None: try: enabled = feature.is_enabled(deployment.get_client()) # type: ignore except AttributeError: - LOG.debug("Feature %r is not enable / disable feature", feature.name) + LOG.debug("Feature %r is not an enable / disable feature", feature.name) enabled = False except ( SunbeamException, @@ -471,12 +471,12 @@ def update_features( """ for feature in self.enabled_features(deployment): if hasattr(feature, "upgrade_hook"): - LOG.debug(f"Upgrading feature {feature.name}") + LOG.debug("Upgrading feature %r", feature.name) try: feature.upgrade_hook(deployment, upgrade_release=upgrade_release) except TypeError: LOG.debug( - f"Feature {feature.name} does not support upgrades between " - "channels" + "Feature %r does not support upgrades between channels", + feature.name, ) feature.upgrade_hook(deployment) diff --git a/sunbeam-python/sunbeam/features/baremetal/steps.py b/sunbeam-python/sunbeam/features/baremetal/steps.py index bb746ec3d..d212264ee 100644 --- 
a/sunbeam-python/sunbeam/features/baremetal/steps.py +++ b/sunbeam-python/sunbeam/features/baremetal/steps.py @@ -73,13 +73,13 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, LeaderNotFoundException) as e: LOG.error( - "Error running the set-temp-url-secret action on %s: %s", + "Error running the set-temp-url-secret action on %s: %r", app, e, ) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Application monitored for readiness: {self.apps}") + LOG.debug("Application monitored for readiness: %s", self.apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, self.apps, status_queue, context.status) try: @@ -90,7 +90,7 @@ def run(self, context: StepContext) -> Result: queue=status_queue, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Application %s failed to become active: %r", app, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -123,9 +123,9 @@ def run(self, context: StepContext) -> Result: """Execute step.""" try: self._run(reporter=context.reporter) - except Exception as ex: - LOG.exception(str(ex)) - return Result(ResultType.FAILED, str(ex)) + except Exception as e: + LOG.exception("Failed to execute step %s: %r", self.name, e) + return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -152,7 +152,7 @@ def _apply_tfvars( override_tfvars=tfvars, reporter=reporter, ) - LOG.debug(f"Applications monitored for readiness: {apps}") + LOG.debug("Applications monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self.feature, apps, status_queue) @@ -247,7 +247,7 @@ def _run(self, reporter=None) -> None: reporter=reporter, ) - LOG.debug(f"Waiting for application to disappear: {self.charm_name}") + LOG.debug("Waiting for application to disappear: %s", self.charm_name) try: self.jhelper.wait_application_gone( [self.charm_name], diff --git 
a/sunbeam-python/sunbeam/features/caas/feature.py b/sunbeam-python/sunbeam/features/caas/feature.py index 3eedccd26..4c10c343c 100644 --- a/sunbeam-python/sunbeam/features/caas/feature.py +++ b/sunbeam-python/sunbeam/features/caas/feature.py @@ -144,8 +144,10 @@ def is_skip(self, context: StepContext) -> Result: CLUSTER_API_VERSIONS.get(provider_name, "0.0.0") ) LOG.debug( - f"{provider_name}: {provider_installed_version}" - f" {provider_required_version}" + "Provider %s: %s <- %s", + provider_name, + provider_installed_version, + provider_required_version, ) if ( provider_installed_version.major != provider_required_version.major @@ -369,7 +371,8 @@ def run(self, context: StepContext) -> Result: self._grant_access_to_magnum(secret_id) except JujuException as e: LOG.debug( - f"Failed to grant magnum application access to juju secret {secret_id}", + "Failed to grant magnum application access to juju secret %s", + secret_id, exc_info=True, ) return Result(ResultType.FAILED, str(e)) @@ -413,7 +416,7 @@ def run(self, context: StepContext) -> Result: self.jhelper.remove_secret(OPENSTACK_MODEL, KUBECONFIG_SECRET_NAME) except JujuException as e: # Ignore the exception - LOG.debug(f"Unable to remove secret {KUBECONFIG_SECRET_NAME}: {str(e)}") + LOG.debug("Unable to remove secret %s: %r", KUBECONFIG_SECRET_NAME, e) return Result(ResultType.COMPLETED) @@ -460,17 +463,19 @@ def _delete_capi_namespaces(self) -> None: namespaces_in_cluster_list = [ ns.metadata.name for ns in namespaces_in_cluster if ns.metadata ] - LOG.debug(f"Namespaces in the management cluster: {namespaces_in_cluster_list}") + LOG.debug( + "Namespaces in the management cluster: %s", namespaces_in_cluster_list + ) namespaces_to_delete = set(capi_namespaces).intersection( namespaces_in_cluster_list ) LOG.debug( - f"Namespaces to delete in the management cluster: {namespaces_to_delete}" + "Namespaces to delete in the management cluster: %s", namespaces_to_delete ) for ns in namespaces_to_delete: - 
LOG.debug(f"Deleting namespace {ns}") + LOG.debug("Deleting namespace %s", ns) self.kube.delete(core_v1.Namespace, name=ns) def _delete_capi_crds(self) -> None: @@ -495,7 +500,7 @@ def _delete_capi_crds(self) -> None: name="images.openstack.k-orc.cloud", ) except l_exceptions.ApiError as e: - LOG.debug(f"Error in deleting ORC CRD: {str(e)}") + LOG.debug("Error in deleting ORC CRD: %r", e) # Delete Provider CRD, ignore if delete fails try: @@ -505,7 +510,7 @@ def _delete_capi_crds(self) -> None: name="providers.clusterctl.cluster.x-k8s.io", ) except l_exceptions.ApiError as e: - LOG.debug(f"Error in deleting provider CRD: {str(e)}") + LOG.debug("Error in deleting provider CRD: %r", e) def _delete_orc_resources(self) -> None: orc_cluster_roles = [ @@ -529,12 +534,12 @@ def _delete_orc_resources(self) -> None: clusterroles_in_cluster_list ) LOG.debug( - "ClusterRoles to delete in the management cluster: " - f"{clusterroles_to_delete}" + "ClusterRoles to delete in the management cluster: %s", + clusterroles_to_delete, ) for clusterrole in clusterroles_to_delete: - LOG.debug(f"Deleting ClusterRole {clusterrole}") + LOG.debug("Deleting ClusterRole %s", clusterrole) self.kube.delete(rbac_authorization_v1.ClusterRole, clusterrole) # Delete ORC ClusterRoleBindings @@ -550,12 +555,12 @@ def _delete_orc_resources(self) -> None: clusterrolebindings_in_cluster_list ) LOG.debug( - "ClusterRoleBindings to delete in the management cluster: " - f"{clusterrolebindings_to_delete}" + "ClusterRoleBindings to delete in the management cluster: %s", + clusterrolebindings_to_delete, ) for clusterrolebinding in clusterrolebindings_to_delete: - LOG.debug(f"Deleting ClusterRoleBinding {clusterrolebinding}") + LOG.debug("Deleting ClusterRoleBinding %s", clusterrolebinding) self.kube.delete( rbac_authorization_v1.ClusterRoleBinding, clusterrolebinding ) @@ -597,7 +602,7 @@ def run(self, context: StepContext) -> Result: try: self._delete_capi_components() except subprocess.CalledProcessError as 
e: - LOG.debug(f"Error from clusterctl delete: {e.stderr}") + LOG.debug("Error from clusterctl delete: %s", e.stderr) # If CRDs are already deleted, the command results in following error # Error: failed to check Cluster API version: # customresourcedefinitions.apiextensions.k8s.io "clusters.cluster.x-k8s.io" @@ -607,7 +612,7 @@ def run(self, context: StepContext) -> Result: message = f"Error in deleting Cluster API components: {str(e)}" return Result(ResultType.FAILED, message) else: - LOG.debug("CRDs already deleted and so ignore clusterctl delete error") + LOG.debug("CRDs is already deleted, ignoring clusterctl delete error") except subprocess.TimeoutExpired as e: message = f"Timed out deleting Cluster API components: {str(e)}" return Result(ResultType.FAILED, message) diff --git a/sunbeam-python/sunbeam/features/dns/feature.py b/sunbeam-python/sunbeam/features/dns/feature.py index dfb780cff..e7724c922 100644 --- a/sunbeam-python/sunbeam/features/dns/feature.py +++ b/sunbeam-python/sunbeam/features/dns/feature.py @@ -287,7 +287,7 @@ def upgrade_hook( :param upgrade_release: Whether to upgrade release """ if upgrade_release: - LOG.debug(f"Release upgrade not supported for feature {self.name}") + LOG.debug("Release upgrade is not supported for feature %s", self.name) return super().upgrade_hook(deployment, upgrade_release, show_hints) @@ -302,4 +302,4 @@ def upgrade_hook( plan.append(PatchBindLoadBalancerIPStep(deployment.get_client())) run_plan(plan, console, show_hints) - LOG.debug(f"OpenStack {self.display_name} application refreshed.") + LOG.debug("OpenStack %s application refreshed", self.display_name) diff --git a/sunbeam-python/sunbeam/features/instance_recovery/consul.py b/sunbeam-python/sunbeam/features/instance_recovery/consul.py index 5c8d805dd..dfdaaeb99 100644 --- a/sunbeam-python/sunbeam/features/instance_recovery/consul.py +++ b/sunbeam-python/sunbeam/features/instance_recovery/consul.py @@ -217,7 +217,7 @@ def run(self, context: StepContext) -> 
Result: return Result(ResultType.FAILED, str(e)) apps = ConsulFeature.set_consul_client_application_names(self.deployment) - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -264,7 +264,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) apps = ConsulFeature.set_consul_client_application_names(self.deployment) - LOG.debug(f"Application monitored for removal: {apps}") + LOG.debug("Application monitored for removal: %s", apps) try: self.jhelper.wait_application_gone( apps, @@ -272,7 +272,7 @@ def run(self, context: StepContext) -> Result: timeout=APPLICATION_DEPLOY_TIMEOUT, ) except TimeoutError as e: - LOG.debug(f"Failed to destroy {apps}", exc_info=True) + LOG.debug("Failed to destroy %s", apps, exc_info=True) return Result(ResultType.FAILED, str(e)) extra_tfvars = { diff --git a/sunbeam-python/sunbeam/features/interface/utils.py b/sunbeam-python/sunbeam/features/interface/utils.py index af68f811c..ff20e5a83 100644 --- a/sunbeam-python/sunbeam/features/interface/utils.py +++ b/sunbeam-python/sunbeam/features/interface/utils.py @@ -69,7 +69,7 @@ def is_certificate_valid(certificate: bytes) -> bool: certificate_bytes = base64.b64decode(certificate) x509.load_pem_x509_certificate(certificate_bytes) except (binascii.Error, TypeError, ValueError) as e: - LOG.debug(e) + LOG.debug("Failed to validate certificate: %r", e) return False return True @@ -95,7 +95,7 @@ def validate_ca_certificate( x509.load_pem_x509_certificate(ca_bytes) return value except (binascii.Error, TypeError, ValueError) as e: - LOG.debug(e) + LOG.debug("Failed to validate CA certificate: %r", e) raise click.BadParameter(str(e)) @@ -135,7 +135,7 @@ def validate_ca_chain( ValueError, crypto_exceptions.InvalidSignature, ) as e: - LOG.debug(e) + LOG.debug("Failed to 
validate CA chain: %r", e) raise click.BadParameter(str(e)) @@ -145,11 +145,11 @@ def get_subject_from_csr(csr: str) -> str | None: uid = req.subject.get_attributes_for_oid( x509_oid.NameOID.X500_UNIQUE_IDENTIFIER ) - LOG.debug(f"UID for requested csr: {uid}") + LOG.debug("UID for requested csr: %s", uid) # Pick the first available ID return str(uid[0].value) except (binascii.Error, TypeError, ValueError) as e: - LOG.debug(e) + LOG.debug("Failed to get subject from CSR: %r", e) return None @@ -157,7 +157,7 @@ def encode_base64_as_string(data: str) -> str | None: try: return base64.b64encode(bytes(data, "utf-8")).decode() except (binascii.Error, TypeError) as e: - LOG.debug(f"Error in encoding data {data} : {str(e)}") + LOG.debug("Error in encoding data %s: %r", data, e) return None @@ -165,7 +165,7 @@ def decode_base64_as_string(data: str) -> str | None: try: return base64.b64decode(data).decode() except (binascii.Error, TypeError) as e: - LOG.debug(f"Error in decoding data {data} : {str(e)}") + LOG.debug("Error in decoding data %s: %r", data, e) return None diff --git a/sunbeam-python/sunbeam/features/interface/v1/base.py b/sunbeam-python/sunbeam/features/interface/v1/base.py index d14e3dcf3..baa368d15 100644 --- a/sunbeam-python/sunbeam/features/interface/v1/base.py +++ b/sunbeam-python/sunbeam/features/interface/v1/base.py @@ -95,15 +95,15 @@ def validate_commands( :returns: True if validation is successful, else False. 
""" self_commands = self.commands(conditions) - LOG.debug(f"Validating commands: {self_commands}") + LOG.debug("Validating commands: %s", self_commands) for group, commands in self_commands.items(): for command in commands: cmd_name = command.get("name") cmd_func = command.get("command") if None in (cmd_name, cmd_func): LOG.warning( - f"Feature {self.name}: Commands dictionary is not in " - "required format" + "Feature %s: Commands dictionary is not in the required format", + self.name, ) return False @@ -114,8 +114,9 @@ def validate_commands( ] ): LOG.warning( - f"Feature {self.name}: {cmd_func} should be either " - "click.Group or click.Command" + "Feature %s: %s should be either click.Group or click.Command", + self.name, + cmd_func, ) return False @@ -205,20 +206,23 @@ def register( :param cli: Sunbeam main cli group """ - LOG.debug(f"Registering feature {self.name}") + LOG.debug("Registering feature %s", self.name) if not self.validate_commands(conditions): - LOG.warning(f"Not able to register the feature {self.name}") + LOG.warning("Not able to register the feature %s", self.name) return groups = utils.get_all_registered_groups(cli) - LOG.debug(f"Registered groups: {groups}") + LOG.debug("Registered groups: %s", groups) for group, commands in self.commands(conditions).items(): group_obj = groups.get(group) if not group_obj: cmd_names = [command.get("name") for command in commands] LOG.warning( - f"Feature {self.name}: Not able to register command " - f"{cmd_names} in group {group} as group does not exist" + "Feature %s: Not able to register command %s in group %s" + " as group does not exist", + self.name, + cmd_names, + group, ) continue @@ -226,30 +230,40 @@ def register( cmd = command.get("command") if not cmd: LOG.warning( - f"Feature {self.name}: Not able to register command " - f"{command.get('name')} in group {group} as command is None" + "Feature %s: Not able to register command %s in group %s" + " as command is None", + self.name, + 
command.get("name"), + group, ) continue cmd_name = command.get("name") if cmd_name in group_obj.list_commands({}): if isinstance(cmd, click.Command): LOG.warning( - f"Feature {self.name}: Discarding adding command " - f"{cmd_name} as it already exists in group {group}" + "Feature %s: Discarding adding command %s as it already" + " exists in group %s", + self.name, + cmd_name, + group, ) else: # Should be sub group and already exists LOG.debug( - f"Feature {self.name}: Group {cmd_name} already " - f"part of parent group {group}" + "Feature %s: Group %s is already part of parent group %s", + self.name, + cmd_name, + group, ) continue cmd.callback = ClickInstantiator(cmd.callback, self) group_obj.add_command(cmd, cmd_name) LOG.debug( - f"Feature {self.name}: Command {cmd_name} registered in " - f"group {group}" + "Feature %s: Command %s is registered in group %s", + self.name, + cmd_name, + group, ) # Add newly created click groups to the registered groups so that @@ -601,9 +615,11 @@ def check_enabled_requirement_is_compatible( current_version = self.fetch_feature_version( deployment.get_client(), requirement.name ) - LOG.debug(f"Feature {requirement.name} version {current_version} found") + LOG.debug( + "Feature %s version %s is found", requirement.name, current_version + ) except MissingVersionInfoError as e: - LOG.debug(f"Version info for feature {requirement.name} not found") + LOG.debug("Version info for feature %s is not found", requirement.name) raise FeatureError( f"{requirement.name} has no version recorded," f" {requirement.specifier} required" @@ -678,7 +694,9 @@ def check_enablement_requirements( feature_classes = features().values() for klass in feature_classes: if not issubclass(klass, EnableDisableFeature): - LOG.debug(f"Skipping {klass} as it is not of type EnableDisableFeature") + LOG.debug( + "Skipping %s as it is not of type EnableDisableFeature", klass + ) continue feature = klass() if not feature.is_enabled(deployment.get_client()): @@ -705,8 
+723,8 @@ def enable_requirements(self, deployment: Deployment, show_hints: bool): for requirement in self.requires: if not issubclass(requirement.klass, EnableDisableFeature): LOG.debug( - f"Skipping {requirement.klass} as it is not of type" - " EnableDisableFeature" + "Skipping %s as it is not of type EnableDisableFeature", + requirement.klass, ) continue feature = requirement.klass() diff --git a/sunbeam-python/sunbeam/features/interface/v1/openstack.py b/sunbeam-python/sunbeam/features/interface/v1/openstack.py index b7966a6bd..a215e561d 100644 --- a/sunbeam-python/sunbeam/features/interface/v1/openstack.py +++ b/sunbeam-python/sunbeam/features/interface/v1/openstack.py @@ -412,15 +412,16 @@ def upgrade_hook( :param upgrade_release: Whether to upgrade release """ if upgrade_release: - LOG.debug(f"Release upgrade not supported for feature {self.name}") + LOG.debug("Release upgrade is not supported for feature %s", self.name) return # Nothig to do if the plan is openstack-plan as it is taken # care during control plane refresh if self.tf_plan_location == TerraformPlanLocation.SUNBEAM_TERRAFORM_REPO: LOG.debug( - f"Ignore upgrade_hook for feature {self.name}, the corresponding apps" - f" will be refreshed as part of Control plane refresh" + "Ignore upgrade_hook for feature %s, the corresponding apps" + " will be refreshed as part of Control plane refresh", + self.name, ) return @@ -463,7 +464,7 @@ def __init__( def run(self, context: StepContext) -> Result: """Run feature upgrade.""" - LOG.debug(f"Upgrading feature {self.feature.name}") + LOG.debug("Upgrading feature %s", self.feature.name) expected_wls = ["active", "blocked", "unknown"] tfvar_map = self.feature.manifest_attributes_tfvar_map() charms = list(tfvar_map.get(self.feature.tfplan, {}).get("charms", {}).keys()) @@ -480,7 +481,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except TerraformException as e: - LOG.exception(f"Error upgrading feature {self.feature.name}") + 
LOG.exception("Error upgrading feature %s", self.feature.name) return Result(ResultType.FAILED, str(e)) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) @@ -494,7 +495,7 @@ def run(self, context: StepContext) -> Result: overlay=build_overlay_dict(apps), ) except (JujuWaitException, TimeoutError) as e: - LOG.debug(str(e)) + LOG.debug("Timed out upgrading %s: %r", self.feature.name, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -584,7 +585,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) apps = self.feature.set_application_names(self.deployment) - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) overlay = {**build_overlay_dict(apps), **self.extra_overlay} @@ -599,7 +600,7 @@ def run(self, context: StepContext) -> Result: overlay=overlay, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out enabling %s: %r", self.feature.name, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -685,7 +686,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) apps = self.feature.set_application_names(self.deployment) - LOG.debug(f"Application monitored for removal: {apps}") + LOG.debug("Application monitored for removal: %s", apps) try: self.jhelper.wait_application_gone( apps, @@ -695,7 +696,7 @@ def run(self, context: StepContext) -> Result: ), ) except TimeoutError as e: - LOG.debug(f"Failed to destroy {apps}", exc_info=True) + LOG.debug("Failed to destroy %s", apps, exc_info=True) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -715,7 +716,7 @@ def __init__(self, jhelper: JujuHelper, apps: list, model: str, timeout: int = 3 def 
run(self, context: StepContext) -> Result: """Wait for applications to be idle.""" - LOG.debug(f"Application monitored for readiness: {self.apps}") + LOG.debug("Application monitored for readiness: %s", self.apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, self.apps, status_queue, context.status) try: diff --git a/sunbeam-python/sunbeam/features/ldap/feature.py b/sunbeam-python/sunbeam/features/ldap/feature.py index a8542564f..6ee06f45c 100644 --- a/sunbeam-python/sunbeam/features/ldap/feature.py +++ b/sunbeam-python/sunbeam/features/ldap/feature.py @@ -110,7 +110,7 @@ def run(self, context: StepContext) -> Result: timeout=APPLICATION_REMOVE_TIMEOUT, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out disabling LDAP domain: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -167,7 +167,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) charm_name = "keystone-ldap-{}".format(self.charm_config["domain-name"]) apps = ["keystone", charm_name] - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -178,7 +178,7 @@ def run(self, context: StepContext) -> Result: queue=status_queue, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out updating LDAP domain: %r", e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -237,7 +237,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) charm_name = "keystone-ldap-{}".format(self.charm_config["domain-name"]) apps = ["keystone", charm_name] - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) status_queue: 
queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -248,7 +248,7 @@ def run(self, context: StepContext) -> Result: queue=status_queue, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out adding LDAP domain: %r", e) return Result(ResultType.FAILED, str(e)) finally: task.stop() diff --git a/sunbeam-python/sunbeam/features/maintenance/checks.py b/sunbeam-python/sunbeam/features/maintenance/checks.py index 34c757564..4b3da96e8 100644 --- a/sunbeam-python/sunbeam/features/maintenance/checks.py +++ b/sunbeam-python/sunbeam/features/maintenance/checks.py @@ -98,7 +98,7 @@ def run(self) -> bool: if not_expected_status_instances: _msg = f"Instances not in expected status: {not_expected_status_instances}" if self.force: - LOG.warning(f"Ignore issue: {_msg}") + LOG.warning("Ignore issue: %s", _msg) return True self.message = _msg return False @@ -138,7 +138,7 @@ def run(self) -> bool: if unexpected_instances: _msg = f"Instances have ephemeral disk: {unexpected_instances}" if self.force: - LOG.warning(f"Ignore issue: {_msg}") + LOG.warning("Ignore issue: %s", _msg) return True self.message = _msg return False @@ -172,7 +172,7 @@ def run(self) -> bool: instance_ids = ",".join([inst.id for inst in instances]) _msg = f"Instances {instance_ids} still on node {self.node}" if self.force: - LOG.warning(f"Ignore issue: {_msg}") + LOG.warning("Ignore issue: %s", _msg) return True self.message = _msg return False @@ -209,7 +209,7 @@ def run(self) -> bool: if not len(expected_services) == 1: _msg = f"Nova compute still not disabled on node {self.node}" if self.force: - LOG.warning(f"Ignore issue: {_msg}") + LOG.warning("Ignore issue: %s", _msg) return True self.message = _msg return False @@ -265,7 +265,7 @@ def run(self) -> bool: if action.get("error"): msg = action.get("error") if self.force: - LOG.warning(f"Ignore issue: {msg}") + LOG.warning("Ignore issue: %s", msg) else: 
self.message = msg return False @@ -485,7 +485,7 @@ def _get_k8s_configs(self) -> Mapping: ) LOG.debug("Got config %s for %s", config, K8S_APP_NAME) except (ApplicationNotFoundException, JujuException) as e: - LOG.debug("%s", str(e)) + LOG.debug("Failed to get config for %s: %r", K8S_APP_NAME, e) return {} return config @@ -497,7 +497,7 @@ def _get_k8s_unit_names(self) -> list[str]: ) LOG.debug("Got %s application", k8s_application) except ApplicationNotFoundException as e: - LOG.debug("%s", str(e)) + LOG.debug("Failed to get application %s: %r", K8S_APP_NAME, e) return [] return list(k8s_application.units.keys()) @@ -512,7 +512,7 @@ def _get_this_k8s_unit_name(self, node: str) -> str: K8S_APP_NAME, ) except UnitNotFoundException: - LOG.debug("cannot find %s unit in '%s'", K8S_APP_NAME, node) + LOG.debug("Cannot find %s unit in %r", K8S_APP_NAME, node) return "" def _has_active_k8s_dqlite_svc(self, k8s_unit_name: str) -> bool: @@ -524,10 +524,10 @@ def _has_active_k8s_dqlite_svc(self, k8s_unit_name: str) -> bool: f"snap services {K8S_DQLITE_SVC_NAME} | grep active", timeout=COMMAND_TIMEOUT, ) - LOG.debug(result) + LOG.debug("Checking k8s dqlite svc for %s: %r", k8s_unit_name, result) status = result.stdout.split()[2] except (ExecFailedException, IndexError) as e: - LOG.debug("%s", str(e)) + LOG.debug("Failed to check k8s dqlite svc for %s: %r", k8s_unit_name, e) return False return "active" == status @@ -579,7 +579,7 @@ def run(self) -> bool: ready_replicas = r.status.readyReplicas or 0 if ready_replicas <= 1: LOG.debug( - "name=%s kind=%s namespace=%s replicas=%d/%d", + "K8S name=%s kind=%s namespace=%s replicas=%d/%d", owner.name, owner.kind, namespace, @@ -646,7 +646,7 @@ def run(self) -> bool: # Found juju controller pod in this node if pod.spec and pod.spec.nodeName == self.node: LOG.debug( - "Found juju controller pod '%s' in '%s'", + "Found juju controller pod %r in %r", pod.metadata.name, self.node, ) diff --git 
a/sunbeam-python/sunbeam/features/observability/feature.py b/sunbeam-python/sunbeam/features/observability/feature.py index bfb12033d..117d30381 100644 --- a/sunbeam-python/sunbeam/features/observability/feature.py +++ b/sunbeam-python/sunbeam/features/observability/feature.py @@ -166,7 +166,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) apps = self.jhelper.get_application_names(self.model) - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -287,7 +287,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, str(e)) app = "opentelemetry-collector" - LOG.debug(f"Application monitored for readiness: {app}") + LOG.debug("Application monitored for readiness: %s", app) try: self.jhelper.wait_application_ready( app, @@ -463,7 +463,7 @@ def run(self, context: StepContext) -> Result: self.deployment.openstack_machines_model, ]: app = "opentelemetry-collector" - LOG.debug(f"Application monitored for readiness: {app}") + LOG.debug("Application monitored for readiness: %s", app) try: self.jhelper.wait_application_ready( app, @@ -524,7 +524,7 @@ def run(self, context: StepContext) -> Result: self.deployment.openstack_machines_model, ]: relations = self._get_relations(model, self.endpoints) - LOG.debug(f"List of relations to remove in model {model}: {relations}") + LOG.debug("List of relations to remove in model %s: %s", model, relations) for relation_pair in relations: self.remove_relation( model, @@ -537,7 +537,7 @@ def run(self, context: StepContext) -> Result: self.deployment.openstack_machines_model, ]: app = "opentelemetry-collector" - LOG.debug(f"Application monitored for readiness: {app}") + LOG.debug("Application monitored for readiness: %s", app) try: self.jhelper.wait_application_ready( app, diff 
--git a/sunbeam-python/sunbeam/features/shared_filesystem/manila_data.py b/sunbeam-python/sunbeam/features/shared_filesystem/manila_data.py index 09da54b3d..3b1e75a34 100644 --- a/sunbeam-python/sunbeam/features/shared_filesystem/manila_data.py +++ b/sunbeam-python/sunbeam/features/shared_filesystem/manila_data.py @@ -170,7 +170,7 @@ def run(self, context: StepContext) -> Result: try: resources = self.tfhelper.state_list() except TerraformException as e: - LOG.debug(f"Failed to list terraform state: {str(e)}") + LOG.debug("Failed to list terraform state: %r", e) return Result(ResultType.FAILED, "Failed to list terraform state") for resource in resources: @@ -178,7 +178,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.state_rm(resource) except TerraformException as e: - LOG.debug(f"Failed to remove resource {resource}: {str(e)}") + LOG.debug("Failed to remove resource %s: %r", resource, e) return Result( ResultType.FAILED, f"Failed to remove resource {resource} from state", diff --git a/sunbeam-python/sunbeam/features/telemetry/feature.py b/sunbeam-python/sunbeam/features/telemetry/feature.py index e7313a033..de3c3363e 100644 --- a/sunbeam-python/sunbeam/features/telemetry/feature.py +++ b/sunbeam-python/sunbeam/features/telemetry/feature.py @@ -163,8 +163,10 @@ def run_enable_plans( principal_app = backend_instance.principal_application if principal_app in processed_principals: LOG.debug( - f"Skipping {backend_name}: principal application " - f"{principal_app} already processed" + "Skipping %s: principal application %s is already" + " processed", + backend_name, + principal_app, ) continue @@ -186,8 +188,9 @@ def run_enable_plans( ) except Exception as e: LOG.warning( - f"Failed to add specific cinder-volume step for backend " - f"{backend_name}: {e}" + "Failed to add specific cinder-volume step for backend %s: %r", + backend_name, + e, ) if len(plan3) > 1: # More than just TerraformInitStep @@ -261,8 +264,10 @@ def run_disable_plans(self, 
deployment: Deployment, show_hints: bool) -> None: principal_app = backend_instance.principal_application if principal_app in processed_principals: LOG.debug( - f"Skipping {backend_name}: principal application " - f"{principal_app} already processed" + "Skipping %s: principal application %s is already" + " processed", + backend_name, + principal_app, ) continue @@ -285,8 +290,9 @@ def run_disable_plans(self, deployment: Deployment, show_hints: bool) -> None: ) except Exception as e: LOG.warning( - f"Failed to add specific cinder-volume step for backend " - f"{backend_name}: {e}" + "Failed to add specific cinder-volume step for backend %s: %r", + backend_name, + e, ) if len(plan2) > 1: # More than just TerraformInitStep diff --git a/sunbeam-python/sunbeam/features/tls/common.py b/sunbeam-python/sunbeam/features/tls/common.py index 3ef7b147b..9d7f36e19 100644 --- a/sunbeam-python/sunbeam/features/tls/common.py +++ b/sunbeam-python/sunbeam/features/tls/common.py @@ -262,16 +262,16 @@ def is_skip(self, context: StepContext) -> Result: try: unit = self.jhelper.get_leader_unit(self.app, self.model) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {self.app} leader") + LOG.debug("Unable to get %s leader", self.app) return Result(ResultType.FAILED, str(e)) try: action_result = self.jhelper.run_action(unit, self.model, action_cmd) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed") + LOG.debug("Running action %s on %s failed", action_cmd, unit) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) ca_list = action_result if self.cert_name in ca_list: @@ -285,7 +285,7 @@ def run(self, context: StepContext) -> Result: try: unit = self.jhelper.get_leader_unit(self.app, self.model) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {self.app} leader") + LOG.debug("Unable to get %s 
leader", self.app) return Result(ResultType.FAILED, str(e)) action_params = { @@ -296,10 +296,10 @@ def run(self, context: StepContext) -> Result: action_params["chain"] = self.ca_chain try: - LOG.debug(f"Running action {action_cmd} with params {action_params}") + LOG.debug("Running action %s with params %s", action_cmd, action_params) self.jhelper.run_action(unit, self.model, action_cmd, action_params) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed") + LOG.debug("Running action %s on %s failed", action_cmd, unit) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -333,16 +333,16 @@ def is_skip(self, context: StepContext) -> Result: try: unit = self.jhelper.get_leader_unit(self.app, self.model) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {self.app} leader") + LOG.debug("Unable to get %s leader", self.app) return Result(ResultType.FAILED, str(e)) try: action_result = self.jhelper.run_action(unit, self.model, action_cmd) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed") + LOG.debug("Running action %s on %s failed", action_cmd, unit) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) ca_list = action_result # Replace any dot with hyphen in ca cert name. 
@@ -359,34 +359,34 @@ def run(self, context: StepContext) -> Result: try: unit = self.jhelper.get_leader_unit(self.app, self.model) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {self.app} leader") + LOG.debug("Unable to get %s leader", self.app) return Result(ResultType.FAILED, str(e)) retry_with_feature_key = False action_params = {"name": self.cert_name} - LOG.debug(f"Running action {action_cmd} with params {action_params}") + LOG.debug("Running action %s with params %s", action_cmd, action_params) try: action_result = self.jhelper.run_action( unit, self.model, action_cmd, action_params ) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed: {str(e)}") + LOG.debug("Running action %s on %s failed: %s", action_cmd, unit, str(e)) retry_with_feature_key = True # For backward compatiblity reasons, try to run remove-ca-cert action # with feature_key as ca cert name if retry_with_feature_key: action_params = {"name": self.feature_key} - LOG.debug(f"Running action {action_cmd} with params {action_params}") + LOG.debug("Running action %s with params %s", action_cmd, action_params) try: action_result = self.jhelper.run_action( unit, self.model, action_cmd, action_params ) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed") + LOG.debug("Running action %s on %s failed", action_cmd, unit) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) return Result(ResultType.COMPLETED) @@ -447,13 +447,13 @@ def handle_list_outstanding_csrs( ca_provider_app, model, jhelper ) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {ca_provider_app} leader to print CSRs") + LOG.debug("Unable to get %s leader to print CSRs", ca_provider_app) raise click.ClickException(str(e)) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} 
failed") + LOG.debug("Running action %s failed", action_cmd) raise click.ClickException(str(e)) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) if action_result.get("return-code", 0) > 1: raise click.ClickException( "Unable to get outstanding certificate requests from CA" @@ -556,7 +556,7 @@ def prompt( self.app, self.model, self.jhelper ) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) if action_result.get("return-code", 0) > 1: raise click.ClickException( "Unable to get outstanding certificate requests from CA" @@ -651,10 +651,10 @@ def run(self, context: StepContext) -> Result: try: unit = self.jhelper.get_leader_unit(self.app, self.model) except LeaderNotFoundException as e: - LOG.debug(f"Unable to get {self.app} leader") + LOG.debug("Unable to get %s leader", self.app) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Process certs: {self.process_certs}") + LOG.debug("Process certs: %s", self.process_certs) for subject, request in self.process_certs.items(): csr = request.get("csr") csr = encode_base64_as_string(csr) @@ -681,19 +681,19 @@ def run(self, context: StepContext) -> Result: request.get("certificate"), self.ca_cert, self.ca_chain ) except binascii.Error as e: - LOG.debug(f"Unable to encode CA chain: {str(e)}") + LOG.debug("Unable to encode CA chain: %r", e) return Result(ResultType.FAILED, "Unable to encode CA chain") - LOG.debug(f"Running action {action_cmd} with params {action_params}") + LOG.debug("Running action %s with params %s", action_cmd, action_params) try: action_result = self.jhelper.run_action( unit, self.model, action_cmd, action_params ) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {unit} failed") + LOG.debug("Running action %s on %s failed", action_cmd, unit) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Result from 
action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) if action_result.get("return-code", 0) > 1: return Result( ResultType.FAILED, f"Action {action_cmd} on {unit} returned error" diff --git a/sunbeam-python/sunbeam/features/validation/feature.py b/sunbeam-python/sunbeam/features/validation/feature.py index f8c4e3cfa..27d48dafe 100644 --- a/sunbeam-python/sunbeam/features/validation/feature.py +++ b/sunbeam-python/sunbeam/features/validation/feature.py @@ -221,7 +221,7 @@ def run(self, context: StepContext) -> Result: charms = self.tfhelper.tfvar_map["charms"] tempest_k8s_config_var = charms["tempest-k8s"]["config"] roles = get_enabled_roles(self.deployment) - LOG.info(f"OpenStack roles enabled for Tempest: {roles}") + LOG.info("OpenStack roles enabled for Tempest: %s", roles) override_tfvars: dict[str, Any] = {} if self.config_changes.schedule is not None or roles: override_tfvars[tempest_k8s_config_var] = {} @@ -239,7 +239,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error configuring validation feature.") + LOG.exception("Error configuring validation feature") return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/features/vault/feature.py b/sunbeam-python/sunbeam/features/vault/feature.py index 99ac78a0c..05d7a1076 100644 --- a/sunbeam-python/sunbeam/features/vault/feature.py +++ b/sunbeam-python/sunbeam/features/vault/feature.py @@ -178,9 +178,9 @@ def get_vault_status(self, unit: str) -> dict: "-tls-skip-verify", "-format=json", ] - LOG.debug(f"Running vault command: {' '.join(cmd)}") + LOG.debug("Running vault command: %s", " ".join(cmd)) result = self._run_command_on_container(unit, cmd) - LOG.debug(f"Vault command result: {result}") + LOG.debug("Vault command result: %s", result) return json.loads(result.get("stdout")) def 
initialize_vault(self, unit: str, key_shares: int, key_threshold: int) -> dict: @@ -201,10 +201,10 @@ def initialize_vault(self, unit: str, key_shares: int, key_threshold: int) -> di "-tls-skip-verify", "-format=json", ] - LOG.debug(f"Running vault command: {' '.join(cmd)}") + LOG.debug("Running vault command: %s", " ".join(cmd)) result = self._run_command_on_container(unit, cmd) # Do not log result since the result has secret keys - LOG.debug(f"Vault command result code: {result['return-code']}") + LOG.debug("Vault command result code: %s", result["return-code"]) if result["return-code"] != 0: raise VaultCommandFailedException(result.get("stderr")) @@ -227,9 +227,9 @@ def unseal_vault(self, unit: str, key: str) -> dict: "-format=json", key, ] - LOG.debug(f"Running vault command: {' '.join(cmd[:-1])}") + LOG.debug("Running vault command: %s", " ".join(cmd[:-1])) result = self._run_command_on_container(unit, cmd) - LOG.debug(f"Vault command result: {result}") + LOG.debug("Vault command result: %s", result) if result["return-code"] != 0: raise VaultCommandFailedException(result.get("stderr")) @@ -253,10 +253,10 @@ def create_token(self, unit: str, root_token: str) -> dict: "-format=json", ] env = {"VAULT_TOKEN": root_token} - LOG.debug(f"Running vault command: {' '.join(cmd)}") + LOG.debug("Running vault command: %s", " ".join(cmd)) result = self._run_command_on_container(unit, cmd, env) # Do not log result since the result has root token - LOG.debug(f"Vault command result code: {result.get('return-code')}") + LOG.debug("Vault command result code: %s", result.get("return-code")) if result["return-code"] != 0: raise VaultCommandFailedException(result.get("stderr")) @@ -285,10 +285,10 @@ def is_skip(self, context: StepContext) -> Result: ) res = self.vhelper.get_vault_status(self.leader_unit) if res.get("initialized") is True: - LOG.info("Vault already initialized") + LOG.info("Vault is already initialized") return Result(ResultType.SKIPPED, "Vault already 
initialized.") except LeaderNotFoundException as e: - LOG.debug(f"Failed to get {VAULT_APPLICATION_NAME} leader", exc_info=True) + LOG.debug("Failed to get %s leader", VAULT_APPLICATION_NAME, exc_info=True) return Result(ResultType.FAILED, str(e)) except TimeoutError as e: LOG.debug("Timeout running vault status", exc_info=True) @@ -310,7 +310,7 @@ def run(self, context: StepContext) -> Result: ) keys: str = json.dumps(res) except LeaderNotFoundException as e: - LOG.debug(f"Failed to get {VAULT_APPLICATION_NAME} leader", exc_info=True) + LOG.debug("Failed to get %s leader", VAULT_APPLICATION_NAME, exc_info=True) return Result(ResultType.FAILED, str(e)) except TimeoutError as e: LOG.debug("Timeout running vault init", exc_info=True) @@ -426,11 +426,11 @@ def run(self, context: StepContext) -> Result: # Leader is unsealed, apply unseal on non-leader units. LOG.debug("Running vault unseal command on non-leader units") for unit in non_leader_units: - LOG.debug(f"Running vault unseal command on unit {unit}") + LOG.debug("Running vault unseal command on unit %s", unit) res = self.vhelper.unseal_vault(unit, self.unseal_key) unseal_status[unit] = self._get_remaining_keys_count(res) - LOG.debug(f"Unseal status non leader units: {unseal_status}") + LOG.debug("Unseal status non leader units: %s", unseal_status) # Some units are sealed if remaining is greater than 0 if any(v > 0 for k, v in unseal_status.items()): message = "Vault unseal operation status: " @@ -441,7 +441,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, message) except ApplicationNotFoundException as e: - LOG.debug(f"Failed to get info on {VAULT_APPLICATION_NAME}", exc_info=True) + LOG.debug("Failed to get info on %s", VAULT_APPLICATION_NAME, exc_info=True) return Result(ResultType.FAILED, str(e)) except TimeoutError as e: LOG.debug("Timeout running vault unseal", exc_info=True) @@ -492,7 +492,8 @@ def run(self, context: StepContext) -> Result: self.jhelper.grant_secret( 
OPENSTACK_MODEL, self.tmp_secret_name, VAULT_APPLICATION_NAME ) - LOG.debug(f"Created secret: {secret}") + # TODO(himax16): should this be logged? + LOG.debug("Created secret: %s", secret) # Run authorize-charm action on vault leader unit action_cmd = "authorize-charm" @@ -504,10 +505,10 @@ def run(self, context: StepContext) -> Result: # Remove the juju secret self.jhelper.remove_secret(OPENSTACK_MODEL, self.tmp_secret_name) - LOG.debug(f"Result from action {action_cmd}: {action_result}") + LOG.debug("Result from action %s: %s", action_cmd, action_result) except LeaderNotFoundException as e: - LOG.debug(f"Failed to get {VAULT_APPLICATION_NAME} leader", exc_info=True) + LOG.debug("Failed to get %s leader", VAULT_APPLICATION_NAME, exc_info=True) return Result(ResultType.FAILED, str(e)) except TimeoutError as e: LOG.debug("Timeout running vault token create", exc_info=True) @@ -516,7 +517,9 @@ def run(self, context: StepContext) -> Result: LOG.debug("Failed to run vault token create", exc_info=True) return Result(ResultType.FAILED, str(e)) except ActionFailedException as e: - LOG.debug(f"Running action {action_cmd} on {leader_unit} failed") + LOG.debug( + "Running action %s on %s failed", action_cmd, leader_unit, exc_info=True + ) return Result(ResultType.FAILED, str(e)) except JujuException as e: LOG.debug("Failed to run vault token create", exc_info=True) @@ -592,7 +595,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, json.dumps(consolidated_status)) except ApplicationNotFoundException as e: - LOG.debug(f"Failed to get info on {VAULT_APPLICATION_NAME}", exc_info=True) + LOG.debug("Failed to get info on %s", VAULT_APPLICATION_NAME, exc_info=True) return Result(ResultType.FAILED, str(e)) except TimeoutError as e: LOG.debug("Timeout running vault unseal", exc_info=True) diff --git a/sunbeam-python/sunbeam/hooks.py b/sunbeam-python/sunbeam/hooks.py index 935742616..4be676011 100644 --- a/sunbeam-python/sunbeam/hooks.py +++ 
b/sunbeam-python/sunbeam/hooks.py @@ -153,8 +153,10 @@ def flatten_dict(d, parent_key="", sep="."): missing_deps = _check_feature_gate_dependencies(gate_key, flattened) if missing_deps: LOG.error( - f"Cannot enable '{gate_key}': dependencies not enabled: " - f"{', '.join(missing_deps)}. Enable them first." + "Cannot enable %r: dependencies are not enabled: " + "%s. Enable them first.", + gate_key, + ", ".join(missing_deps), ) continue @@ -166,23 +168,27 @@ def flatten_dict(d, parent_key="", sep="."): if existing_gate.enabled != enabled_bool: client.cluster.update_feature_gate(gate_key, enabled_bool) LOG.info( - f"Updated gate '{gate_key}' in cluster " - f"(changed from {existing_gate.enabled} to {enabled_bool})" + "Updated gate %r in cluster (changed from %s to %s)", + gate_key, + existing_gate.enabled, + enabled_bool, ) except Exception: # Create if doesn't exist try: client.cluster.add_feature_gate(gate_key, enabled_bool) LOG.info( - f"Created gate '{gate_key}' in cluster (enabled={enabled_bool})" + "Created gate %r in cluster (enabled=%s)", + gate_key, + enabled_bool, ) except Exception as e: - LOG.warning(f"Failed to sync gate '{gate_key}': {e}") + LOG.warning("Failed to sync gate %r: %r", gate_key, e) except ClusterServiceUnavailableException: LOG.debug("Cluster service unavailable, skipping feature gate sync") except Exception as e: - LOG.warning(f"Failed to sync feature gates to cluster: {e}") + LOG.warning("Failed to sync feature gates to cluster: %r", e) def _sync_feature_gates_to_cluster(snap: Snap) -> None: @@ -217,7 +223,7 @@ def install(snap: Snap) -> None: """ setup_logging(snap.paths.common / "hooks.log") LOG.debug("Running install hook...") - logging.info(f"Setting default config: {DEFAULT_CONFIG}") + LOG.info("Setting default config: %s", DEFAULT_CONFIG) snap.config.set(DEFAULT_CONFIG) @@ -247,7 +253,7 @@ def configure(snap: Snap) -> None: :param snap: the snap reference """ setup_logging(snap.paths.common / "hooks.log") - logging.info("Running 
configure hook") + LOG.info("Running configure hook") _update_default_config(snap) diff --git a/sunbeam-python/sunbeam/log.py b/sunbeam-python/sunbeam/log.py index 6472c8aaf..a102c754c 100644 --- a/sunbeam-python/sunbeam/log.py +++ b/sunbeam-python/sunbeam/log.py @@ -72,7 +72,7 @@ def setup_root_logging(logfile: Path | None = None): ) ) logger.addHandler(file_handler) - logger.debug(f"Logging to {str(logfile)!r}") + logger.debug("Logging to %r", str(logfile)) def setup_logging(logfile: Union[Path, str]) -> None: diff --git a/sunbeam-python/sunbeam/provider/common/multiregion.py b/sunbeam-python/sunbeam/provider/common/multiregion.py index 0bd7bd5d9..42f364ce8 100644 --- a/sunbeam-python/sunbeam/provider/common/multiregion.py +++ b/sunbeam-python/sunbeam/provider/common/multiregion.py @@ -74,7 +74,7 @@ def connect_to_region_controller( f"as the primary region: {deployment.get_region_name()}" ) - logging.debug( + LOG.debug( "Primary region name: %s, secondary region name: %s", primary_region_name, deployment.get_region_name(), diff --git a/sunbeam-python/sunbeam/provider/local/commands.py b/sunbeam-python/sunbeam/provider/local/commands.py index 348524ae9..5af7d2b44 100644 --- a/sunbeam-python/sunbeam/provider/local/commands.py +++ b/sunbeam-python/sunbeam/provider/local/commands.py @@ -657,8 +657,8 @@ def bootstrap( # noqa: C901 "The database topology can also be set via manifest." ) - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") - LOG.debug(f"Manifest used for deployment - features: {manifest.features}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) + LOG.debug("Manifest used for deployment - features: %s", manifest.features) # Bootstrap node must always have the control role or region controller # role. 
@@ -684,7 +684,7 @@ def bootstrap( # noqa: C901 roles_str = ",".join(role.name for role in roles) pretty_roles = ", ".join(role.name.lower() for role in roles) - LOG.debug(f"Bootstrap node: roles {roles_str}") + LOG.debug("Bootstrap node: roles %s", roles_str) data_location = snap.paths.user_data @@ -725,7 +725,7 @@ def bootstrap( # noqa: C901 try: local_management_ip = _resolve_local_ip_from_cidr(management_cidr) except ValueError as e: - LOG.error(f"Error resolving local management ip from cidr: {e}") + LOG.exception("Error resolving local management ip from cidr") raise click.ClickException("Cannot resolve local management ip") from e plan: list[BaseStep] = [] @@ -777,7 +777,7 @@ def bootstrap( # noqa: C901 # Store deployment type for feature gate sync behavior client.cluster.update_config(DEPLOYMENT_TYPE_CONFIG_KEY, deployment.type) proxy_settings = deployment.get_proxy_settings() - LOG.debug(f"Proxy settings: {proxy_settings}") + LOG.debug("Proxy settings: %s", proxy_settings) if juju_controller: plan11: list[BaseStep] = [] @@ -1021,7 +1021,7 @@ def configure_sriov( node = client.cluster.get_node_info(fqdn) if "compute" not in node["role"]: - LOG.info("SR-IOV can only be configured on compute nodes.") + LOG.info("SR-IOV can only be configured on compute nodes") return manifest = deployment.get_manifest(manifest_path) @@ -1094,7 +1094,7 @@ def configure_dpdk( node = client.cluster.get_node_info(fqdn) if "compute" not in node["role"]: - LOG.info("DPDK can only be configured on compute nodes.") + LOG.info("DPDK can only be configured on compute nodes") return manifest = deployment.get_manifest(manifest_path) @@ -1383,7 +1383,7 @@ def join( # noqa: C901 roles_str = roles_to_str_list(roles) pretty_roles = ", ".join(role_.name.lower() for role_ in roles) - LOG.debug(f"Node joining the cluster with roles: {pretty_roles}") + LOG.debug("Node joining the cluster with roles: %s", pretty_roles) preflight_checks: list[Check] = [] 
preflight_checks.append(SystemRequirementsCheck()) @@ -1976,14 +1976,14 @@ def configure_cmd( # Validate manifest file manifest = deployment.get_manifest(manifest_path) - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") - LOG.debug(f"Manifest used for deployment - features: {manifest.features}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) + LOG.debug("Manifest used for deployment - features: %s", manifest.features) name = utils.get_fqdn(deployment.get_management_cidr()) jhelper = deployment.get_juju_helper() jhelper_keystone = deployment.get_juju_helper(keystone=True) if not jhelper.model_exists(OPENSTACK_MODEL): - LOG.error(f"Expected model {OPENSTACK_MODEL} missing") + LOG.error("Expected model %s is missing", OPENSTACK_MODEL) raise click.ClickException("Please run `sunbeam cluster bootstrap` first") # Check if the node is a network node node = client.cluster.get_node_info(name) diff --git a/sunbeam-python/sunbeam/provider/local/deployment.py b/sunbeam-python/sunbeam/provider/local/deployment.py index c98fe4c1e..4a2a5c4d3 100644 --- a/sunbeam-python/sunbeam/provider/local/deployment.py +++ b/sunbeam-python/sunbeam/provider/local/deployment.py @@ -77,7 +77,7 @@ def __init__(self, **data): def _load_juju_account(self) -> JujuAccount | None: try: juju_account = JujuAccount.load(snaphelpers.Snap().paths.user_data) - LOG.debug(f"Local account found: {juju_account.user}") + LOG.debug("Local account found: %s", juju_account.user) return juju_account except JujuAccountNotFound: LOG.debug("No juju account found", exc_info=True) diff --git a/sunbeam-python/sunbeam/provider/local/steps.py b/sunbeam-python/sunbeam/provider/local/steps.py index 163890345..1c4821e24 100644 --- a/sunbeam-python/sunbeam/provider/local/steps.py +++ b/sunbeam-python/sunbeam/provider/local/steps.py @@ -215,20 +215,20 @@ def prompt_for_nics( current += 1 if not physnets: if len(candidate_nics) == 0: - LOG.debug("No more candidate nics available, stopping 
prompt.") + LOG.debug("No more candidate nics available, stopping prompt") break another = physnet_qs["configure_more"].ask() if not another: LOG.debug( - "User chose not to configure more physnets, stopping prompt." + "User chose not to configure more physnets, stopping prompt" ) break else: if current >= len(physnets): - LOG.debug("Reached the end of physnets, stopping prompt.") + LOG.debug("Reached the end of physnets, stopping prompt") break if len(candidate_nics) == 0: - raise SunbeamException("No more candidate nics available.") + raise SunbeamException("No more candidate nics available") return physnet_mapping @@ -462,8 +462,9 @@ def _update_microcluster_status(self, status: dict, microcluster_status: dict): == node_name ): LOG.debug( - f"Node name matched with address {node_name}, change name " - f"to {member}" + "Node name matched with address %s, change name to %s", + node_name, + member, ) node_name = member node_status["name"] = member @@ -526,7 +527,7 @@ def prompt( # noqa: C901 available and should provide a reasonable default where possible. """ if not console: - LOG.info("No console available, skipping prompt.") + LOG.info("No console available, skipping prompt") return self.variables = sunbeam.core.questions.load_answers( @@ -557,7 +558,7 @@ def prompt( # noqa: C901 excluded_devices[self.node_name] = [] if not self.clear_previous_config: - logging.debug("Picking up previous answers.") + LOG.debug("Picking up previous answers") for device_spec in previous_pci_whitelist: if device_spec not in pci_whitelist: pci_whitelist.append(device_spec) @@ -571,7 +572,7 @@ def prompt( # noqa: C901 else: # The user requested to drop the previous answers instead of merging the # device lists with the previous ones. 
- logging.debug("Dropping previous answers.") + LOG.debug("Dropping previous answers") if not self.accept_defaults: self._do_prompt(pci_whitelist, excluded_devices, show_hint) @@ -588,7 +589,8 @@ def prompt( # noqa: C901 ) except (UnitNotFoundException, ActionFailedException) as e: LOG.debug( - f"Failed fetching GPUs from node {self.node_name}", + "Failed fetching GPUs from node %s", + self.node_name, exc_info=True, ) raise click.ClickException( @@ -663,7 +665,7 @@ def _do_prompt( # pci@0000:03:00.0 enp3s0f0r0 network Ethernet interface # pci@0000:03:00.0 enp3s0f0r1 network Ethernet interface LOG.debug( - "Duplicate PCI address: %s, interface names: %s %s.", + "Duplicate PCI address: %s, interface names: %s %s", pci_address, nic_name, pci_address_map[pci_address], @@ -783,7 +785,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {name} configuration" - LOG.error(msg, exc_info=True) + LOG.exception(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) @@ -909,7 +911,7 @@ def _prompt_nics( dpdk_manifest_ports = self._get_dpdk_manifest_ports() or {} if dpdk_manifest_ports.get(self.node_name): self.nics = dpdk_manifest_ports[self.node_name] - logging.debug("DPDK ports specified through the manifest: %s", self.nics) + LOG.debug("DPDK ports specified through the manifest: %s", self.nics) return with console: @@ -921,30 +923,30 @@ def _prompt_nics( candidate_nics: list[dict] = [] enabled_nic_names: list[str] = [] - LOG.debug("Determining DPDK candidate interfaces.") + LOG.debug("Determining DPDK candidate interfaces") for nic in all_nics: if not nic.get("name"): # Note that the interface name will no longer be visible once # assigned to the "vfio-pci" driver. 
- LOG.debug("No interface name: %s, skipping.", nic.get("pci_address")) + LOG.debug("No interface name: %s, skipping", nic.get("pci_address")) continue if nic.get("pf_pci_address"): - LOG.debug("Ignoring SR-IOV VF: %s.", nic.get("name")) + LOG.debug("Ignoring SR-IOV VF: %s", nic.get("name")) continue if not nic.get("pci_address"): - LOG.debug("Not a PCI device: %s.", nic.get("name")) + LOG.debug("Not a PCI device: %s", nic.get("name")) continue if nic.get("configured"): - LOG.debug("The interface has an IP assigned, skipping.") + LOG.debug("The interface has an IP assigned, skipping") continue candidate_nics.append(nic) if not candidate_nics: - LOG.info("No candidate DPDK interfaces.") + LOG.info("No candidate DPDK interfaces") return - console.print("Configuring DPDK physical interfaces.") + console.print("Configuring DPDK physical interfaces") console.print( "\nWARNING: the specified interfaces will be reconfigured to use a " "DPDK-compatible driver (vfio-pci by default) and will no longer " @@ -995,7 +997,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {name} configuration" - LOG.error(msg, exc_info=True) + LOG.exception(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/provider/maas/commands.py b/sunbeam-python/sunbeam/provider/maas/commands.py index 0cb5124ef..ae57a0050 100644 --- a/sunbeam-python/sunbeam/provider/maas/commands.py +++ b/sunbeam-python/sunbeam/provider/maas/commands.py @@ -330,8 +330,8 @@ def bootstrap( # Validate manifest file manifest = deployment.get_manifest(manifest_path) - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") - LOG.debug(f"Manifest used for deployment - features: {manifest.features}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) + LOG.debug("Manifest used for deployment - features: %s", manifest.features) deployments = 
DeploymentsConfig.load(deployment_path(Snap())) @@ -536,11 +536,11 @@ def bootstrap( deployments.update_deployment(deployment) if proxy_from_user and isinstance(proxy_from_user, dict): - LOG.debug(f"Writing proxy information to clusterdb: {proxy_from_user}") + LOG.debug("Writing proxy information to clusterdb: %s", proxy_from_user) client.cluster.update_config(PROXY_CONFIG_KEY, json.dumps(proxy_from_user)) # Store deployment type for feature gate sync behavior - LOG.debug(f"Writing deployment type to clusterdb: {deployment.type}") + LOG.debug("Writing deployment type to clusterdb: %s", deployment.type) client.cluster.update_config(DEPLOYMENT_TYPE_CONFIG_KEY, deployment.type) console.print("Bootstrap controller components complete.") @@ -623,8 +623,8 @@ def deploy( manifest = deployment.get_manifest(manifest_path) - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") - LOG.debug(f"Manifest used for deployment - features: {manifest.features}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) + LOG.debug("Manifest used for deployment - features: %s", manifest.features) proxy_settings = deployment.get_proxy_settings() tfhelper_sunbeam_machine = deployment.get_tfhelper("sunbeam-machine-plan") @@ -1009,12 +1009,12 @@ def configure_cmd( # Validate manifest file manifest = deployment.get_manifest(manifest_path) - LOG.debug(f"Manifest used for deployment - core: {manifest.core}") - LOG.debug(f"Manifest used for deployment - features: {manifest.features}") + LOG.debug("Manifest used for deployment - core: %s", manifest.core) + LOG.debug("Manifest used for deployment - features: %s", manifest.features) jhelper = JujuHelper(deployment.juju_controller) if not jhelper.model_exists(OPENSTACK_MODEL): - LOG.error(f"Expected model {OPENSTACK_MODEL} missing") + LOG.error("Expected model %s is missing", OPENSTACK_MODEL) raise click.ClickException("Please run `sunbeam cluster bootstrap` first") admin_credentials = retrieve_admin_credentials(jhelper, 
deployment, OPENSTACK_MODEL) # Add OS_INSECURE as https not working with terraform openstack provider. @@ -1467,7 +1467,7 @@ def _run_maas_checks(checks: list[DiagnosticsCheck], console: Console) -> list[d """ check_results = [] for check in checks: - LOG.debug(f"Starting check {check.name!r}") + LOG.debug("Starting check %r", check.name) message = f"{check.description}..." with console.status(message): results = check.run() @@ -1479,7 +1479,12 @@ def _run_maas_checks(checks: list[DiagnosticsCheck], console: Console) -> list[d for result in results: passed = result.passed.value - LOG.debug(f"{result.name=!r}, {passed=!r}, {result.message=!r}") + LOG.debug( + "Result for %r: passed=%r, message=%r", + result.name, + passed, + result.message, + ) console.print( message, result.message, @@ -1501,7 +1506,7 @@ def _run_maas_meta_checks( check_results = [] for check in checks: - LOG.debug(f"Starting check {check.name!r}") + LOG.debug("Starting check %r", check.name) message = f"{check.description}..." 
with console.status(message): results = check.run() @@ -1550,7 +1555,7 @@ def validate_machine_cmd(ctx: click.Context, machine: str): with console.status(f"Fetching {machine} ..."): try: machine_obj = get_machine(client, machine) - LOG.debug(f"{machine_obj=!r}") + LOG.debug("Fetched machine: %r", machine_obj) except ValueError as e: console.print("Error:", e) sys.exit(1) diff --git a/sunbeam-python/sunbeam/provider/maas/steps.py b/sunbeam-python/sunbeam/provider/maas/steps.py index 8a3f09acb..b1a31ce0f 100644 --- a/sunbeam-python/sunbeam/provider/maas/steps.py +++ b/sunbeam-python/sunbeam/provider/maas/steps.py @@ -236,7 +236,9 @@ def run(self) -> DiagnosticsResult: """List machines roles, fail if machine is missing roles.""" super().run assigned_roles = self.machine["roles"] - LOG.debug(f"{self.machine['hostname']=!r} assigned roles: {assigned_roles!r}") + LOG.debug( + "Machine %r assigned roles: %r", self.machine["hostname"], assigned_roles + ) if not assigned_roles: return DiagnosticsResult.fail( self.name, @@ -280,7 +282,9 @@ def run(self) -> DiagnosticsResult: machine=self.machine["hostname"], ) assigned_roles = self.machine["roles"] - LOG.debug(f"{self.machine['hostname']=!r} assigned roles: {assigned_roles!r}") + LOG.debug( + "Machine %r assigned roles: %r", self.machine["hostname"], assigned_roles + ) if not assigned_roles: return DiagnosticsResult.fail( self.name, @@ -289,26 +293,32 @@ def run(self) -> DiagnosticsResult: machine=self.machine["hostname"], ) assigned_spaces = self.machine["spaces"] - LOG.debug(f"{self.machine['hostname']=!r} assigned spaces: {assigned_spaces!r}") + LOG.debug( + "Machine %r assigned spaces: %r", self.machine["hostname"], assigned_spaces + ) required_networks: set[Networks] = set() for role in assigned_roles: required_networks.update( maas_deployment.ROLE_NETWORK_MAPPING[maas_deployment.RoleTags(role)] ) LOG.debug( - f"{self.machine['hostname']=!r} required networks: {required_networks!r}" + "Machine %r required networks: %r", 
+ self.machine["hostname"], + required_networks, ) required_spaces: set[str] = set() missing_spaces: set[str] = set() for network in required_networks: corresponding_space = network_to_space_mapping[network.value] if not corresponding_space: - LOG.debug(f"{network.value=!r} has no corresponding space") + LOG.debug("Network %r has no corresponding space", network.value) continue required_spaces.add(corresponding_space) if corresponding_space and corresponding_space not in assigned_spaces: missing_spaces.add(corresponding_space) - LOG.debug(f"{self.machine['hostname']=!r} missing spaces: {missing_spaces!r}") + LOG.debug( + "Machine %r missing spaces: %r", self.machine["hostname"], missing_spaces + ) if not assigned_spaces or missing_spaces: return DiagnosticsResult.fail( self.name, @@ -342,7 +352,9 @@ def __init__(self, machine: dict): def run(self) -> DiagnosticsResult: """Check machine has storage assigned if required.""" assigned_roles = self.machine["roles"] - LOG.debug(f"{self.machine['hostname']=!r} assigned roles: {assigned_roles!r}") + LOG.debug( + "Machine %r assigned roles: %r", self.machine["hostname"], assigned_roles + ) if not assigned_roles: return DiagnosticsResult.fail( self.name, @@ -410,7 +422,9 @@ def _has_neutron_nic(self) -> bool: def run(self) -> DiagnosticsResult: """Check machine has neutron nic if required.""" assigned_roles = self.machine["roles"] - LOG.debug(f"{self.machine['hostname']=!r} assigned roles: {assigned_roles!r}") + LOG.debug( + "Machine %r assigned roles: %r", self.machine["hostname"], assigned_roles + ) if not assigned_roles: return DiagnosticsResult.fail( self.name, @@ -602,13 +616,18 @@ def run(self) -> DiagnosticsResult: def _run_check_list(checks: Sequence[DiagnosticsCheck]) -> list[DiagnosticsResult]: check_results = [] for check in checks: - LOG.debug(f"Starting check {check.name}") + LOG.debug("Starting check %s", check.name) results = check.run() if isinstance(results, DiagnosticsResult): results = [results] for result 
in results: passed = result.passed.value - LOG.debug(f"{result.name=!r}, {passed=!r}, {result.message=!r}") + LOG.debug( + "Result for %r: passed=%r, message=%r", + result.name, + passed, + result.message, + ) check_results.extend(results) return check_results @@ -772,7 +791,7 @@ def run(self) -> DiagnosticsResult: for role in machine["roles"]: zone_role_counts[zone].setdefault(role, 0) zone_role_counts[zone][role] += 1 - LOG.debug(f"{zone_role_counts=!r}") + LOG.debug("Zone role counts: %r", zone_role_counts) unbalanced_roles = [] distribution = "" for role in maas_deployment.RoleTags.values(): @@ -1200,7 +1219,7 @@ def is_skip(self, context: StepContext) -> Result: try: controller = self.get_controller(self.controller) except ControllerNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Failed to get controller %s: %r", self.controller, e) return Result(ResultType.FAILED, f"Controller {self.controller} not found") controller_machines = controller.get("controller-machines") @@ -1233,8 +1252,9 @@ def is_skip(self, context: StepContext) -> Result: if len(machines) < self.n: LOG.debug( - f"Found {len(machines)} juju controllers," - f" need {self.n} to scale, skipping..." 
+ "Found %d juju controllers, need %d to scale, skipping...", + len(machines), + self.n, ) return Result(ResultType.SKIPPED) machines = sorted(machines, key=lambda x: x["hostname"]) @@ -1409,14 +1429,14 @@ def __init__(self, client: Client, maas_client: maas_client.MaasClient): def is_skip(self, context: StepContext) -> Result: """Determines if the step should be skipped or not.""" maas_machines = maas_client.list_machines(self.maas_client) - LOG.debug(f"Machines fetched: {maas_machines}") + LOG.debug("Machines fetched: %s", maas_machines) filtered_machines = [] for machine in maas_machines: if set(machine["roles"]).intersection( set(maas_deployment.RoleTags.enabled_values()) ): filtered_machines.append(machine) - LOG.debug(f"Machines containing worker roles: {filtered_machines}") + LOG.debug("Machines containing worker roles: %s", filtered_machines) if filtered_machines is None or len(filtered_machines) == 0: return Result(ResultType.FAILED, "Maas deployment has no machines.") clusterd_nodes = self.client.cluster.list_nodes() @@ -1543,7 +1563,7 @@ def run(self, context: StepContext) -> Result: """Deploy machines in Juju.""" for node in self.nodes_to_deploy: self.update_status(context, f"deploying {node['name']}") - LOG.debug(f"Adding machine {node['name']} to model {self.model}") + LOG.debug("Adding machine %s to model %s", node["name"], self.model) juju_machine = self.jhelper.add_machine( "system-id=" + node["systemid"], self.model, @@ -1555,7 +1575,7 @@ def run(self, context: StepContext) -> Result: self.update_status(context, "waiting for machines to deploy") machines = self.jhelper.get_machines(self.model) for node in self.nodes_to_update: - LOG.debug(f"Updating machine {node['name']} in model {self.model}") + LOG.debug("Updating machine %s in model %s", node["name"], self.model) for machine_id, machine in machines.items(): if machine.hostname == node["name"]: self.client.cluster.update_node_info( @@ -1589,7 +1609,7 @@ def __init__( def is_skip(self, context: 
StepContext) -> Result: """Determines if the step should be skipped or not.""" maas_machines = maas_client.list_machines(self.maas_client) - LOG.debug(f"Machines fetched: {maas_machines}") + LOG.debug("Machines fetched: %s", maas_machines) filtered_machines = [] for machine in maas_machines: if set(machine["roles"]).intersection( @@ -1598,13 +1618,13 @@ def is_skip(self, context: StepContext) -> Result: } ): filtered_machines.append(machine) - LOG.debug(f"Machines containing infra role: {filtered_machines}") + LOG.debug("Machines containing infra role: %s", filtered_machines) if filtered_machines is None or len(filtered_machines) == 0: return Result(ResultType.FAILED, "Maas deployment has no infra machines.") self.machines_to_deploy = filtered_machines.copy() juju_machines = self.jhelper.get_machines(self.model) - LOG.debug(f"Machines already deployed: {juju_machines}") + LOG.debug("Machines already deployed: %s", juju_machines) for filtered_machine in filtered_machines: for id, deployed_machine in juju_machines.items(): @@ -1619,7 +1639,7 @@ def run(self, context: StepContext) -> Result: """Deploy machines in Juju.""" for machine in self.machines_to_deploy: self.update_status(context, f"deploying {machine['hostname']}") - LOG.debug(f"Adding machine {machine['hostname']} to model {self.model}") + LOG.debug("Adding machine %s to model %s", machine["hostname"], self.model) self.jhelper.add_machine( "system-id=" + machine["system_id"], self.model, @@ -2149,9 +2169,9 @@ def models(self) -> list[str]: return models LOG.debug( - f"Model {self.deployment.openstack_machines_model} not found. This is " - "expected when cluster is bootstrapped but not deployed yet. " - "Skipping model." + "Model %s is not found. This is expected when cluster is bootstrapped but" + " not yet deployed. 
Skipping model.", + self.deployment.openstack_machines_model, ) return models @@ -2381,7 +2401,8 @@ def _get_pci_config( ) except (UnitNotFoundException, ActionFailedException) as e: LOG.debug( - f"Failed fetching GPUs from node {node_name}", + "Failed fetching GPUs from node %s", + node_name, exc_info=True, ) raise click.ClickException( @@ -2430,7 +2451,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {node_name} configuration" - LOG.error(msg, exc_info=True) + LOG.exception(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) @@ -2601,7 +2622,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {node_name} configuration" - LOG.error(msg, exc_info=True) + LOG.exception(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/cinder_volume.py b/sunbeam-python/sunbeam/steps/cinder_volume.py index bb5db05b1..67c98dca1 100644 --- a/sunbeam-python/sunbeam/steps/cinder_volume.py +++ b/sunbeam-python/sunbeam/steps/cinder_volume.py @@ -330,7 +330,7 @@ def run(self, context: StepContext) -> Result: try: resources = self.tfhelper.state_list() except TerraformException as e: - LOG.debug(f"Failed to list terraform state: {str(e)}") + LOG.debug("Failed to list terraform state: %r", e) return Result(ResultType.FAILED, "Failed to list terraform state") for resource in resources: @@ -338,7 +338,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.state_rm(resource) except TerraformException as e: - LOG.debug(f"Failed to remove resource {resource}: {str(e)}") + LOG.debug("Failed to remove resource %s: %r", resource, e) return Result( ResultType.FAILED, f"Failed to remove resource {resource} from state", diff --git a/sunbeam-python/sunbeam/steps/cluster_status.py b/sunbeam-python/sunbeam/steps/cluster_status.py 
index e20f3b0e4..431b4276e 100644 --- a/sunbeam-python/sunbeam/steps/cluster_status.py +++ b/sunbeam-python/sunbeam/steps/cluster_status.py @@ -237,7 +237,7 @@ def _compute_status(self) -> dict: try: _status_model = self._get_machines_status(model) except ModelNotFoundException as e: - LOG.debug(f"Model {model} not found", exc_info=True) + LOG.debug("Model %s not found", model, exc_info=True) raise SunbeamException("Failed to query model status.") from e status[model] = merge_dict( _status_model, diff --git a/sunbeam-python/sunbeam/steps/clusterd.py b/sunbeam-python/sunbeam/steps/clusterd.py index 3eeaee6f9..c6b10df42 100644 --- a/sunbeam-python/sunbeam/steps/clusterd.py +++ b/sunbeam-python/sunbeam/steps/clusterd.py @@ -82,12 +82,12 @@ def is_skip(self, context: StepContext) -> Result: self.fqdn = utils.get_fqdn(self.management_cidr) try: members = self.client.cluster.get_cluster_members() - LOG.info(members) + LOG.info("Cluster members: %s", members) member_names = [member.get("name") for member in members] if self.fqdn in member_names: return Result(ResultType.SKIPPED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to get cluster members: %r", e) if "Sunbeam Cluster not initialized" in str(e): return Result(ResultType.COMPLETED) return Result(ResultType.FAILED, str(e)) @@ -112,7 +112,7 @@ def run(self, context: StepContext) -> Result: LOG.debug("Bootstrapped clusterd on %s", address) return Result(ResultType.COMPLETED) except ClusterAlreadyBootstrappedException: - LOG.debug("Cluster already bootstrapped") + LOG.debug("Cluster is already bootstrapped") return Result(ResultType.COMPLETED) except Exception as e: return Result(ResultType.FAILED, str(e)) @@ -248,7 +248,7 @@ def is_skip(self, context: StepContext) -> Result: """ try: members = self.client.cluster.get_cluster_members() - LOG.debug(members) + LOG.debug("Cluster members: %s", members) member_names = [member.get("name") for member in members] if self.node_name in 
member_names: return Result(ResultType.SKIPPED) @@ -261,7 +261,7 @@ def is_skip(self, context: StepContext) -> Result: LOG.warning("Reusing existing token for %s", self.node_name) return Result(ResultType.SKIPPED, token_d.get(self.node_name)) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to check existing token for %s: %r", self.node_name, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -273,7 +273,7 @@ def run(self, context: StepContext) -> Result: LOG.debug("Generated token for node %s", self.node_name) return Result(result_type=ResultType.COMPLETED, message=token) except TokenAlreadyGeneratedException as e: - LOG.warning(e) + LOG.warning("Token for %s is already generated: %r", self.node_name, e) return Result(ResultType.FAILED, str(e)) @@ -305,12 +305,12 @@ def is_skip(self, context: StepContext) -> Result: """ try: members = self.client.cluster.get_cluster_members() - LOG.info(members) + LOG.info("Cluster members: %s", members) member_names = [member.get("name") for member in members] if self.fqdn in member_names: return Result(ResultType.SKIPPED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to get cluster members: %r", e) if "Sunbeam Cluster not initialized" in str(e): return Result(ResultType.COMPLETED) return Result(ResultType.FAILED, str(e)) @@ -326,10 +326,10 @@ def run(self, context: StepContext) -> Result: token=self.token, role=self.role, ) - LOG.info(self.token) + LOG.info("Node joined successfully with token: %s", self.token) return Result(result_type=ResultType.COMPLETED, message=self.token) except (NodeAlreadyExistsException, NodeJoinException) as e: - LOG.warning(e) + LOG.warning("Failed to join the cluster: %r", e) return Result(ResultType.FAILED, str(e)) @@ -344,9 +344,9 @@ def run(self, context: StepContext) -> Result: """List nodes in the sunbeam cluster.""" try: members = self.client.cluster.get_cluster_members() - 
LOG.debug(f"Members: {members}") + LOG.debug("Cluster members: %s", members) nodes = self.client.cluster.list_nodes() - LOG.debug(f"Nodes: {nodes}") + LOG.debug("Nodes: %s", nodes) nodes_dict = { member.get("name"): {"status": member.get("status")} @@ -357,7 +357,7 @@ def run(self, context: StepContext) -> Result: return Result(result_type=ResultType.COMPLETED, message=nodes_dict) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to list nodes: %r", e) return Result(ResultType.FAILED, str(e)) @@ -385,7 +385,7 @@ def run(self, context: StepContext) -> Result: ) return Result(result_type=ResultType.COMPLETED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to update node info for %s: %r", self.node_name, e) return Result(ResultType.FAILED, str(e)) @@ -409,10 +409,10 @@ def run(self, context: StepContext) -> Result: NodeNotExistInClusterException, ) as e: # Consider these exceptions as soft ones - LOG.debug(e) + LOG.debug("Failed to remove node %s: %r", self.node_name, e) return Result(ResultType.COMPLETED) except (LastNodeRemovalFromClusterException, Exception) as e: - LOG.debug(e) + LOG.debug("Failed to remove node %s: %r", self.node_name, e) return Result(ResultType.FAILED, str(e)) @@ -482,7 +482,7 @@ def prompt( continue_operation = question_bank.continue_operation.ask() self.continue_operation = continue_operation except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to check for node and prompt user: %r", e) self.cluster_unavailable = True def run(self, context: StepContext) -> Result: @@ -527,9 +527,9 @@ def is_skip(self, context: StepContext) -> Result: """ try: user = self.client.cluster.get_juju_user(self.username) - LOG.debug(f"JujuUser {user} found in database.") + LOG.debug("JujuUser %s found in database", user) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to get Juju user %s from cluster: %r", self.username, e) return 
Result(ResultType.FAILED, str(e)) except JujuUserNotFoundException: return Result(ResultType.COMPLETED) @@ -542,7 +542,7 @@ def run(self, context: StepContext) -> Result: self.client.cluster.add_juju_user(self.username, self.token) return Result(result_type=ResultType.COMPLETED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to add Juju user %s to cluster: %r", self.username, e) return Result(ResultType.FAILED, str(e)) @@ -567,16 +567,18 @@ def is_skip(self, context: StepContext) -> Result: """ try: user = self.client.cluster.get_juju_user(self.username) - LOG.debug("Juju user %s is found in database.", self.username) + LOG.debug("Juju user %s is found in the database", self.username) if user.get("token") == self.token: - LOG.debug("Juju user token is up to date, skipping update.") + LOG.debug( + "Juju user %s token is up to date, skipping update", self.username + ) return Result(ResultType.SKIPPED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to get Juju user %s from cluster: %r", self.username, e) return Result(ResultType.FAILED, str(e)) except JujuUserNotFoundException: LOG.debug( - "Juju user %s is not found in database, adding user.", self.username + "Juju user %s is not found in the database, adding user", self.username ) return Result(ResultType.COMPLETED) @@ -588,11 +590,12 @@ def run(self, context: StepContext) -> Result: self.client.cluster.update_juju_user(self.username, self.token) except JujuUserNotFoundException: LOG.debug( - "Juju user %s is not found in database, adding user.", self.username + "Juju user %s is not found in the database, adding user", + self.username, ) self.client.cluster.add_juju_user(self.username, self.token) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to update Juju user %s in cluster: %r", self.username, e) return Result(ResultType.FAILED, str(e)) return Result(result_type=ResultType.COMPLETED) @@ -666,7 +669,7 @@ 
def is_skip(self, context: StepContext) -> Result: self.networks = variables.get("bootstrap", {}).get("management_cidr") juju_controller = JujuController.load(self.client) - LOG.debug(f"Controller(s) present at: {juju_controller.api_endpoints}") + LOG.debug("Controller(s) present at: %s", juju_controller.api_endpoints) if not juju_controller.api_endpoints: LOG.debug( "Controller endpoints are empty in database, so update the " @@ -680,13 +683,13 @@ def is_skip(self, context: StepContext) -> Result: # Controller found, and parsed successfully return Result(ResultType.SKIPPED) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to get controller endpoints: %r", e) return Result(ResultType.FAILED, str(e)) except ConfigItemNotFoundException: pass # Credentials missing, schedule for record except TypeError as e: # Note(gboutry): Credentials invalid, schedule for record - LOG.warning(e) + LOG.warning("Failed to get controller endpoints: %r", e) return Result(ResultType.COMPLETED) @@ -703,7 +706,7 @@ def run(self, context: StepContext) -> Result: try: juju_controller.write(self.client) except ClusterServiceUnavailableException as e: - LOG.debug(e) + LOG.debug("Failed to update controller endpoints: %r", e) return Result(ResultType.FAILED, str(e)) return Result(result_type=ResultType.COMPLETED) @@ -777,7 +780,7 @@ def run(self, context: StepContext) -> Result: timeout=SUNBEAM_CLUSTERD_APP_TIMEOUT, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Failed to deploy sunbeam-clusterd application: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/configure.py b/sunbeam-python/sunbeam/steps/configure.py index 12abc0b92..7b1a605a0 100644 --- a/sunbeam-python/sunbeam/steps/configure.py +++ b/sunbeam-python/sunbeam/steps/configure.py @@ -556,7 +556,7 @@ def run(self, context: StepContext) -> Result: is_network_node = "network" in node_roles 
if bridge_mapping is None and not split_roles: - LOG.debug(f"No NIC found for {name}, skipping.") + LOG.debug("No NIC found for %s, skipping.", name) continue # When split-roles is active, only network nodes get diff --git a/sunbeam-python/sunbeam/steps/hypervisor.py b/sunbeam-python/sunbeam/steps/hypervisor.py index 479ef8365..a6edcef22 100644 --- a/sunbeam-python/sunbeam/steps/hypervisor.py +++ b/sunbeam-python/sunbeam/steps/hypervisor.py @@ -3,7 +3,6 @@ import json import logging -import traceback import typing import tenacity @@ -242,24 +241,26 @@ def is_skip(self, context: StepContext) -> Result: node = self.client.cluster.get_node_info(self.node_name) self.machine_id = str(node.get("machineid")) except NodeNotExistInClusterException: - LOG.debug(f"Machine {self.node_name} does not exist, skipping.") + LOG.debug("Machine %s does not exist, skipping", self.node_name) return Result(ResultType.SKIPPED) try: application = self.jhelper.get_application(APPLICATION, self.model) except ApplicationNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Failed to get hypervisor application: %r", e) return Result( ResultType.SKIPPED, "Hypervisor application has not been deployed yet" ) for unit_name, unit in application.units.items(): if unit.machine == self.machine_id: - LOG.debug(f"Unit {unit_name} is deployed on machine: {self.machine_id}") + LOG.debug( + "Unit %s is deployed on machine: %s", unit_name, self.machine_id + ) self.unit = unit_name break if not self.unit: - LOG.debug(f"Unit is not deployed on machine: {self.machine_id}, skipping.") + LOG.debug("Unit is not deployed on machine: %s, skipping.", self.machine_id) return Result(ResultType.SKIPPED) try: results = self.jhelper.run_action(self.unit, self.model, "running-guests") @@ -269,7 +270,7 @@ def is_skip(self, context: StepContext) -> Result: if result := results.get("result"): guests = json.loads(result) - LOG.debug(f"Found guests on hypervisor: {guests}") + LOG.debug("Found guests on hypervisor: %s", 
guests) if guests and not self.force: return Result( ResultType.FAILED, @@ -297,7 +298,7 @@ def run(self, context: StepContext) -> Result: try: self.jhelper.run_action(self.unit, self.model, "disable") except ActionFailedException as e: - LOG.debug(str(e)) + LOG.debug("Failed to disable hypervisor unit: %r", e) return Result(ResultType.FAILED, "Failed to disable hypervisor unit") try: self.jhelper.remove_unit(APPLICATION, self.unit, self.model) @@ -314,18 +315,19 @@ def run(self, context: StepContext) -> Result: timeout=HYPERVISOR_UNIT_TIMEOUT, ) except (ApplicationNotFoundException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Failed to remove hypervisor unit: %r", e) return Result(ResultType.FAILED, str(e)) try: if self.deployment: remove_hypervisor(self.jhelper, self.deployment, self.node_name) except openstack.exceptions.SDKException as e: LOG.error( - "Encountered error removing hypervisor references from control plane." + "Encountered error removing hypervisor references from control plane" ) if self.force: - LOG.warning("Force mode set, ignoring exception:") - traceback.print_exception(e) + LOG.warning( + "Force mode set, ignoring following exceptions", exc_info=True + ) else: return Result(ResultType.FAILED, str(e)) @@ -388,7 +390,7 @@ def run(self, context: StepContext) -> Result: if network_configs: LOG.debug( - "Add external network configs from DemoSetup to extra tfvars: %s", + "Adding external network configs from DemoSetup to extra tfvars: %s", network_configs, ) self.extra_tfvars["charm_config"].update(network_configs) @@ -427,7 +429,7 @@ def run(self, context: StepContext) -> Result: timeout=HYPERVISOR_UNIT_TIMEOUT, ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for hypervisor application: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -492,24 +494,26 @@ def is_skip(self, context: StepContext) -> Result: node = self.client.cluster.get_node_info(self.node) 
self.machine_id = str(node.get("machineid")) except NodeNotExistInClusterException: - LOG.debug(f"Machine {self.node} does not exist, skipping.") + LOG.debug("Machine %s does not exist, skipping.", self.node) return Result(ResultType.SKIPPED) try: application = self.jhelper.get_application(APPLICATION, self.model) except ApplicationNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Failed to get hypervisor application: %r", e) return Result( ResultType.SKIPPED, "Hypervisor application has not been deployed yet" ) for unit_name, unit in application.units.items(): if unit.machine == self.machine_id: - LOG.debug(f"Unit {unit_name} is deployed on machine: {self.machine_id}") + LOG.debug( + "Unit %s is deployed on machine: %s", unit_name, self.machine_id + ) self.unit = unit_name break if not self.unit: - LOG.debug(f"Unit is not deployed on machine: {self.machine_id}, skipping.") + LOG.debug("Unit is not deployed on machine: %s, skipping.", self.machine_id) return Result(ResultType.SKIPPED) return Result(ResultType.COMPLETED) @@ -520,7 +524,7 @@ def run(self, context: StepContext) -> Result: try: self.jhelper.run_action(self.unit, self.model, "enable") except ActionFailedException as e: - LOG.debug(str(e)) + LOG.debug("Failed to enable hypervisor service for %s: %r", self.unit, e) return Result( ResultType.FAILED, f"Failed to enable hypervisor service for unit {self.unit}", diff --git a/sunbeam-python/sunbeam/steps/juju.py b/sunbeam-python/sunbeam/steps/juju.py index 80258e6e1..cc0f5a27b 100644 --- a/sunbeam-python/sunbeam/steps/juju.py +++ b/sunbeam-python/sunbeam/steps/juju.py @@ -89,7 +89,7 @@ def is_skip(self, context: StepContext) -> Result: "Error determining whether to skip the bootstrap " "process. Defaulting to not skip." 
) - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud not in juju_clouds: @@ -108,7 +108,7 @@ def is_skip(self, context: StepContext) -> Result: "Error determining whether to skip the bootstrap " "process. Defaulting to not skip." ) - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud in juju_clouds_on_controller: @@ -127,8 +127,7 @@ def run(self, context: StepContext) -> Result: if not result: return Result(ResultType.FAILED, "Unable to create cloud") except subprocess.CalledProcessError as e: - LOG.debug(e.stderr) - LOG.debug(str(e)) + LOG.debug("%s: %s", e, e.stderr) message = None if "already exists" in e.stderr: @@ -178,7 +177,7 @@ def is_skip(self, context: StepContext) -> Result: "Error determining whether to skip the bootstrap " "process. Defaulting to not skip." ) - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) client_creds = credentials.get("client-credentials", {}) @@ -200,7 +199,7 @@ def run(self, context: StepContext) -> Result: self.add_credential(self.cloud, self.definition, self.controller) except subprocess.CalledProcessError as e: LOG.exception("Error adding credentials to Juju") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -243,7 +242,7 @@ def is_skip(self, context: StepContext) -> Result: "Error determining whether to skip the bootstrap " "process. Defaulting to not skip." 
) - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud not in self.juju_clouds: return Result( @@ -321,20 +320,21 @@ def run(self, context: StepContext) -> Result: env = os.environ.copy() env.update(self.proxy_settings) - LOG.debug(f"Running command {' '.join(cmd)}") - LOG.debug(f"Bootstrap configs used: {configs_to_print}") + LOG.debug("Running command %s", " ".join(cmd)) + LOG.debug("Bootstrap configs used: %s", configs_to_print) process = subprocess.run( cmd, capture_output=True, text=True, check=True, env=env ) LOG.debug( - f"Command finished. stdout={process.stdout}, " - f"stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", + process.stdout, + process.stderr, ) return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: LOG.exception("Error bootstrapping Juju") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -367,9 +367,11 @@ def run(self, context: StepContext) -> Result: str(self.n), *self.extra_args, ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr + ) cmd = [ self._get_juju_binary(), "wait-for", @@ -382,9 +384,11 @@ def run(self, context: StepContext) -> Result: ] self.update_status(context, "scaling controller") LOG.debug("Waiting for HA to be enabled") - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr + ) return Result(ResultType.COMPLETED) @@ -431,12 +435,16 @@ def run(self, context: StepContext) -> Result: if self.destroy_args: cmd.extend(self.destroy_args) + LOG.debug("Running command %s", " ".join(cmd)) try: process = subprocess.run(cmd, capture_output=True, text=True, check=True) except subprocess.CalledProcessError as e: LOG.exception("Error destroying controller") + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr + ) return Result(ResultType.COMPLETED) @@ -464,7 +472,7 @@ def is_skip(self, context: StepContext) -> Result: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: LOG.exception("Error getting users list from juju.") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -478,10 +486,10 @@ def run(self, context: StepContext) -> Result: """ try: cmd = [self._get_juju_binary(), "add-user", self.username] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) re_groups = re.search( @@ -495,10 +503,10 @@ def run(self, context: StepContext) -> Result: # Grant superuser access to user. cmd = [self._get_juju_binary(), "grant", self.username, "superuser"] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) # Grant write access to controller model @@ -510,16 +518,16 @@ def run(self, context: StepContext) -> Result: "admin", CONTROLLER_MODEL, ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) return Result(ResultType.COMPLETED, message=token) except subprocess.CalledProcessError as e: - LOG.exception(f"Error creating user {self.username} in Juju") - LOG.warning(e.stderr) + LOG.exception("Error creating user %s in Juju", self.username) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -550,7 +558,7 @@ def run(self, context: StepContext) -> Result: ) except subprocess.CalledProcessError as e: LOG.exception("Error getting list of users from Juju.") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) try: @@ -560,10 +568,10 @@ def run(self, context: StepContext) -> Result: self.username, "--reset", ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) re_groups = re.search(self.reset_token_regex, process.stderr, re.MULTILINE) @@ -579,8 +587,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, message=token) except subprocess.CalledProcessError as e: - LOG.exception(f"Error resetting user {self.username} in Juju") - LOG.warning(e.stderr) + LOG.exception("Error resetting user %s in Juju", self.username) return Result(ResultType.FAILED, str(e)) @@ -619,7 +626,7 @@ def run(self, context: StepContext) -> Result: "admin", model_with_owner, ] - LOG.debug("Running command %r", " ".join(cmd)) + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( "Command finished. stdout=%r, stderr=%r", @@ -670,8 +677,8 @@ def is_skip(self, context: StepContext) -> Result: if self.username not in user_names: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: - LOG.exception("Error getting list of users from Juju.") - LOG.warning(e.stderr) + LOG.exception("Error getting list of users from Juju") + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -685,16 +692,16 @@ def run(self, context: StepContext) -> Result: """ try: cmd = [self._get_juju_binary(), "remove-user", self.username, "--yes"] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception(f"Error removing user {self.username} from Juju") - LOG.warning(e.stderr) + LOG.exception("Error removing user %s from Juju", self.username) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -733,24 +740,23 @@ def is_skip(self, context: StepContext) -> Result: """ try: self.juju_account = JujuAccount.load(self.data_location) - LOG.debug(f"Local account found: {self.juju_account.user}") - except JujuAccountNotFound as e: - LOG.warning(e) + LOG.debug("Local account found: %r", self.juju_account.user) + except JujuAccountNotFound: + LOG.warning("Cannot find local Juju account: %r", self.juju_account.user) return Result(ResultType.FAILED, "Account was not registered locally") try: user = self._juju_cmd("show-user") - LOG.debug(f"Found user: {user['user-name']}") + LOG.debug("Found user: %r", user["user-name"]) username = user["user-name"] if username == self.juju_account.user: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: if "No controllers registered" not in e.stderr: LOG.exception("Error retrieving authenticated user from Juju.") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) # Error is about no controller register, which is okay is this case - pass if self.client is None: return Result( @@ -790,13 +796,14 @@ def run(self, context: StepContext) -> Result: self.controller, "--no-prompt", ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run( cmd, capture_output=True, text=True, check=True ) LOG.debug( - f"Command finished. stdout={process.stdout}, " - f"stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", + process.stdout, + process.stderr, ) except ControllerNotFoundException: LOG.debug("No controller found to replace, register the juju user") @@ -843,8 +850,8 @@ def run(self, context: StepContext) -> Result: while True: index = child.expect(expect_list, PEXPECT_TIMEOUT) LOG.debug( - "Juju registraton: expect got regex related to " - f"{expect_list[index]}" + "Juju registraton: expect got regex related to %r", + expect_list[index], ) if index in (0, 1, 3): child.sendline(self.juju_account.password) @@ -867,8 +874,7 @@ def run(self, context: StepContext) -> Result: LOG.debug("User registration completed") break except pexpect.TIMEOUT as e: - LOG.exception(f"Error registering user {self.username} in Juju") - LOG.warning(e) + LOG.exception("Error registering user %s in Juju", self.username) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -900,7 +906,7 @@ def is_skip(self, context: StepContext) -> Result: """ try: self.juju_account = JujuAccount.load(self.data_location, self.account_file) - LOG.debug(f"Local account found for {self.controller}") + LOG.debug("Local account found for %s", self.controller) except JujuAccountNotFound: password = random_string(12) self.juju_account = JujuAccount(user="REPLACE_USER", password=password) @@ -916,9 +922,9 @@ def _get_user_from_local_juju(self, controller: str) -> str | None: user = ( accounts.get("controllers", {}).get(self.controller, {}).get("user") ) - LOG.debug(f"Found user from accounts.yaml for {controller}: {user}") - except FileNotFoundError as e: - LOG.debug(f"Error in retrieving local user: {str(e)}") + LOG.debug("Found user from accounts.yaml for %s: %r", controller, user) + except FileNotFoundError: + LOG.debug("Error in retrieving local user", exc_info=True) user = None return user @@ -944,7 +950,7 @@ def run(self, context: StepContext) -> Result: if self.juju_account.user != user: self.juju_account.user = user - LOG.debug(f"Updating user in {self.juju_account} 
file") + LOG.debug("Updating user in %r file", self.juju_account) self.juju_account.write(self.data_location, self.account_file) return Result(ResultType.COMPLETED) @@ -971,8 +977,8 @@ def is_skip(self, context: StepContext) -> Result: except ControllerNotFoundException: self.account_file.unlink(missing_ok=True) LOG.warning( - f"Controller {self.controller} not found, skipping unregsiter " - "controller" + "Controller %s is not found, skipping unregsiter controller", + self.controller, ) return Result(ResultType.SKIPPED) @@ -992,10 +998,10 @@ def run(self, context: StepContext) -> Result: self.controller, "--no-prompt", ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr ) self.account_file.unlink(missing_ok=True) except subprocess.CalledProcessError as e: @@ -1033,7 +1039,7 @@ def is_skip(self, context: StepContext) -> Result: try: machines = self._juju_cmd("machines", "-m", self.model_with_owner) - LOG.debug(f"Found machines: {machines}") + LOG.debug("Found machines: %r", machines) machines = machines.get("machines", {}) for machine, details in machines.items(): @@ -1043,7 +1049,7 @@ def is_skip(self, context: StepContext) -> Result: except subprocess.CalledProcessError as e: LOG.exception("Error retrieving machines list from Juju") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1054,7 +1060,7 @@ def is_skip(self, context: StepContext) -> Result: def _wait_for_machine(self, machine_ip: str) -> str: """Wait for machine to report it's ip address.""" machines = self._juju_cmd("machines", "-m", self.model_with_owner) - LOG.debug(f"Found machines: {machines}") + LOG.debug("Found machines: %r", 
machines) machines = machines.get("machines", {}) for machine, details in machines.items(): if machine_ip in details.get("ip-addresses", []): @@ -1086,8 +1092,8 @@ def run(self, context: StepContext) -> Result: while True: index = child.expect(expect_list) LOG.debug( - "Juju add-machine: expect got regex related to " - f"{expect_list[index]}" + "Juju add-machine: expect got regex related to %r", + expect_list[index], ) if index == 0: child.sendline("yes") @@ -1109,9 +1115,8 @@ def run(self, context: StepContext) -> Result: machine = "-1" return Result(ResultType.COMPLETED, machine) - except pexpect.TIMEOUT as e: - LOG.exception("Error adding machine {self.machine_ip} to Juju") - LOG.warning(e) + except pexpect.TIMEOUT: + LOG.exception("Error adding machine %s to Juju", self.machine_ip) return Result(ResultType.FAILED, "TIMED OUT to add machine") @@ -1145,7 +1150,7 @@ def is_skip(self, context: StepContext) -> Result: self.model_with_owner = self.get_model_name_with_owner(self.model) try: machines = self._juju_cmd("machines", "-m", self.model_with_owner) - LOG.debug(f"Found machines: {machines}") + LOG.debug("Found machines: %r", machines) machines = machines.get("machines", {}) if str(self.machine_id) not in machines: @@ -1153,7 +1158,7 @@ def is_skip(self, context: StepContext) -> Result: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: LOG.exception("Error retrieving machine list from Juju") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1182,16 +1187,18 @@ def run(self, context: StepContext) -> Result: str(self.machine_id), "--no-prompt", ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", + process.stdout, + process.stderr, ) return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception(f"Error removing machine {self.machine_id} from Juju") - LOG.warning(e.stderr) + LOG.exception("Error removing machine %s from Juju", self.machine_id) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -1214,13 +1221,13 @@ def is_skip(self, context: StepContext) -> Result: """ try: user = self._juju_cmd("show-user") - LOG.debug(f"Found user: {user['user-name']}") + LOG.debug("Found user: %s", user["user-name"]) username = user["user-name"] if username == "admin": return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: LOG.exception("Error retrieving user from Juju") - LOG.warning(e.stderr) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.SKIPPED) @@ -1257,7 +1264,7 @@ def is_skip(self, context: StepContext) -> Result: """ try: juju_account = JujuAccount.load(self.data_location) - LOG.debug(f"Local account found: {juju_account.user}") + LOG.debug("Local account found: %s", juju_account.user) if juju_account.user == self.username: # TODO(gboutry): make user password updateable ? 
return Result(ResultType.SKIPPED) @@ -1333,10 +1340,10 @@ def _get_credentials_from_local_juju( .get(self.controller, {}) .get("password") ) - LOG.debug(f"Found user from accounts.yaml for {controller}: {user}") + LOG.debug("Found user from accounts.yaml for %s: %s", controller, user) return user, password - except FileNotFoundError as e: - LOG.debug(f"Error in retrieving local user: {str(e)}") + except FileNotFoundError: + LOG.debug("Error in retrieving local user", exc_info=True) user = None return None @@ -1386,7 +1393,7 @@ def is_skip(self, context: StepContext) -> Result: """ if self.jhelper.model_exists(self.model): return Result(ResultType.COMPLETED) - LOG.debug(f"Model {self.model} not found") + LOG.debug("Model %s is not found", self.model) return Result(ResultType.SKIPPED) def run(self, context: StepContext) -> Result: @@ -1397,7 +1404,7 @@ def run(self, context: StepContext) -> Result: :return: """ try: - LOG.debug(f"Getting juju status for model {self.model}") + LOG.debug("Getting juju status for model %s", self.model) model_status = self.jhelper.get_model_status(self.model) LOG.debug(model_status) @@ -1439,7 +1446,7 @@ def is_skip(self, context: StepContext) -> Result: self.model_uuid = model.get("model-uuid") return Result(ResultType.COMPLETED) except ModelNotFoundException: - LOG.debug(f"Model {self.model} not found") + LOG.debug("Model %s is not found", self.model) return Result(ResultType.SKIPPED) def run(self, context: StepContext) -> Result: @@ -1451,7 +1458,7 @@ def run(self, context: StepContext) -> Result: """ if not self.model_uuid: return Result(ResultType.FAILED, "Model UUID not found") - LOG.debug(f"Getting debug logs for model {self.model}") + LOG.debug("Getting debug logs for model %s", self.model) try: # libjuju model.debug_log is broken. 
cmd = [ @@ -1505,15 +1512,15 @@ def is_skip(self, context: StepContext) -> Result: ) if self.controller: cmd += f" -c {self.controller}" - LOG.debug(f"Running command {cmd}") + LOG.debug("Running command %s", cmd) expect_list = ["^please enter password", "{}", pexpect.EOF] with pexpect.spawn(cmd) as process: try: index = process.expect(expect_list, timeout=PEXPECT_TIMEOUT) except pexpect.TIMEOUT as e: - LOG.debug("Process timeout") + LOG.debug("Process timeout", exc_info=True) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Command stdout={process.before}") + LOG.debug("Command stdout=%r", process.before) if index in (0, 1): return Result(ResultType.COMPLETED) elif index == 2: @@ -1543,7 +1550,7 @@ def run(self, context: StepContext) -> Result: ) if self.controller: cmd += f" -c {self.controller}" - LOG.debug(f"Running command {cmd}") + LOG.debug("Running command %s", cmd) process = pexpect.spawn(cmd) try: process.expect("^please enter password", timeout=PEXPECT_TIMEOUT) @@ -1551,9 +1558,9 @@ def run(self, context: StepContext) -> Result: process.expect(pexpect.EOF, timeout=PEXPECT_TIMEOUT) process.close() except pexpect.TIMEOUT as e: - LOG.debug("Process timeout") + LOG.debug("Process timeout", exc_info=True) return Result(ResultType.FAILED, str(e)) - LOG.debug(f"Command stdout={process.before}") + LOG.debug("Command stdout=%r", process.before) if process.exitstatus != 0: return Result(ResultType.FAILED, "Failed to login to Juju Controller") return Result(ResultType.COMPLETED) @@ -1583,7 +1590,7 @@ def is_skip(self, context: StepContext) -> Result: """Determines if the step should be skipped or not.""" if self.jhelper.model_exists(self.model): return Result(ResultType.SKIPPED) - LOG.debug(f"Model {self.model} not found") + LOG.debug("Model %s is not found", self.model) return Result(ResultType.COMPLETED) def run(self, context: StepContext) -> Result: @@ -1592,7 +1599,7 @@ def run(self, context: StepContext) -> Result: self.model_config.update( 
convert_proxy_to_model_configs(self.proxy_settings) ) - LOG.debug(f"Adding model {self.model} with config: {self.model_config}") + LOG.debug("Adding model %s with config: %s", self.model, self.model_config) self.jhelper.add_model( self.model, @@ -1621,7 +1628,7 @@ def is_skip(self, context: StepContext) -> Result: """Determines if the step should be skipped or not.""" if self.jhelper.model_exists(self.model): return Result(ResultType.COMPLETED) - LOG.debug(f"Model {self.model} not found") + LOG.debug("Model %s is not found", self.model) return Result(ResultType.SKIPPED) def run(self, context: StepContext) -> Result: @@ -1633,7 +1640,7 @@ def run(self, context: StepContext) -> Result: timeout=self.timeout, ) except Exception as e: - LOG.debug(f"Error destroying model {self.model}", exc_info=True) + LOG.debug("Error destroying model %s", self.model, exc_info=True) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1704,7 +1711,7 @@ def run(self, context: StepContext) -> Result: "--base", JUJU_BASE, ] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) env = os.environ.copy() env.update(self.proxy_settings) process = subprocess.run( @@ -1716,7 +1723,7 @@ def run(self, context: StepContext) -> Result: env=env, ) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) for charm_file in download_dir.glob("juju-controller*.charm"): @@ -1725,6 +1732,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: LOG.exception("Error downloading Juju Controller charm") + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -1770,7 +1778,7 @@ def is_skip(self, context: StepContext) -> Result: wanted_subnets = set(self.subnets) missing_from_all = wanted_subnets - all_subnets if missing_from_all: - LOG.debug(f"Subnets {missing_from_all} are not available in any space") + LOG.debug("Subnets %s are not available in any space", missing_from_all) return Result(ResultType.FAILED) space_subnets = spaces_subnets.get(self.space) @@ -1944,7 +1952,7 @@ def is_skip(self, context: StepContext) -> Result: def run(self, context: StepContext) -> Result: """Integrate applications.""" for node in self.nodes_to_update: - LOG.debug(f"Updating machine {node['name']} in model {self.model}") + LOG.debug("Updating machine %s in model %s", node["name"], self.model) if (machine_id := self.hostname_id.get(node["name"])) is not None: self.client.cluster.update_node_info(node["name"], machineid=machine_id) else: @@ -1970,13 +1978,14 @@ def run(self, context: StepContext) -> Result: """Switch to juju controller.""" try: cmd = [self._get_juju_binary(), "switch", self.controller] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) LOG.debug( - f"Command finished. stdout={process.stdout}, stderr={process.stderr}" + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) except subprocess.CalledProcessError as e: - LOG.exception(f"Error in switching the controller to {self.controller}") + LOG.exception("Error in switching the controller to %s", self.controller) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -2119,7 +2128,7 @@ def _get_remote_apps_from_interfaces( if not remote_app: continue - LOG.debug(f"Processing remote app: {remote_app}") + LOG.debug("Processing remote app: %s", remote_app) for endpoint in remote_app.endpoints.values(): if endpoint.interface in offering_interfaces: remote_apps.append(name) @@ -2208,9 +2217,11 @@ def __init__( def _switch_controller(self, controller: str): cmd = [self._get_juju_binary(), "switch", controller] - LOG.debug(f"Running command {' '.join(cmd)}") + LOG.debug("Running command %s", " ".join(cmd)) process = subprocess.run(cmd, capture_output=True, text=True, check=True) - LOG.debug(f"Command finished. stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr
+        )
 
     def _get_model_owner(self, model: str, controller: str) -> str:
         """Determine model owner."""
@@ -2218,7 +2229,7 @@ def _get_model_owner(self, model: str, controller: str) -> str:
             model_info = self._juju_cmd("show-model", f"{controller}:{model}")
         except subprocess.CalledProcessError:
             raise ValueError(f"Model {model} not found")
-        LOG.debug(f"Model info: {model_info}")
+        LOG.debug("Model info: %s", model_info)
         if owner := model_info.get(model, {}).get("owner"):
             return owner
         raise ValueError(f"Owner not found for model {model}")
@@ -2250,8 +2261,11 @@ def _wait_for_model(
                 return True
-            except subprocess.CalledProcessError:
+            except subprocess.CalledProcessError as e:
                 LOG.exception(
-                    f"Error in finding {model} model in {from_controller} controller"
+                    "Error in finding %s model in controller %s",
+                    model,
+                    from_controller,
                 )
+                LOG.warning("%s: %s", e, e.stderr)
 
         raise JujuMigrationFailedError(
             f"Model {model} is not in either {to_controller} or"
@@ -2262,7 +2276,7 @@ def _wait_for_model(
             migration_status = (
                 model_info.get(model, {}).get("status", {}).get("migration", "")
             )
-            LOG.debug(f"Migration status: {migration_status}")
+            LOG.debug("Migration status: %s", migration_status)
             if migration_status.startswith("aborted"):
                 return False
@@ -2290,20 +2304,24 @@ def _migrate_model(
             to_controller,
         ]
 
-        LOG.debug(f"Running command {' '.join(cmd)}")
+        LOG.debug("Running command %s", " ".join(cmd))
         try:
             process = subprocess.run(cmd, capture_output=True, text=True, check=True)
         except subprocess.CalledProcessError as e:
             LOG.exception(
-                f"Error in migrating model {self.model}"
-                f" from {self.from_controller}"
-                f" to controller {self.to_controller}"
+                "Error in migrating model %s from controller %s to controller %s",
+                self.model,
+                self.from_controller,
+                self.to_controller,
             )
+            LOG.warning("%s: %s", e, e.stderr)
             raise JujuMigrationFailedError("Juju migrate command failed") from e
 
-        LOG.debug(f"Command finished. 
stdout={process.stdout}, stderr={process.stderr}") + LOG.debug( + "Command finished. stdout=%r, stderr=%r", process.stdout, process.stderr + ) # Check migration status is_model_migrated = self._wait_for_model( @@ -2321,12 +2339,14 @@ def is_skip(self, context: StepContext) -> Result: try: self._switch_controller(self.to_controller) except subprocess.CalledProcessError as e: - LOG.exception(f"Error in switching to controller {self.to_controller}") + LOG.exception("Error in switching to controller %s", self.to_controller) + LOG.warning("%s: %s", e, e.stderr) + return Result(ResultType.FAILED, str(e)) models = self._juju_cmd("models") models = models.get("models", []) - LOG.debug(f"Looking for {self.model} in models: {models}") + LOG.debug("Looking for %s in models: %s", self.model, models) for model_ in models: if model_.get("short-name") == self.model: @@ -2352,7 +2372,7 @@ def run(self, context: StepContext) -> Result: except JujuMigrationFailedError as e: return Result(ResultType.FAILED, str(e)) except TimeoutError: - LOG.exception("Error in finding model after migration", exc_info=True) + LOG.exception("Error in finding model after migration") return Result( ResultType.FAILED, f"Timed out waiting for model {self.model} to migrate", @@ -2362,7 +2382,8 @@ def run(self, context: StepContext) -> Result: try: self._switch_controller(self.to_controller) except subprocess.CalledProcessError as e: - LOG.exception(f"Error in switching to controller {self.to_controller}") + LOG.exception("Error in switching to controller %s", self.to_controller) + LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/k8s.py b/sunbeam-python/sunbeam/steps/k8s.py index 6f0968a25..e2e34b10d 100644 --- a/sunbeam-python/sunbeam/steps/k8s.py +++ b/sunbeam-python/sunbeam/steps/k8s.py @@ -388,12 +388,12 @@ def _find_matching_k8s_node( Raises K8SError if client not able to get nodes from k8s. 
"""
-        LOG.debug(f"Matching K8S Node with name {hostname} and IPs {ips}")
+        LOG.debug("Matching K8S Node with name %s and IPs %s", hostname, ips)
         hostname_without_domain = hostname.split(".")[0]
         k8s_nodes = list_nodes(
             self.kube, labels={DEPLOYMENT_LABEL: self.deployment.name}
         )
-        LOG.debug(f"K8S nodes filtered by deployment label: {k8s_nodes}")
+        LOG.debug("K8S nodes filtered by deployment label: %s", k8s_nodes)
 
         for k8s_node in k8s_nodes:
             if k8s_node.metadata is None:
@@ -591,7 +591,7 @@ def is_skip(self, context: StepContext) -> Result:
             ResultType.COMPLETED or ResultType.FAILED otherwise
         """
         clouds = self.jhelper.get_clouds()
-        LOG.debug(f"Clouds registered in the controller: {clouds}")
+        LOG.debug("Clouds registered in the controller: %s", clouds)
         # TODO(hemanth): Need to check if cloud credentials are also created?
         if self.cloud_name in clouds.keys():
             return Result(ResultType.SKIPPED)
@@ -628,7 +628,7 @@ def is_skip(self, context: StepContext) -> Result:
             ResultType.COMPLETED or ResultType.FAILED otherwise
         """
         clouds = self.get_clouds("k8s", local=True)
-        LOG.debug(f"Clouds registered in the client: {clouds}")
+        LOG.debug("Clouds registered in the client: %s", clouds)
         if self.cloud_name in clouds:
             return Result(ResultType.SKIPPED)
@@ -663,7 +663,7 @@ def is_skip(self, context: StepContext) -> Result:
             ResultType.COMPLETED or ResultType.FAILED otherwise
         """
         clouds = self.jhelper.get_clouds()
-        LOG.debug(f"Clouds registered in the controller: {clouds}")
+        LOG.debug("Clouds registered in the controller: %s", clouds)
         if self.cloud_name not in clouds.keys():
             return Result(
                 ResultType.SKIPPED,
@@ -708,8 +708,8 @@ def is_skip(self, context: StepContext) -> Result:
             if "not found" in e.stderr:
                 return Result(ResultType.COMPLETED)
 
-            LOG.debug(e.stderr)
-            LOG.exception("Error retrieving juju credentails from controller.")
+            LOG.debug("%s: %s", e, e.stderr)
+            LOG.exception("Error retrieving juju credentials from controller")
             return Result(ResultType.FAILED, str(e))
 
         if
self.credential_name in credentials.get("controller-credentials", {}).keys(): @@ -765,7 +765,7 @@ def run(self, context: StepContext) -> Result: unit = self.jhelper.get_leader_unit(APPLICATION, self.model) machine = self.jhelper.get_leader_unit_machine(APPLICATION, self.model) - LOG.debug(unit) + LOG.debug("Leader unit: %s", unit) leader_unit_management_ip = self._get_management_server_ip(machine) LOG.debug("Leader unit management IP: %s", leader_unit_management_ip) run_action_kwargs = ( @@ -780,7 +780,7 @@ def run(self, context: StepContext) -> Result: run_action_kwargs, ) - LOG.debug(result) + LOG.debug("Result from getting kubeconfig for %s: %s", unit, result) if not result.get("kubeconfig"): return Result( ResultType.FAILED, @@ -2053,7 +2053,7 @@ def is_skip(self, context: StepContext) -> Result: coredns_hpa = self.kube.get( autoscaling_v2.HorizontalPodAutoscaler, name=self.coredns_hpa ) - LOG.debug(f"Existing coredns hpa: {coredns_hpa}") + LOG.debug("Existing coredns hpa: %s", coredns_hpa) coredns_hpa_spec = coredns_hpa.spec if coredns_hpa_spec is None: LOG.debug("Coredns HPA has no spec") @@ -2066,7 +2066,7 @@ def is_skip(self, context: StepContext) -> Result: LOG.debug("Failed to get coredns hpa", exc_info=True) return Result(ResultType.FAILED, str(e)) else: - LOG.debug(f"No hpa found for coredns: {str(e)}") + LOG.debug("No hpa found for coredns: %r", e) return Result(ResultType.COMPLETED) @@ -2082,7 +2082,7 @@ def run(self, context: StepContext) -> Result: self.juju_app_name, self.deployment.openstack_machines_model ) except JujuException as e: - LOG.debug(f"Failed to get {self.juju_app_name} leader", exc_info=True) + LOG.debug("Failed to get %s leader", self.juju_app_name, exc_info=True) return Result(ResultType.FAILED, str(e)) hpa_dict = copy.deepcopy(COREDNS_HPA) @@ -2101,7 +2101,7 @@ def run(self, context: StepContext) -> Result: "/snap/k8s/current/k8s/manifests/charts/coredns-*.tgz" f" --reuse-values --set-json {set_json}" ) - LOG.debug(f"Running cmd 
in unit {leader}: {cmd_str}") + LOG.debug("Running cmd in unit %s: %s", leader, cmd_str) result = self.jhelper.run_cmd_on_machine_unit_payload( leader, @@ -2109,7 +2109,7 @@ def run(self, context: StepContext) -> Result: cmd_str, self.timeout, ) - LOG.debug(f"Result: {result}") + LOG.debug("Result for patching coredns for %s: %s", leader, result) if result.return_code != 0: return Result( diff --git a/sunbeam-python/sunbeam/steps/microceph.py b/sunbeam-python/sunbeam/steps/microceph.py index de597a367..baac6a39b 100644 --- a/sunbeam-python/sunbeam/steps/microceph.py +++ b/sunbeam-python/sunbeam/steps/microceph.py @@ -72,7 +72,7 @@ def microceph_questions(): ) def list_disks(jhelper: JujuHelper, model: str, unit: str) -> tuple[dict, dict]: """Call list-disks action on an unit.""" - LOG.debug("Running list-disks on : %r", unit) + LOG.debug("Running list-disks on: %r", unit) action_result = jhelper.run_action( unit, model, "list-disks", action_params={"host-only": True} ) @@ -283,11 +283,11 @@ def get_all_disks(self) -> None: disk.get("path") for disk in unpartitioned_disks_dict ] self.osd_disks = [disk.get("path") for disk in osd_disks_dict] - LOG.debug(f"Unpartitioned disks: {self.unpartitioned_disks}") - LOG.debug(f"OSD disks: {self.osd_disks}") + LOG.debug("Unpartitioned disks: %s", self.unpartitioned_disks) + LOG.debug("OSD disks: %s", self.osd_disks) except (UnitNotFoundException, ActionFailedException) as e: - LOG.debug(str(e)) + LOG.debug("Failed to list disks: %r", e) raise SunbeamException("Unable to list disks") def prompt( @@ -343,7 +343,7 @@ def prompt( # note(gboutry): wipe disks option is never saved in clusterd, always # read when needed in the manifest. 
- LOG.debug(self.variables) + LOG.debug("Microceph variables: %s", self.variables) questions.write_answers(self.client, self._CONFIG, self.variables) def has_prompts(self) -> bool: @@ -386,14 +386,14 @@ def run(self, context: StepContext) -> Result: unit = self.jhelper.get_unit_from_machine( APPLICATION, self.machine_id, self.model ) - LOG.debug(f"Running action add-osd on {unit}") + LOG.debug("Running action add-osd on %s", unit) action_result = self.jhelper.run_action( unit, self.model, "add-osd", action_params=action_params, ) - LOG.debug(f"Result after running action add-osd: {action_result}") + LOG.debug("Result after running action add-osd: %s", action_result) except UnitNotFoundException as e: message = f"Microceph Adding disks {self.disks} failed: {str(e)}" failed = True @@ -408,12 +408,12 @@ def run(self, context: StepContext) -> Result: # disk already added to microceph, ignore the error if "entry already exists" in result.get("message"): disk = result.get("spec") - LOG.debug(f"Disk {disk} already added") + LOG.debug("Disk %s is already added", disk) continue else: failed = True - except Exception as ex: - LOG.debug(f"Exception in eval action output: {str(ex)}") + except Exception as e: + LOG.debug("Exception in eval action output: %r", e) return Result(ResultType.FAILED, message) if failed: @@ -463,7 +463,7 @@ def run(self, context: StepContext) -> Result: "size": ceph_replica_scale(len(self.storage_nodes)), } LOG.debug( - f"Running microceph action set-pool-size with params {action_params}" + "Running microceph action set-pool-size with params %s", action_params ) result = self.jhelper.run_action( unit, self.model, "set-pool-size", action_params @@ -478,7 +478,7 @@ def run(self, context: StepContext) -> Result: LeaderNotFoundException, ActionFailedException, ) as e: - LOG.debug(f"Failed to update pool size for {pools}", exc_info=True) + LOG.debug("Failed to update pool size for %s", pools, exc_info=True) return Result(ResultType.FAILED, str(e)) return 
Result(ResultType.COMPLETED) @@ -592,7 +592,7 @@ def run(self, context: StepContext) -> Result: try: resources = self.tfhelper.state_list() except TerraformException as e: - LOG.debug(f"Failed to list terraform state: {str(e)}") + LOG.debug("Failed to list terraform state: %r", e) return Result(ResultType.FAILED, "Failed to list terraform state") for resource in resources: @@ -600,7 +600,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.state_rm(resource) except TerraformException as e: - LOG.debug(f"Failed to remove resource {resource}: {str(e)}") + LOG.debug("Failed to remove resource %s: %r", resource, e) return Result( ResultType.FAILED, f"Failed to remove resource {resource} from state", diff --git a/sunbeam-python/sunbeam/steps/microovn.py b/sunbeam-python/sunbeam/steps/microovn.py index ca56e5254..39520240e 100644 --- a/sunbeam-python/sunbeam/steps/microovn.py +++ b/sunbeam-python/sunbeam/steps/microovn.py @@ -207,8 +207,8 @@ def run(self, context: StepContext) -> Result: if network_configs: LOG.debug( - "Add external network configs from DemoSetup to extra tfvars: " - f"{network_configs}" + "Add external network configs from DemoSetup to extra tfvars: %s", + network_configs, ) self.extra_tfvars["charm_openstack_network_agents_config"].update( network_configs @@ -233,7 +233,7 @@ def run(self, context: StepContext) -> Result: timeout=MICROOVN_UNIT_TIMEOUT, ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for reapplying MicroOVN: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -292,24 +292,26 @@ def is_skip(self, context: StepContext) -> Result: node = self.client.cluster.get_node_info(self.node) self.machine_id = str(node.get("machineid")) except NodeNotExistInClusterException: - LOG.debug(f"Machine {self.node} does not exist, skipping.") + LOG.debug("Machine %s does not exist, skipping.", self.node) return Result(ResultType.SKIPPED) try: application = 
self.jhelper.get_application(APPLICATION, self.model) except ApplicationNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("MicroOVN application is not found: %r", e) return Result( ResultType.SKIPPED, "microovn application has not been deployed yet" ) for unit_name, unit in application.units.items(): if unit.machine == self.machine_id: - LOG.debug(f"Unit {unit_name} is deployed on machine: {self.machine_id}") + LOG.debug( + "Unit %s is deployed on machine: %s", unit_name, self.machine_id + ) self.unit = unit_name break if not self.unit: - LOG.debug(f"Unit is not deployed on machine: {self.machine_id}, skipping.") + LOG.debug("Unit is not deployed on machine: %s, skipping", self.machine_id) return Result(ResultType.SKIPPED) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/mysql.py b/sunbeam-python/sunbeam/steps/mysql.py index d7d1df3d4..0adb5ae36 100644 --- a/sunbeam-python/sunbeam/steps/mysql.py +++ b/sunbeam-python/sunbeam/steps/mysql.py @@ -81,9 +81,9 @@ def load_upgrade_state(client: Client) -> dict: try: state = json.loads(client.cluster.get_config(MYSQL_UPGRADE_CONFIG_KEY)) except ConfigItemNotFoundException as e: - LOG.debug(f"{MYSQL_UPGRADE_CONFIG_KEY} not found: " + str(e)) + LOG.debug("Config item for %s not found: %r", MYSQL_UPGRADE_CONFIG_KEY, e) except (json.JSONDecodeError, TypeError) as e: - LOG.warning(f"Found malformed mysql upgrade state from clusterd: {str(e)}. 
") + LOG.warning("Found malformed mysql upgrade state from clusterd: %r", e) return state @@ -132,7 +132,7 @@ def __init__( self.original_revision: int | None = None self.original_scale: int | None = None if reset_mysql_upgrade_state: - LOG.debug("Resetting mysql upgrade state.") + LOG.debug("Resetting mysql upgrade state") self._reset_state() def _get_upgrade_stack(self, unit_data: dict) -> list[str]: @@ -145,7 +145,7 @@ def _get_upgrade_stack(self, unit_data: dict) -> list[str]: try: return json.loads(raw) except json.JSONDecodeError: - LOG.warning(f"Failed to parse upgrade stack: {raw}") + LOG.warning("Failed to parse upgrade stack: %s", raw) return [] @@ -164,17 +164,17 @@ def _set_state(self, state: MySQLUpgradeState): "original_scale": self.original_scale, }, ) - LOG.debug("mysql upgrade state %s", state.name) + LOG.debug("MySQL upgrade state %s", state.name) def _reset_state(self): - LOG.debug("mysql resetting upgrade state") + LOG.debug("Resetting MySQL upgrade state") self.original_revision = None self.original_scale = None self.state = MySQLUpgradeState.INIT try: self.client.cluster.delete_config(MYSQL_UPGRADE_CONFIG_KEY) except ConfigItemNotFoundException as e: - LOG.warning("mysql-k8s upgrade state not found in clusterd: %s", e) + LOG.warning("mysql-k8s upgrade state not found in clusterd: %r", e) def _target_scale_for_upgrade(self, original_scale: int) -> int: """Calculate target scale for upgrading mysql-k8s. 
@@ -199,8 +199,9 @@ def record_original_state(self, context: StepContext): self.original_revision = app.charm_rev self.original_scale = app.scale LOG.debug( - f"Recorded original mysql-k8s revision: {self.original_revision}, " - f"scale: {self.original_scale}" + "Recorded original mysql-k8s revision: %d, scale: %d", + self.original_revision, + self.original_scale, ) self._set_state(MySQLUpgradeState.ORIGINAL_STATE_RECORDED) @@ -421,10 +422,11 @@ def scale_back(self, context: StepContext): ) self.jhelper.wait_until_active(self.model, apps=[self.application]) self._set_state(MySQLUpgradeState.SCALED_BACK) - except (JujuException, JujuWaitException, TimeoutError) as exc: + except (JujuException, JujuWaitException, TimeoutError) as e: LOG.warning( - f"Upgrade completed but scale-back to original scale: " - f"{self.original_scale} failed: {str(exc)}", + "Upgrade completed but scale-back to original scale of %d failed: %r", + self.original_scale, + e, ) def is_skip(self, context: StepContext) -> Result: @@ -486,7 +488,7 @@ def is_skip(self, context: StepContext) -> Result: arch="amd64", ) else: - LOG.debug("Could not determine base for mysql-k8s.") + LOG.debug("Could not determine base for mysql-k8s") latest = self.jhelper.get_available_charm_revision( MYSQL_CHARM, deployed_channel, @@ -527,7 +529,7 @@ def run(self, context: StepContext) -> Result: try: self.state = MySQLUpgradeState[state_name] except KeyError: - LOG.warning(f"Invalid mysql-k8s upgrade state: {state_name}") + LOG.warning("Invalid mysql-k8s upgrade state: %s", state_name) self.state = MySQLUpgradeState.INIT self.original_revision = persisted.get("original_revision") self.original_scale = persisted.get("original_scale") diff --git a/sunbeam-python/sunbeam/steps/openstack.py b/sunbeam-python/sunbeam/steps/openstack.py index d9c8ce950..73b53dadb 100644 --- a/sunbeam-python/sunbeam/steps/openstack.py +++ b/sunbeam-python/sunbeam/steps/openstack.py @@ -102,7 +102,7 @@ def 
remove_blocked_apps_from_features(jhelper: JujuHelper, model: str) -> list[s for app_name in APPS_BLOCKED_WHEN_FEATURE_ENABLED: try: app = jhelper.get_application(app_name, model) - LOG.debug(f"Application status for {app_name}: {app.app_status.current}") + LOG.debug("Application status for %s: %s", app_name, app.app_status.current) if app.app_status.current != "active": apps_to_remove.append(app_name) except ApplicationNotFoundException: @@ -280,7 +280,7 @@ def determine_target_topology(client: Client) -> str: topology = "multi" else: topology = "large" - LOG.debug(f"Auto-detected topology: {topology}") + LOG.debug("Auto-detected topology: %s", topology) return topology @@ -703,7 +703,7 @@ def is_skip(self, context: StepContext) -> Result: try: previous_config = read_config(self.client, TOPOLOGY_KEY) self.database = previous_config.get("database", DEFAULT_DATABASE_TOPOLOGY) - LOG.debug(f"database topology {self.database}") + LOG.debug("Database topology: %s", self.database) except ConfigItemNotFoundException: # Config was never registered in database previous_config = {} @@ -712,7 +712,7 @@ def is_skip(self, context: StepContext) -> Result: if self.topology == "auto": self.topology = determined_topology - LOG.debug(f"topology {self.topology}") + LOG.debug("Topology: %s", self.topology) # Check for storage size modifications in manifest # TODO: Force flag to update storage sizes once resize on k8s @@ -798,12 +798,16 @@ def run(self, context: StepContext) -> Result: LOG.info( "The Terraform Juju provider does not currently support " "cross-controller relations: " - "https://github.com/juju/terraform-provider-juju/issues/805." 
+ "https://github.com/juju/terraform-provider-juju/issues/805" ) LOG.info( "The following offers must be consumed manually: " - f"{m}.keystone-credentials, {m}.keystone-endpoints " - f"{m}.keystone-ops, {m}.cert-distributor" + "%s.keystone-credentials, %s.keystone-endpoints " + "%s.keystone-ops, %s.cert-distributor", + m, + m, + m, + m, ) extra_tfvars.update( { @@ -844,7 +848,7 @@ def run(self, context: StepContext) -> Result: apps = list(set(apps) - set(self.remove_blocked_apps_from_role())) apps = list(set(apps) - set(self.remove_blocked_apps_from_ovn_provider())) - LOG.debug(f"Applications monitored for readiness: {apps}") + LOG.debug("Applications monitored for readiness: %s", apps) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -856,7 +860,7 @@ def run(self, context: StepContext) -> Result: overlay=build_overlay_dict(apps), ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting to deploy OpenStack control plane: %r", e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -1028,13 +1032,13 @@ def run(self, context: StepContext) -> Result: apps = list( set(apps) - set(remove_blocked_apps_from_ovn_provider(self.ovn_manager)) ) - LOG.debug(f"Application monitored for readiness: {apps}") + LOG.debug("Application monitored for readiness: %s", apps) pre_status: dict[str, str] = {} try: pre_status = self.jhelper.snapshot_workload_status(self.model, apps) except Exception: LOG.debug("Could not fetch pre-wait status", exc_info=True) - LOG.debug(f"Pre-wait workload status for {self.model}: {pre_status}") + LOG.debug("Pre-wait workload status for %s: %s", self.model, pre_status) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) try: @@ -1048,7 +1052,7 @@ def run(self, context: StepContext) -> Result: ), ) except (JujuWaitException, TimeoutError) as e: - 
LOG.debug(str(e)) + LOG.debug("Timed out waiting for reapplying OpenStack control plane: %r", e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -1162,7 +1166,7 @@ def prompt( if region := self.variables.get("region"): # Region cannot be modified once set - LOG.debug(f"Region already set to {region}") + LOG.debug("Region is already set to %s", region) return preseed = {} if self.manifest: @@ -1343,8 +1347,10 @@ def _get_valid_ip( ) LOG.warning( - f"Invalid IP address for {endpoint} endpoint: {ip}, " - "IP is not in the configured load balancer range" + "Invalid IP address for %s endpoint: %s, " + "IP is not in the configured load balancer range", + endpoint, + ip, ) ip = endpoint_bank.ip.ask() @@ -1353,7 +1359,10 @@ def _get_valid_ip( raise e LOG.warning( - f"Invalid IP address for {endpoint} endpoint: {ip}, error: {e}" + "Invalid IP address for %s endpoint: %s, error: %r", + endpoint, + ip, + e, ) ip = endpoint_bank.ip.ask() @@ -1441,7 +1450,7 @@ def prompt( if database := self.variables.get("database"): # Region cannot be modified once set - LOG.debug(f"Database topology already set to {database}") + LOG.debug("Database topology is already set to %s", database) return preseed = {} diff --git a/sunbeam-python/sunbeam/steps/sync_feature_gates.py b/sunbeam-python/sunbeam/steps/sync_feature_gates.py index a8a0f309c..7e4961b75 100644 --- a/sunbeam-python/sunbeam/steps/sync_feature_gates.py +++ b/sunbeam-python/sunbeam/steps/sync_feature_gates.py @@ -32,18 +32,20 @@ def __init__(self, client: Client): def run(self, context: StepContext) -> Result: """Sync feature gates from snap configuration to cluster database.""" - LOG.debug("Syncing snap config feature gates to cluster database") + LOG.debug("Syncing snap config feature gates to the cluster database") try: from sunbeam.hooks import sync_feature_gates_from_snap_to_cluster sync_feature_gates_from_snap_to_cluster(self.client) - LOG.info("Successfully synced feature gates from snap config to cluster") 
+ LOG.info( + "Successfully synced feature gates from snap config to the cluster" + ) return Result(ResultType.COMPLETED) except Exception as e: # Don't fail the step if sync fails - feature gates can be set later - LOG.warning(f"Failed to sync feature gates to cluster: {e}") + LOG.warning("Failed to sync feature gates to the cluster: %r", e) return Result( ResultType.COMPLETED, "Feature gate sync failed but continuing (non-fatal)", diff --git a/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py b/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py index 5ed1c3a11..8128e770a 100644 --- a/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py +++ b/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py @@ -112,7 +112,7 @@ def upgrade_applications( """ expected_wls = ["active", "blocked", "unknown"] LOG.debug( - f"Upgrading applications using terraform plan {tfhelper.plan}: {apps}" + "Upgrading applications using terraform plan %s: %s", tfhelper.plan, apps ) try: tfhelper.update_partial_tfvars_and_apply_tf( @@ -137,7 +137,7 @@ def upgrade_applications( queue=status_queue, ) except (JujuWaitException, TimeoutError) as e: - LOG.debug(str(e)) + LOG.debug("Timed out waiting for upgrading %s: %r", apps, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -177,7 +177,7 @@ def __init__( def upgrade_tasks(self, context: StepContext) -> Result: """Perform the upgrade tasks.""" # Step 1: Upgrade mysql charms - LOG.debug("Upgrading Mysql charms") + LOG.debug("Upgrading MySQL charms") charms = list(MYSQL_CHARMS_K8S.keys()) apps = self.get_apps_filter_by_charms(self.model, charms) result = self.upgrade_applications( diff --git a/sunbeam-python/sunbeam/steps/upgrades/intra_channel.py b/sunbeam-python/sunbeam/steps/upgrades/intra_channel.py index 6f3abe81a..ee48e62d8 100644 --- a/sunbeam-python/sunbeam/steps/upgrades/intra_channel.py +++ b/sunbeam-python/sunbeam/steps/upgrades/intra_channel.py @@ -101,7 +101,7 @@ def is_track_changed_for_any_charm(self, 
deployed_apps: dict): if not charm_manifest: continue if not charm_manifest: - LOG.debug(f"Charm not present in manifest: {charm}") + LOG.debug("Charm is not present in manifest: %s", charm) continue channel_from_manifest = charm_manifest.channel or "" @@ -113,8 +113,11 @@ def is_track_changed_for_any_charm(self, deployed_apps: dict): # Compare tracks if track_from_manifest != track_from_deployed_app: LOG.debug( - f"Channel track for app {app_name} different in manifest " - "and actual deployed" + "Channel track for app %s is different between the manifest " + "and the actual deployment (manifest: %s, deployed: %s)", + app_name, + track_from_manifest, + track_from_deployed_app, ) return True @@ -136,14 +139,14 @@ def _wait_after_refresh( if not refreshed_apps: return Result(ResultType.COMPLETED) - LOG.debug(f"Waiting for apps {refreshed_apps} in model {model}") + LOG.debug("Waiting for apps %s in model %s", refreshed_apps, model) if model == OPENSTACK_MODEL: overlay = build_pre_status_overlay( refreshed_apps, pre_refresh_status, build_overlay_dict(refreshed_apps), ) - LOG.debug(f"Wait overlay for {model}: {overlay}") + LOG.debug("Waiting overlay for %s: %s", model, overlay) status_queue: queue.Queue[str] = queue.Queue() status = context.status if context else None task = update_status_background(self, refreshed_apps, status_queue, status) @@ -156,7 +159,7 @@ def _wait_after_refresh( overlay=overlay, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for refreshed %s: %r", refreshed_apps, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -168,8 +171,10 @@ def _wait_after_refresh( prior = pre_refresh_status.get(app_name, "active") accepted = list({prior, "active", "unknown"}) LOG.debug( - f"Waiting for {app_name} in {model} " - f"with accepted_status={accepted}" + "Waiting for %s in %s with accepted_status=%s", + app_name, + model, + accepted, ) self.jhelper.wait_application_ready( app_name, @@ 
-178,7 +183,7 @@ def _wait_after_refresh( timeout=1800, # 30 minutes ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for refreshed %s: %r", app_name, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -204,7 +209,7 @@ def refresh_apps( ) except Exception: LOG.debug("Could not fetch pre-refresh status", exc_info=True) - LOG.debug(f"Pre-refresh workload status in {model}: {pre_refresh_status}") + LOG.debug("Pre-refresh workload status in %s: %s", model, pre_refresh_status) refreshed_apps = [] for app_name, (charm, channel, _) in apps.items(): @@ -214,11 +219,11 @@ def refresh_apps( manifest_charm = self.manifest.find_charm(charm) if not manifest_charm: - LOG.debug(f"Running refresh for app {app_name} (no manifest entry)") + LOG.debug("Running refresh for app %s (no manifest entry)", app_name) self.jhelper.charm_refresh(app_name, model) refreshed_apps.append(app_name) else: - LOG.debug(f"Running refresh for app {app_name} with manifest config") + LOG.debug("Running refresh for app %s with manifest config", app_name) self.jhelper.charm_refresh( app_name, model, @@ -252,7 +257,7 @@ def run(self, context: StepContext) -> Result: all_deployed_apps = deployed_k8s_apps.copy() all_deployed_apps.update(deployed_machine_apps) all_deployed_apps.update(deployed_infra_apps) - LOG.debug(f"All deployed apps: {all_deployed_apps}") + LOG.debug("All deployed apps: %s", all_deployed_apps) if self.is_track_changed_for_any_charm(all_deployed_apps): error_msg = ( "Manifest has track values that require upgrades, rerun with " @@ -315,11 +320,14 @@ def run(self, context: StepContext) -> Result: charm_manifest = self.manifest.core.software.charms.get(charm_name) if not charm_manifest or not charm_manifest.config: LOG.debug( - f"No manifest config for {charm_name}, skipping config reapply" + "No manifest config for %s, skipping config reapply", charm_name ) continue LOG.debug( - f"Reapplying config for {app_name} in {model}: 
{charm_manifest.config}" + "Reapplying config for %s in %s: %s", + app_name, + model, + charm_manifest.config, ) self.jhelper.set_app_config(app_name, model, charm_manifest.config) return Result(ResultType.COMPLETED) @@ -351,7 +359,7 @@ def _refresh_snap_for_apps( application = self.jhelper.get_application(app_name, model) except ApplicationNotFoundException: LOG.debug( - "Application %s not found in %s, skipping snap refresh", + "Application %s is not found in %s, skipping snap refresh", app_name, model, ) diff --git a/sunbeam-python/sunbeam/steps/vault.py b/sunbeam-python/sunbeam/steps/vault.py index fcba411f6..a111106be 100644 --- a/sunbeam-python/sunbeam/steps/vault.py +++ b/sunbeam-python/sunbeam/steps/vault.py @@ -71,7 +71,7 @@ def upgrade( self, revision: int | None = None, channel: str = VAULT_CHANNEL ) -> None: """Upgrade vault-k8s to the specified channel.""" - LOG.info(f"Upgrading {self.application} to channel {channel}") + LOG.info("Upgrading %s to channel %s", self.application, channel) self.jhelper.charm_refresh( self.application, OPENSTACK_MODEL, @@ -139,7 +139,8 @@ def _needs_charm_refresh(self) -> Result: CHARM_NAME, target_channel, arch="amd64" ) except JujuException as e: - LOG.debug("Could not determine latest revision for %s: %s", CHARM_NAME, e) + LOG.debug("Could not determine latest revision for %s: %r", CHARM_NAME, e) + # Proceed with refresh if we cannot confirm the revision. 
return Result(ResultType.COMPLETED) if deployed_channel == target_channel and app.charm_rev == latest_rev: @@ -187,7 +188,7 @@ def run(self, context: StepContext) -> Result: ) self.upgrade(revision=revision, channel=target_channel) except JujuException as e: - LOG.error(f"Failed to refresh Vault: {e}") + LOG.error("Failed to refresh Vault: %r", e) return Result( ResultType.FAILED, f"Failed to refresh Vault: {e}", @@ -204,7 +205,7 @@ def run(self, context: StepContext) -> Result: timeout=VAULT_UPGRADE_TIMEOUT, ) except (JujuWaitException, TimeoutError) as e: - LOG.error(f"Timed out waiting for {self.application}: {e}") + LOG.error("Timed out waiting for %s: %r", self.application, e) return Result( ResultType.FAILED, f"Timed out waiting for {self.application} to stabilise: {e}", @@ -246,8 +247,9 @@ def run(self, context: StepContext) -> Result: ) except (JujuWaitException, TimeoutError) as e: LOG.warning( - f"Timed out waiting for {self.application} to stabilise " - f"after terraform apply: {e}" + "Timed out waiting for %s to stabilise after terraform apply: %r", + self.application, + e, ) try: @@ -256,7 +258,7 @@ def run(self, context: StepContext) -> Result: ) vault_status = VaultHelper(self.jhelper).get_vault_status(leader_unit) except (JujuException, LeaderNotFoundException, TimeoutError) as e: - LOG.warning(f"Could not determine vault seal status: {e}") + LOG.warning("Could not determine vault seal status: %r", e) return Result( ResultType.COMPLETED, "Vault upgraded. 
Unable to determine seal status, run " diff --git a/sunbeam-python/sunbeam/storage/base.py b/sunbeam-python/sunbeam/storage/base.py index fc8f96389..e38638603 100644 --- a/sunbeam-python/sunbeam/storage/base.py +++ b/sunbeam-python/sunbeam/storage/base.py @@ -724,5 +724,8 @@ def _get_cli_class(self) -> type[StorageBackendCLIBase]: cli_class_name = f"{self.backend_type.title()}CLI" return getattr(cli_module, cli_class_name) except (ImportError, AttributeError): - LOG.debug(f"{self.backend_type} does not implement custom cli class") + LOG.debug( + "%s does not implement custom cli class", + self.backend_type, + ) return StorageBackendCLIBase diff --git a/sunbeam-python/sunbeam/storage/manager.py b/sunbeam-python/sunbeam/storage/manager.py index 9e632203d..b6edaa715 100644 --- a/sunbeam-python/sunbeam/storage/manager.py +++ b/sunbeam-python/sunbeam/storage/manager.py @@ -75,11 +75,11 @@ def _load_backends(self) -> None: # Check if the backend.py file exists in the backend directory if not backend_module_path.exists(): - LOG.debug(f"Skipping {backend_name}: no backend.py file found") + LOG.debug("Skipping %s: no backend.py file found", backend_name) continue try: - LOG.debug(f"Loading storage backend: {backend_name}") + LOG.debug("Loading storage backend: %s", backend_name) # Import the backend module from the backend subdirectory mod = importlib.import_module( f"sunbeam.storage.backends.{backend_name}.backend" @@ -96,13 +96,13 @@ def _load_backends(self) -> None: backend_instance = attr() self._backends[backend_instance.backend_type] = backend_instance LOG.debug( - "Registered storage backend: " - + backend_instance.backend_type + "Registered storage backend: %s", + backend_instance.backend_type, ) except Exception as e: LOG.debug("Failed to load storage backend", exc_info=True) - LOG.warning(f"Failed to load storage backend {backend_name}: {e}") + LOG.warning("Failed to load storage backend %s: %r", backend_name, e) self._loaded = True @@ -138,7 +138,7 @@ def 
register(self, cli: click.Group, deployment: Deployment) -> None: self.register_cli_commands(storage, deployment) LOG.debug("Storage backend commands registered successfully") except Exception as e: - LOG.error(f"Failed to register storage backend commands: {e}") + LOG.error("Failed to register storage backend commands: %r", e) raise e def register_cli_commands( # noqa: C901 @@ -268,7 +268,7 @@ def show_backend(ctx, backend_name: str): except Exception as e: backend_name = getattr(backend, "name", "unknown") LOG.warning( - "Backend %s failed to register CLI: %s", + "Backend %s failed to register CLI: %r", backend_name, e, ) diff --git a/sunbeam-python/sunbeam/storage/service.py b/sunbeam-python/sunbeam/storage/service.py index 938606e96..25f5f8bc8 100644 --- a/sunbeam-python/sunbeam/storage/service.py +++ b/sunbeam-python/sunbeam/storage/service.py @@ -64,10 +64,13 @@ def list_backends(self) -> list[StorageBackendInfo]: config=backend.config, ) backends.append(backend_info) - LOG.debug(f"Found {backend.type} backend: {backend.name}") + LOG.debug("Found %s backend: %s", backend.type, backend.name) except Exception as e: LOG.warning( - f"Error processing {backend.type} backend {backend.name}: {e}" + "Error processing %s backend %s: %r", + backend.type, + backend.name, + e, ) continue return backends @@ -95,7 +98,7 @@ def _get_application_status(self, jhelper: JujuHelper, app_name: str) -> str: return "not-found" except Exception as e: - LOG.debug(f"Failed to get status for application {app_name}: {e}") + LOG.debug("Failed to get status for application %s: %r", app_name, e) return "unknown" def _get_application_charm(self, jhelper: JujuHelper, app_name: str) -> str: @@ -121,9 +124,8 @@ def _get_application_charm(self, jhelper: JujuHelper, app_name: str) -> str: return charm_url return "Not Found" - except Exception as e: - LOG.debug(f"Failed to get charm for application {app_name}: {e}") + LOG.debug("Failed to get charm for application %s: %r", app_name, e) return 
"Unknown" def backend_exists(self, backend_name: str, backend_type: str) -> bool: @@ -143,5 +145,5 @@ def get_backend(self, backend_name: str) -> StorageBackend: try: return client.cluster.get_storage_backend(backend_name) except StorageBackendNotFoundException as e: - LOG.debug(f"Storage backend not found: {backend_name}", exc_info=True) + LOG.debug("Storage backend not found: %s", backend_name, exc_info=True) raise BackendNotFoundException() from e diff --git a/sunbeam-python/sunbeam/storage/steps.py b/sunbeam-python/sunbeam/storage/steps.py index 601971900..338168795 100644 --- a/sunbeam-python/sunbeam/storage/steps.py +++ b/sunbeam-python/sunbeam/storage/steps.py @@ -88,7 +88,7 @@ def _check_juju_authentication(self) -> Result: # If we can list models, we're authenticated models = self.jhelper.models() LOG.debug( - f"Juju authentication check successful, found {len(models)} models" + "Juju authentication check successful, found %d models", len(models) ) return Result(ResultType.COMPLETED) @@ -177,7 +177,7 @@ def run(self, context: StepContext) -> Result: "Please deploy OpenStack storage services first.", ) except Exception as e: - LOG.debug(f"Failed to check cinder-volume application: {e}") + LOG.debug("Failed to check cinder-volume application: %r", e) return Result( ResultType.FAILED, "Unable to verify cinder-volume application. 
" @@ -187,7 +187,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except Exception as e: - LOG.error(f"Failed to validate storage prerequisites: {e}") + LOG.error("Failed to validate storage prerequisites: %r", e) return Result(ResultType.FAILED, str(e)) @@ -368,7 +368,7 @@ def prompt( self.variables, by_name=True ) except pydantic.ValidationError as e: - LOG.error(f"Invalid configuration: {e}") + LOG.error("Invalid configuration: %r", e) raise e write_answers(self.client, self.config_key, self.variables) @@ -396,8 +396,8 @@ def run(self, context: StepContext) -> Result: # Ensure fresh Juju credentials and Terraform env before applying try: self.deployment.reload_tfhelpers() - except Exception as cred_err: - LOG.debug(f"Failed to reload credentials/env: {cred_err}") + except Exception as e: + LOG.debug("Failed to reload credentials/env: %r", e) # Merge with existing backends so we don't overwrite them backend_key = self.backend_name @@ -438,8 +438,10 @@ def run(self, context: StepContext) -> Result: raise e except Exception as e: LOG.error( - f"Failed to deploy {self.backend_instance.display_name} " - f"backend {self.backend_name}: {e}" + "Failed to deploy %s backend %s: %r", + self.backend_instance.display_name, + self.backend_name, + e, ) return Result(ResultType.FAILED, str(e)) # Let's save backend if not present @@ -464,7 +466,12 @@ def run(self, context: StepContext) -> Result: timeout=self.get_application_timeout(), ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning( + "Timed out deploying %s backend %s: %r", + self.backend_instance.display_name, + self.backend_name, + e, + ) return Result(ResultType.FAILED, str(e)) self.backend_instance.enable_backend(self.client) @@ -538,14 +545,14 @@ def run(self, context: StepContext) -> Result: # Ensure fresh Juju credentials and Terraform env before destroying/applying try: self.deployment.reload_tfhelpers() - except Exception as cred_err: - LOG.debug(f"Failed to 
reload credentials/env: {cred_err}") + except Exception as e: + LOG.debug("Failed to reload credentials/env: %r", e) # First, read and validate the current configuration try: tfvars = read_config(self.client, self.backend_instance.tfvar_config_key) except ConfigItemNotFoundException: - LOG.warning(f"No configuration found for backend {self.backend_name}") + LOG.warning("No configuration found for backend %s", self.backend_name) tfvars = {} backends = tfvars.get("backends", {}) @@ -554,8 +561,10 @@ def run(self, context: StepContext) -> Result: backends.pop(self.backend_name, None) # For removal: update config and apply atomically - LOG.info(f"Performing removal for backend {self.backend_name}") - LOG.info(f"Remaining backends after removal: {list(tfvars['backends'].keys())}") + LOG.info("Performing removal for backend %s", self.backend_name) + LOG.info( + "Remaining backends after removal: %s", list(tfvars["backends"].keys()) + ) # First update the configuration update_config( @@ -567,8 +576,8 @@ def run(self, context: StepContext) -> Result: try: LOG.info( - f"Writing Terraform variables with backends: " - f"{list(tfvars.get('backends', {}).keys())}" + "Writing Terraform variables with backends: %s", + list(tfvars.get("backends", {}).keys()), ) self.tfhelper.update_tfvars_and_apply_tf( self.client, @@ -579,7 +588,7 @@ def run(self, context: StepContext) -> Result: ) except TerraformStateLockedException as e: # Bubble up to trigger retry - LOG.debug("Error: Terraform state locked") + LOG.debug("Terraform state locked") raise e except TerraformException: # Restore the backend configuration if apply fails @@ -592,7 +601,7 @@ def run(self, context: StepContext) -> Result: try: self.client.cluster.delete_storage_backend(self.backend_name) except StorageBackendNotFoundException: - LOG.debug(f"Backend {self.backend_name} not found in clusterd") + LOG.debug("Backend %s is not found in clusterd", self.backend_name) try: # Wipe previously saved answers @@ -601,7 +610,8 @@ 
def run(self, context: StepContext) -> Result: ) except ConfigItemNotFoundException: LOG.debug( - f"Configuration for backend {self.backend_name} not found in clusterd" + "Configuration for backend %s is not found in clusterd", + self.backend_name, ) return Result(ResultType.COMPLETED) @@ -771,8 +781,9 @@ def run(self, context: StepContext) -> Result: ) except Exception as e: LOG.error( - f"Failed to deploy non-HA cinder-volume for backend " - f"{self.backend_name}: {e}" + "Failed to deploy non-HA cinder-volume for backend %s: %r", + self.backend_name, + e, ) return Result(ResultType.FAILED, str(e)) @@ -784,7 +795,11 @@ def run(self, context: StepContext) -> Result: timeout=self.get_application_timeout(), ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning( + "Timed out deploying non-HA cinder-volume for backend %s: %r", + self.backend_name, + e, + ) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -873,8 +888,9 @@ def run(self, context: StepContext) -> Result: ) except Exception as e: LOG.error( - f"Failed to destroy non-HA cinder-volume for backend " - f"{self.backend_name}: {e}" + "Failed to destroy non-HA cinder-volume for backend %s: %r", + self.backend_name, + e, ) return Result(ResultType.FAILED, str(e)) @@ -885,7 +901,11 @@ def run(self, context: StepContext) -> Result: timeout=self.get_application_timeout(), ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning( + "Timed out destroying non-HA cinder-volume for backend %s: %r", + self.backend_name, + e, + ) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/utils.py b/sunbeam-python/sunbeam/utils.py index 50146fb97..0e419e431 100644 --- a/sunbeam-python/sunbeam/utils.py +++ b/sunbeam-python/sunbeam/utils.py @@ -99,7 +99,7 @@ def _get_default_gw_iface_fallback() -> str | None: iface = None with open("/proc/net/route", "r") as f: contents = [line.strip() for line in f.readlines() if line.strip()] 
- logging.debug(contents) + LOG.debug("Contents of /proc/net/route: %s", contents) entries: list[dict[str, str]] = [] # First line is a header line of the table contents. Note, we skip blank entries @@ -251,7 +251,7 @@ def __call__(self, *args, **kwargs): " Please see https://canonical-openstack.readthedocs-hosted.com/en/latest/how-to/troubleshooting/inspecting-the-cluster/" " for troubleshooting information." ) - LOG.warn(message) + LOG.warning(message) LOG.error("Error: %s", e) sys.exit(1) @@ -309,7 +309,7 @@ def first_connected_server(servers: list) -> str | None: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ip_port = server.rsplit(":", 1) if len(ip_port) != 2: - LOG.debug(f"Server {server} not in : format") + LOG.debug("Server %s is not in the : format", server) continue ip = ipaddress.ip_address(ip_port[0].lstrip("[").rstrip("]")) @@ -325,8 +325,7 @@ def first_connected_server(servers: list) -> str | None: s.connect((str(ip), port)) return server except Exception as e: - LOG.debug(str(e)) - LOG.debug(f"Not able to connect to {ip} {port}") + LOG.debug("Not able to connect to %s:%s server: %r", ip, port, e) finally: s.close() diff --git a/sunbeam-python/tests/functional/local/test_dpdk.py b/sunbeam-python/tests/functional/local/test_dpdk.py index ff2a4a0f6..ac23e87df 100644 --- a/sunbeam-python/tests/functional/local/test_dpdk.py +++ b/sunbeam-python/tests/functional/local/test_dpdk.py @@ -6,6 +6,8 @@ from . 
import utils +LOG = logging.getLogger(__name__) + def test_dpdk( tmp_path, @@ -36,7 +38,7 @@ def test_dpdk( base_manifest_path=manifest_path, ) - logging.info("Applying DPDK configuration.") + LOG.info("Applying DPDK configuration") utils.sunbeam_command( f"-v configure dpdk -m {dpdk_manifest_path} --accept-defaults" ) @@ -62,12 +64,12 @@ def test_dpdk( == "true" ) if dpdk_initialized: - logging.info("OVS DPDK initialized.") + LOG.info("OVS DPDK initialized") break else: - logging.info("OVS DPDK not initialized yet.") + LOG.info("OVS DPDK not initialized yet") if attempt < dpdk_init_retries - 1: - logging.debug("Rechecking in %s seconds.", dpdk_init_check_interval) + LOG.debug("Rechecking in %d seconds", dpdk_init_check_interval) time.sleep(dpdk_init_check_interval) assert dpdk_initialized, "OVS DPDK did not initialize in time." diff --git a/sunbeam-python/tests/functional/local/test_network_node.py b/sunbeam-python/tests/functional/local/test_network_node.py index 63cb5684b..ef95d074c 100644 --- a/sunbeam-python/tests/functional/local/test_network_node.py +++ b/sunbeam-python/tests/functional/local/test_network_node.py @@ -9,6 +9,8 @@ from . import utils +LOG = logging.getLogger(__name__) + @pytest.fixture(scope="module") def ensure_network_node_cluster(manifest_path, openstack_snap_channel): @@ -20,16 +22,16 @@ def ensure_network_node_cluster(manifest_path, openstack_snap_channel): deployments = info.get("deployments") or [] if deployments: - logging.warning( + LOG.warning( "Until the ability to run multi-node functional tests is added, " - "skip the tests if there's an existing deployment." 
+ "skip the tests if there's an existing deployment" ) return utils.ensure_openstack_snap_installed(openstack_snap_channel) utils.install_bootstrap_prerequisites() - logging.info("Bootstrapping cluster with network node role.") + LOG.info("Bootstrapping cluster with network node role") cmd = "cluster bootstrap --accept-defaults --role network" if manifest_path: cmd += f" --manifest {manifest_path}" @@ -58,7 +60,7 @@ def test_network_node( base_manifest_path=manifest_path, ) - logging.info("Configuring network node deployment.") + LOG.info("Configuring network node deployment") utils.sunbeam_command( f"-v configure deployment -m {network_manifest_path} --accept-defaults" ) @@ -86,7 +88,7 @@ def test_network_node( if heading == "external_ids": external_ids = utils.parse_ovsdb_data(record[position]) - logging.info("Checking OVN configuration.") + LOG.info("Checking OVN configuration") # Verify bridge mappings assert "ovn-bridge-mappings" in external_ids diff --git a/sunbeam-python/tests/functional/local/test_sriov.py b/sunbeam-python/tests/functional/local/test_sriov.py index 24720ce77..88078b98f 100644 --- a/sunbeam-python/tests/functional/local/test_sriov.py +++ b/sunbeam-python/tests/functional/local/test_sriov.py @@ -9,6 +9,8 @@ from . import snap, utils +LOG = logging.getLogger(__name__) + # The number of VFs to create if there aren't any existing ones. 
DEFAULT_NUM_VFS = 4 @@ -26,7 +28,7 @@ def test_sriov( pytest.skip("No SR-IOV interface specified, skipping SR-IOV tests.") pci_address = utils.get_iface_pci_address(sriov_interface_name) - logging.info("Detected SR-IOV interface PCI address: %s", pci_address) + LOG.info("Detected SR-IOV interface PCI address: %s", pci_address) if not utils.is_sriov_capable(pci_address): raise Exception( @@ -35,7 +37,7 @@ def test_sriov( ) has_hw_offload = utils.is_hw_offload_available(sriov_interface_name) - logging.info("Hardware offloading available: %s", has_hw_offload) + LOG.info("Hardware offloading available: %s", has_hw_offload) if has_hw_offload: utils.ensure_hw_offload_enabled(sriov_interface_name) @@ -43,7 +45,7 @@ def test_sriov( num_vfs = utils.get_sriov_numvfs(pci_address) if num_vfs <= 0: num_vfs = DEFAULT_NUM_VFS - logging.info("No SR-IOV VFs defined, creating %s", num_vfs) + LOG.info("No SR-IOV VFs defined, creating %s", num_vfs) utils.set_sriov_numvfs(pci_address, num_vfs) # The device specs are expected to contain a physnet unless overlay @@ -71,7 +73,7 @@ def test_sriov( base_manifest_path=manifest_path, ) - logging.info("Applying SR-IOV configuration.") + LOG.info("Applying SR-IOV configuration.") utils.sunbeam_command( f"-v configure sriov -m {sriov_manifest_path} --accept-defaults" ) diff --git a/sunbeam-python/tests/functional/local/utils.py b/sunbeam-python/tests/functional/local/utils.py index 35278fc8e..af54469f8 100644 --- a/sunbeam-python/tests/functional/local/utils.py +++ b/sunbeam-python/tests/functional/local/utils.py @@ -15,6 +15,8 @@ from . import snap +LOG = logging.getLogger(__name__) + # Sunbeam may also be installed through tox but we're actually interested in # using the snap executable. 
SUNBEAM_BINARY = "/snap/bin/sunbeam" @@ -47,7 +49,7 @@ def get_sunbeam_deployments() -> dict: def install_bootstrap_prerequisites(): - logging.info("Installing Sunbeam bootstrap prerequisites.") + LOG.info("Installing Sunbeam bootstrap prerequisites") script = subprocess.check_output( [SUNBEAM_BINARY, "prepare-node-script", "--bootstrap"] ) @@ -64,7 +66,7 @@ def bootstrap_local_cluster( roles = ["control", "compute", "storage"] role_arg = ",".join(roles) - logging.info("Bootstrapping Sunbeam. Manifest: %s, roles: %s", manifest_path, roles) + LOG.info("Bootstrapping Sunbeam. Manifest: %s, roles: %s", manifest_path, roles) cmd = f"-v cluster bootstrap --accept-defaults --role {role_arg}" if manifest_path: cmd += f" --manifest {manifest_path}" @@ -72,7 +74,7 @@ def bootstrap_local_cluster( def ensure_openstack_snap_installed(snap_channel: str | None = None): - logging.info("Ensuring that the Openstack snap in installed.") + LOG.info("Ensuring that the Openstack snap is installed") snap_cache = snap.SnapCache() openstack_snap = snap_cache["openstack"] if not openstack_snap.present: @@ -91,7 +93,7 @@ def ensure_local_cluster_bootstrapped( active_deployment_name = info.get("active") if not deployments: - logging.info("No Sunbeam deployment found, bootstrapping a new cluster.") + LOG.info("No Sunbeam deployment found, bootstrapping a new cluster") return bootstrap_local_cluster(manifest_path, roles) active_deployment = None @@ -105,7 +107,7 @@ def ensure_local_cluster_bootstrapped( if active_deployment["type"] != "local": raise Exception("The active Sunbeam deployment is not a local deployment.") - logging.info("Reusing local Sunbeam deployment: %s", active_deployment["name"]) + LOG.info("Reusing local Sunbeam deployment: %s", active_deployment["name"]) def apply_manifest( diff --git a/sunbeam-python/tests/unit/sunbeam/test_clusterd.py b/sunbeam-python/tests/unit/sunbeam/test_clusterd.py index 9e4992e15..752f3ec7a 100644 --- 
a/sunbeam-python/tests/unit/sunbeam/test_clusterd.py +++ b/sunbeam-python/tests/unit/sunbeam/test_clusterd.py @@ -136,12 +136,13 @@ def test_update_juju_user_is_skipped_when_token_matches( assert result.result_type == ResultType.SKIPPED assert any( rec.levelno == logging.DEBUG - and "Juju user juju-user is found in database." in rec.getMessage() + and "Juju user juju-user is found in the database" in rec.getMessage() for rec in caplog.records ) assert any( rec.levelno == logging.DEBUG - and "Juju user token is up to date, skipping update." in rec.getMessage() + and "Juju user juju-user token is up to date, skipping update" + in rec.getMessage() for rec in caplog.records ) @@ -155,10 +156,13 @@ def test_update_juju_user_updates_when_token_changed( assert result.result_type == ResultType.COMPLETED assert any( rec.levelno == logging.DEBUG - and "Juju user juju-user is found in database." in rec.getMessage() + and "Juju user juju-user is found in the database" in rec.getMessage() for rec in caplog.records ) - assert "Juju user token is up to date, skipping update." not in caplog.text + assert ( + "Juju user juju-user token is up to date, skipping update" + not in caplog.text + ) step.client.cluster.update_juju_user = MagicMock() result2 = step.run(step_context) @@ -175,7 +179,7 @@ def test_update_juju_user_user_not_found(self, cclient, step_context, caplog): assert result.result_type == ResultType.COMPLETED assert any( rec.levelno == logging.DEBUG - and "Juju user juju-user is not found in database, adding user." 
+ and "Juju user juju-user is not found in the database, adding user" in rec.getMessage() for rec in caplog.records ) From cde0f5b48107d8ff75f998ef9ea489a5ff2c3292 Mon Sep 17 00:00:00 2001 From: Himawan Winarto Date: Thu, 23 Apr 2026 23:32:19 -0400 Subject: [PATCH 3/5] refactor: unify logging format and traceback Signed-off-by: Himawan Winarto --- sunbeam-python/sunbeam/commands/configure.py | 4 +- .../sunbeam/commands/generate_cloud_config.py | 2 +- sunbeam-python/sunbeam/core/checks.py | 4 +- sunbeam-python/sunbeam/core/manifest.py | 2 +- .../sunbeam/features/baremetal/steps.py | 2 +- .../features/instance_recovery/consul.py | 4 +- .../features/interface/v1/openstack.py | 4 +- .../sunbeam/features/loadbalancer/feature.py | 4 +- .../sunbeam/features/observability/feature.py | 10 +- .../sunbeam/features/pro/feature.py | 2 +- .../sunbeam/features/validation/feature.py | 2 +- .../sunbeam/provider/local/steps.py | 4 +- sunbeam-python/sunbeam/provider/maas/steps.py | 4 +- sunbeam-python/sunbeam/steps/certificates.py | 2 +- sunbeam-python/sunbeam/steps/juju.py | 161 ++++++++++-------- sunbeam-python/sunbeam/steps/k8s.py | 4 +- sunbeam-python/sunbeam/steps/maintenance.py | 4 +- sunbeam-python/sunbeam/steps/openstack.py | 8 +- .../sunbeam/steps/upgrades/inter_channel.py | 2 +- sunbeam-python/sunbeam/utils.py | 6 +- 20 files changed, 131 insertions(+), 104 deletions(-) diff --git a/sunbeam-python/sunbeam/commands/configure.py b/sunbeam-python/sunbeam/commands/configure.py index c3726fe99..9bcde1635 100644 --- a/sunbeam-python/sunbeam/commands/configure.py +++ b/sunbeam-python/sunbeam/commands/configure.py @@ -225,7 +225,7 @@ def run(self, context: StepContext) -> Result: self._print_openrc(tf_output) return Result(ResultType.COMPLETED) except TerraformException as e: - LOG.exception("Error getting Terraform output") + LOG.warning("Error getting Terraform output: %r", e) return Result(ResultType.FAILED, str(e)) def _print_openrc(self, tf_output: dict) -> None: @@ -293,7 
+293,7 @@ def run(self, context: StepContext) -> Result: self.tfhelper.apply(reporter=context.reporter) return Result(ResultType.COMPLETED) except TerraformException as e: - LOG.exception("Error configuring cloud") + LOG.warning("Error configuring cloud: %r", e) return Result(ResultType.FAILED, str(e)) diff --git a/sunbeam-python/sunbeam/commands/generate_cloud_config.py b/sunbeam-python/sunbeam/commands/generate_cloud_config.py index 1edce12da..e2ee5be8f 100644 --- a/sunbeam-python/sunbeam/commands/generate_cloud_config.py +++ b/sunbeam-python/sunbeam/commands/generate_cloud_config.py @@ -93,7 +93,7 @@ def run(self, context: StepContext) -> Result: self._print_cloud_config(tf_output) return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error initializing Terraform") + LOG.warning("Error initializing Terraform: %r", e) return Result(ResultType.FAILED, str(e)) def _generate_cloud_config(self, is_admin: bool, tf_output: dict) -> dict: diff --git a/sunbeam-python/sunbeam/core/checks.py b/sunbeam-python/sunbeam/core/checks.py index 7da735b8f..1cb72e611 100644 --- a/sunbeam-python/sunbeam/core/checks.py +++ b/sunbeam-python/sunbeam/core/checks.py @@ -506,14 +506,14 @@ def run(self) -> bool: try: token_bytes = base64.b64decode(self.token) except Exception: - LOG.exception("Failed to decode join token") + LOG.warning("Failed to decode join token: invalid base64 encoding") self.message = "Join token is not a valid base64 string" return False try: token = json.loads(token_bytes) except Exception: - LOG.exception("Failed to decode join token") + LOG.warning("Failed to decode join token: invalid JSON format") self.message = "Join token content is not a valid JSON-encoded object" return False diff --git a/sunbeam-python/sunbeam/core/manifest.py b/sunbeam-python/sunbeam/core/manifest.py index 4f072b9a7..47ae075c5 100644 --- a/sunbeam-python/sunbeam/core/manifest.py +++ b/sunbeam-python/sunbeam/core/manifest.py @@ -643,5 +643,5 @@ def 
run(self, context: StepContext) -> Result: ) return Result(ResultType.COMPLETED, id) except Exception as e: - LOG.debug(e) + LOG.debug("Failed to add manifest to cluster db: %r", e) return Result(ResultType.FAILED, str(e)) diff --git a/sunbeam-python/sunbeam/features/baremetal/steps.py b/sunbeam-python/sunbeam/features/baremetal/steps.py index d212264ee..95ab5bf38 100644 --- a/sunbeam-python/sunbeam/features/baremetal/steps.py +++ b/sunbeam-python/sunbeam/features/baremetal/steps.py @@ -124,7 +124,7 @@ def run(self, context: StepContext) -> Result: try: self._run(reporter=context.reporter) except Exception as e: - LOG.exception("Failed to execute step %s: %r", self.name, e) + LOG.warning("Failed to execute step %s: %r", self.name, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/features/instance_recovery/consul.py b/sunbeam-python/sunbeam/features/instance_recovery/consul.py index dfdaaeb99..c2666aa71 100644 --- a/sunbeam-python/sunbeam/features/instance_recovery/consul.py +++ b/sunbeam-python/sunbeam/features/instance_recovery/consul.py @@ -213,7 +213,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error deploying consul client") + LOG.warning("Error deploying consul client: %r", e) return Result(ResultType.FAILED, str(e)) apps = ConsulFeature.set_consul_client_application_names(self.deployment) @@ -260,7 +260,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.destroy(reporter=context.reporter) except TerraformException as e: - LOG.exception("Error destroying consul client") + LOG.warning("Error destroying consul client: %r", e) return Result(ResultType.FAILED, str(e)) apps = ConsulFeature.set_consul_client_application_names(self.deployment) diff --git a/sunbeam-python/sunbeam/features/interface/v1/openstack.py 
b/sunbeam-python/sunbeam/features/interface/v1/openstack.py index a215e561d..8aa43d3ae 100644 --- a/sunbeam-python/sunbeam/features/interface/v1/openstack.py +++ b/sunbeam-python/sunbeam/features/interface/v1/openstack.py @@ -415,7 +415,7 @@ def upgrade_hook( LOG.debug("Release upgrade is not supported for feature %s", self.name) return - # Nothig to do if the plan is openstack-plan as it is taken + # Nothing to do if the plan is openstack-plan as it is taken # care during control plane refresh if self.tf_plan_location == TerraformPlanLocation.SUNBEAM_TERRAFORM_REPO: LOG.debug( @@ -481,7 +481,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except TerraformException as e: - LOG.exception("Error upgrading feature %s", self.feature.name) + LOG.warning("Error upgrading feature %s: %r", self.feature.name, e) return Result(ResultType.FAILED, str(e)) status_queue: queue.Queue[str] = queue.Queue() task = update_status_background(self, apps, status_queue, context.status) diff --git a/sunbeam-python/sunbeam/features/loadbalancer/feature.py b/sunbeam-python/sunbeam/features/loadbalancer/feature.py index a1498b4ec..759256416 100644 --- a/sunbeam-python/sunbeam/features/loadbalancer/feature.py +++ b/sunbeam-python/sunbeam/features/loadbalancer/feature.py @@ -952,7 +952,7 @@ def run(self, context: StepContext) -> Result: queue=status_queue, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for CNI applications %s: %s", apps, e) return Result(ResultType.FAILED, str(e)) finally: task.stop() @@ -1110,7 +1110,7 @@ def run(self, context: StepContext) -> Result: timeout=LOADBALANCER_CNI_DEPLOY_TIMEOUT, ) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for CNI applications to be removed: %s", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/features/observability/feature.py 
b/sunbeam-python/sunbeam/features/observability/feature.py index 117d30381..698ba5a75 100644 --- a/sunbeam-python/sunbeam/features/observability/feature.py +++ b/sunbeam-python/sunbeam/features/observability/feature.py @@ -162,7 +162,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error deploying Observability Stack") + LOG.warning("Error deploying Observability Stack: %r", e) return Result(ResultType.FAILED, str(e)) apps = self.jhelper.get_application_names(self.model) @@ -231,7 +231,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error updating Observability Model config") + LOG.warning("Error updating Observability Model config: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -283,7 +283,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error deploying observability agent") + LOG.warning("Error deploying observability agent: %r", e) return Result(ResultType.FAILED, str(e)) app = "opentelemetry-collector" @@ -326,7 +326,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.destroy(reporter=context.reporter) except TerraformException as e: - LOG.exception("Error destroying Observability Stack") + LOG.warning("Error destroying Observability Stack: %r", e) return Result(ResultType.FAILED, str(e)) try: @@ -367,7 +367,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.destroy(reporter=context.reporter) except TerraformException as e: - LOG.exception("Error destroying observability agent") + LOG.warning("Error destroying observability agent: %r", e) return Result(ResultType.FAILED, str(e)) apps = ["opentelemetry-collector"] diff --git 
a/sunbeam-python/sunbeam/features/pro/feature.py b/sunbeam-python/sunbeam/features/pro/feature.py index 19cafaff6..f76398936 100644 --- a/sunbeam-python/sunbeam/features/pro/feature.py +++ b/sunbeam-python/sunbeam/features/pro/feature.py @@ -121,7 +121,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.FAILED, message) except TimeoutError as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for %s application: %r", APPLICATION, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/features/validation/feature.py b/sunbeam-python/sunbeam/features/validation/feature.py index 27d48dafe..edf5eeb39 100644 --- a/sunbeam-python/sunbeam/features/validation/feature.py +++ b/sunbeam-python/sunbeam/features/validation/feature.py @@ -239,7 +239,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except (TerraformException, TerraformStateLockedException) as e: - LOG.exception("Error configuring validation feature") + LOG.warning("Error configuring validation feature: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/provider/local/steps.py b/sunbeam-python/sunbeam/provider/local/steps.py index 1c4821e24..1b60a5235 100644 --- a/sunbeam-python/sunbeam/provider/local/steps.py +++ b/sunbeam-python/sunbeam/provider/local/steps.py @@ -785,7 +785,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {name} configuration" - LOG.exception(msg) + LOG.warning(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) @@ -997,7 +997,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {name} configuration" - LOG.exception(msg) + LOG.warning(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) diff 
--git a/sunbeam-python/sunbeam/provider/maas/steps.py b/sunbeam-python/sunbeam/provider/maas/steps.py index b1a31ce0f..c628b3b18 100644 --- a/sunbeam-python/sunbeam/provider/maas/steps.py +++ b/sunbeam-python/sunbeam/provider/maas/steps.py @@ -2451,7 +2451,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {node_name} configuration" - LOG.exception(msg) + LOG.warning(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) @@ -2622,7 +2622,7 @@ def run(self, context: StepContext) -> Result: ) except (ActionFailedException, TimeoutError): msg = f"Unable to set hypervisor {node_name} configuration" - LOG.exception(msg) + LOG.warning(msg) return Result(ResultType.FAILED, msg) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/certificates.py b/sunbeam-python/sunbeam/steps/certificates.py index c85b26d35..2304aaa89 100644 --- a/sunbeam-python/sunbeam/steps/certificates.py +++ b/sunbeam-python/sunbeam/steps/certificates.py @@ -77,7 +77,7 @@ def run(self, context: StepContext) -> Result: timeout=CERTIFICATES_APP_TIMEOUT, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for certificates application: %r", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/juju.py b/sunbeam-python/sunbeam/steps/juju.py index cc0f5a27b..7c7b82e2e 100644 --- a/sunbeam-python/sunbeam/steps/juju.py +++ b/sunbeam-python/sunbeam/steps/juju.py @@ -85,11 +85,12 @@ def is_skip(self, context: StepContext) -> Result: try: juju_clouds = self.get_clouds(cloud_type, local=True) except subprocess.CalledProcessError as e: - LOG.exception( + LOG.warning( "Error determining whether to skip the bootstrap " - "process. Defaulting to not skip." + "process. 
Defaulting to not skip: %s: %s", + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud not in juju_clouds: @@ -104,11 +105,12 @@ def is_skip(self, context: StepContext) -> Result: cloud_type, local=False, controller=self.controller ) except subprocess.CalledProcessError as e: - LOG.exception( + LOG.warning( "Error determining whether to skip the bootstrap " - "process. Defaulting to not skip." + "process. Defaulting to not skip: %s: %s", + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud in juju_clouds_on_controller: @@ -173,11 +175,12 @@ def is_skip(self, context: StepContext) -> Result: if "not found" in e.stderr: return Result(ResultType.COMPLETED) - LOG.exception( + LOG.warning( "Error determining whether to skip the bootstrap " - "process. Defaulting to not skip." + "process. Defaulting to not skip: %s: %s", + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) client_creds = credentials.get("client-credentials", {}) @@ -198,8 +201,7 @@ def run(self, context: StepContext) -> Result: try: self.add_credential(self.cloud, self.definition, self.controller) except subprocess.CalledProcessError as e: - LOG.exception("Error adding credentials to Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error adding credentials to Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -238,11 +240,12 @@ def is_skip(self, context: StepContext) -> Result: try: self.juju_clouds = self.get_clouds(self.cloud_type) except subprocess.CalledProcessError as e: - LOG.exception( + LOG.warning( "Error determining whether to skip the bootstrap " - "process. Defaulting to not skip." + "process. 
Defaulting to not skip: %s: %s", + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) if self.cloud not in self.juju_clouds: return Result( @@ -253,7 +256,7 @@ def is_skip(self, context: StepContext) -> Result: self.get_controller(self.controller) return Result(ResultType.SKIPPED) except ControllerNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Controller %s is not found: %r", self.controller, e) return Result(ResultType.COMPLETED) def run(self, context: StepContext) -> Result: @@ -279,9 +282,8 @@ def run(self, context: StepContext) -> Result: try: k, v = self.bootstrap_args[index].split("=", 1) - except ValueError as e: - LOG.exception("Error bootstrapping Juju") - LOG.debug(str(e)) + except ValueError: + LOG.warning("Error parsing bootstrap config argument") msg = ( "Value Error: Expected boostrap argument in form of " f"key=value, got {self.bootstrap_args[index]}" @@ -333,8 +335,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error bootstrapping Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error bootstrapping Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -439,8 +440,7 @@ def run(self, context: StepContext) -> Result: try: process = subprocess.run(cmd, capture_output=True, text=True, check=True) except subprocess.CalledProcessError as e: - LOG.exception("Error destroying controller") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error destroying controller: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) LOG.debug( "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr @@ -471,8 +471,7 @@ def is_skip(self, context: StepContext) -> Result: if self.username in user_names: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: - LOG.exception("Error getting users list from juju.") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error getting users list from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -526,8 +525,12 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, message=token) except subprocess.CalledProcessError as e: - LOG.exception("Error creating user %s in Juju", self.username) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error creating user %s in Juju: %s: %s", + self.username, + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) @@ -557,8 +560,7 @@ def run(self, context: StepContext) -> Result: ResultType.FAILED, f"User {self.username} not found in Juju" ) except subprocess.CalledProcessError as e: - LOG.exception("Error getting list of users from Juju.") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error getting list of users from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) try: @@ -587,7 +589,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, message=token) except subprocess.CalledProcessError as e: - LOG.exception("Error resetting user %s in Juju", self.username) + LOG.warning("Error resetting user %s in Juju: %r", self.username, e) return Result(ResultType.FAILED, str(e)) @@ -646,7 +648,7 @@ def run(self, context: StepContext) -> Result: ) return Result(ResultType.COMPLETED) - LOG.exception( + LOG.warning( "Error granting user %r admin access on model %r: %s", self.username, model_with_owner, @@ -677,8 +679,7 @@ def is_skip(self, context: StepContext) -> Result: if self.username not in user_names: return Result(ResultType.SKIPPED) except 
subprocess.CalledProcessError as e: - LOG.exception("Error getting list of users from Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error getting list of users from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -700,8 +701,12 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error removing user %s from Juju", self.username) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error removing user %s from Juju: %s: %s", + self.username, + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) @@ -742,7 +747,11 @@ def is_skip(self, context: StepContext) -> Result: self.juju_account = JujuAccount.load(self.data_location) LOG.debug("Local account found: %r", self.juju_account.user) except JujuAccountNotFound: - LOG.warning("Cannot find local Juju account: %r", self.juju_account.user) + LOG.warning( + "Cannot find local Juju account %r in %r", + self.username, + self.data_location, + ) return Result(ResultType.FAILED, "Account was not registered locally") try: @@ -753,8 +762,11 @@ def is_skip(self, context: StepContext) -> Result: return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: if "No controllers registered" not in e.stderr: - LOG.exception("Error retrieving authenticated user from Juju.") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error retrieving authenticated user from Juju: %s: %s", + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) # Error is about no controller register, which is okay is this case @@ -850,7 +862,7 @@ def run(self, context: StepContext) -> Result: while True: index = child.expect(expect_list, PEXPECT_TIMEOUT) LOG.debug( - "Juju registraton: expect got regex related to %r", + "Juju registration: expect got regex related to %r", expect_list[index], ) if index in (0, 1, 3): @@ -874,7 +886,7 @@ def run(self, context: 
StepContext) -> Result: LOG.debug("User registration completed") break except pexpect.TIMEOUT as e: - LOG.exception("Error registering user %s in Juju", self.username) + LOG.warning("Timeout registering user %s in Juju: %r", self.username, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1034,7 +1046,7 @@ def is_skip(self, context: StepContext) -> Result: try: self.model_with_owner = self.get_model_name_with_owner(self.model) except ModelNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Model %s is not found: %r", self.model, e) return Result(ResultType.FAILED, "Model not found") try: @@ -1048,8 +1060,7 @@ def is_skip(self, context: StepContext) -> Result: return Result(ResultType.SKIPPED, machine) except subprocess.CalledProcessError as e: - LOG.exception("Error retrieving machines list from Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error retrieving machines list from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1116,7 +1127,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED, machine) except pexpect.TIMEOUT: - LOG.exception("Error adding machine %s to Juju", self.machine_ip) + LOG.warning("Timeout adding machine %s to Juju", self.machine_ip) return Result(ResultType.FAILED, "TIMED OUT to add machine") @@ -1157,8 +1168,7 @@ def is_skip(self, context: StepContext) -> Result: LOG.debug("Machine does not exist") return Result(ResultType.SKIPPED) except subprocess.CalledProcessError as e: - LOG.exception("Error retrieving machine list from Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error retrieving machine list from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1197,8 +1207,12 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error removing 
machine %s from Juju", self.machine_id) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error removing machine %s from Juju: %s: %s", + self.machine_id, + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) @@ -1226,8 +1240,7 @@ def is_skip(self, context: StepContext) -> Result: if username == "admin": return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error retrieving user from Juju") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error retrieving user from Juju: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) return Result(ResultType.SKIPPED) @@ -1731,8 +1744,7 @@ def run(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) except subprocess.CalledProcessError as e: - LOG.exception("Error downloading Juju Controller charm") - LOG.warning("%s: %s", e, e.stderr) + LOG.warning("Error downloading Juju Controller charm: %s: %s", e, e.stderr) return Result(ResultType.FAILED, str(e)) @@ -1864,7 +1876,7 @@ def run(self, context: StepContext) -> Result: timeout=1200, ) except (JujuWaitException, TimeoutError) as e: - LOG.warning(str(e)) + LOG.warning("Timed out waiting for integration of %s: %r", apps, e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -1984,8 +1996,12 @@ def run(self, context: StepContext) -> Result: "Command finished. 
stdout=%r, stderr=%r", process.stdout, process.stderr ) except subprocess.CalledProcessError as e: - LOG.exception("Error in switching the controller to %s", self.controller) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error in switching the controller to %s: %s: %s", + self.controller, + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) @@ -2019,7 +2035,7 @@ def _get_controller(self, name: str) -> JujuController | None: try: controller = self.get_controller(name)["details"] except ControllerNotFoundException as e: - LOG.debug(str(e)) + LOG.debug("Controller %s is not found: %r", name, e) return None return JujuController( name=name, @@ -2260,12 +2276,13 @@ def _wait_for_model( return True except subprocess.CalledProcessError: - LOG.exception( - "Error in finding %s model in controller %s", + LOG.warning( + "Error in finding %s model in controller %s: %s: %s", self.model, self.from_controller, + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) raise JujuMigrationFailedError( f"Model {model} is not in either {to_controller} or" @@ -2309,13 +2326,15 @@ def _migrate_model( try: process = subprocess.run(cmd, capture_output=True, text=True, check=True) except subprocess.CalledProcessError as e: - LOG.exception( - "Error in migrating model %s from controller %s to controller %s", + LOG.warning( + "Error in migrating model %s from controller" + " %s to controller %s: %s: %s", self.model, self.from_controller, self.to_controller, + e, + e.stderr, ) - LOG.warning("%s: %s", e, e.stderr) raise JujuMigrationFailedError("Juju migrate command failed") from e @@ -2339,8 +2358,12 @@ def is_skip(self, context: StepContext) -> Result: try: self._switch_controller(self.to_controller) except subprocess.CalledProcessError as e: - LOG.exception("Error in switching to controller %s", self.to_controller) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error in switching to controller %s: %s: %s", + self.to_controller, + 
e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) @@ -2361,7 +2384,7 @@ def run(self, context: StepContext) -> Result: owner = self._get_model_owner(self.model, self.from_controller) except ValueError: msg = f"Error in determining the owner for model {self.model}" - LOG.exception(msg) + LOG.warning(msg) return Result(ResultType.FAILED, msg) # Migrate model @@ -2372,7 +2395,7 @@ def run(self, context: StepContext) -> Result: except JujuMigrationFailedError as e: return Result(ResultType.FAILED, str(e)) except TimeoutError: - LOG.exception("Error in finding model after migration") + LOG.warning("Timed out waiting for model %s to migrate", self.model) return Result( ResultType.FAILED, f"Timed out waiting for model {self.model} to migrate", @@ -2382,8 +2405,12 @@ def run(self, context: StepContext) -> Result: try: self._switch_controller(self.to_controller) except subprocess.CalledProcessError as e: - LOG.exception("Error in switching to controller %s", self.to_controller) - LOG.warning("%s: %s", e, e.stderr) + LOG.warning( + "Error in switching to controller %s: %s: %s", + self.to_controller, + e, + e.stderr, + ) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/k8s.py b/sunbeam-python/sunbeam/steps/k8s.py index e2e34b10d..9be415482 100644 --- a/sunbeam-python/sunbeam/steps/k8s.py +++ b/sunbeam-python/sunbeam/steps/k8s.py @@ -709,7 +709,7 @@ def is_skip(self, context: StepContext) -> Result: return Result(ResultType.COMPLETED) LOG.debug("%s: %s", e, e.stderr) - LOG.exception("Error retrieving juju credentails from controller") + LOG.warning("Error retrieving Juju credentials from controller: %r", e) return Result(ResultType.FAILED, str(e)) if self.credential_name in credentials.get("controller-credentials", {}).keys(): @@ -2129,7 +2129,7 @@ def run(self, context: StepContext) -> Result: try: self._wait_for_coredns_ready() except (TimeoutError, K8SError) as e: - LOG.warning(str(e)) + 
LOG.warning("CoreDNS readiness check failed: %s", e) return Result(ResultType.FAILED, str(e)) return Result(ResultType.COMPLETED) diff --git a/sunbeam-python/sunbeam/steps/maintenance.py b/sunbeam-python/sunbeam/steps/maintenance.py index 94ffdb4c9..06d0d3130 100644 --- a/sunbeam-python/sunbeam/steps/maintenance.py +++ b/sunbeam-python/sunbeam/steps/maintenance.py @@ -111,7 +111,7 @@ def run(self, context: StepContext) -> Result: audit = self._create_audit() actions = self._get_actions(audit) except tenacity.RetryError as e: - LOG.warning(e) + LOG.warning("Failed to create Watcher audit: %r", e) return Result(ResultType.FAILED, "Unable to create Watcher audit") return Result( ResultType.COMPLETED, @@ -202,7 +202,7 @@ def run(self, context: StepContext) -> Result: tenacity.RetryError, WatcherActionFailedException, ) as e: - LOG.warning(e) + LOG.warning("Failed to execute Watcher audit action plan: %r", e) failed = True actions = watcher_helper.get_actions(client=self.client, audit=self.audit) diff --git a/sunbeam-python/sunbeam/steps/openstack.py b/sunbeam-python/sunbeam/steps/openstack.py index 73b53dadb..2acd682d7 100644 --- a/sunbeam-python/sunbeam/steps/openstack.py +++ b/sunbeam-python/sunbeam/steps/openstack.py @@ -837,7 +837,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except TerraformException as e: - LOG.exception("Error configuring cloud") + LOG.warning("Error configuring cloud: %r", e) return Result(ResultType.FAILED, str(e)) apps = self.jhelper.get_application_names(self.model) @@ -1009,7 +1009,7 @@ def run(self, context: StepContext) -> Result: reporter=context.reporter, ) except TerraformException as e: - LOG.exception("Error reconfiguring cloud") + LOG.warning("Error reconfiguring cloud: %r", e) return Result(ResultType.FAILED, str(e)) storage_nodes = self.client.cluster.list_nodes_by_role("storage") @@ -1119,7 +1119,7 @@ def run(self, context: StepContext) -> Result: ) return Result(ResultType.COMPLETED) except 
TerraformException as e: - LOG.exception("Error updating modelconfigs for openstack plan") + LOG.warning("Error updating modelconfigs for openstack plan: %r", e) return Result(ResultType.FAILED, str(e)) @@ -1536,7 +1536,7 @@ def run(self, context: StepContext) -> Result: try: self.tfhelper.destroy(reporter=context.reporter) except TerraformException as e: - LOG.exception("Error destroying cloud") + LOG.warning("Error destroying cloud: %r", e) return Result(ResultType.FAILED, str(e)) timeout_factor = 0.8 diff --git a/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py b/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py index 8128e770a..94d47f0b0 100644 --- a/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py +++ b/sunbeam-python/sunbeam/steps/upgrades/inter_channel.py @@ -123,7 +123,7 @@ def upgrade_applications( reporter=context.reporter if context else None, ) except TerraformException as e: - LOG.exception("Error upgrading cloud") + LOG.warning("Error upgrading cloud: %r", e) return Result(ResultType.FAILED, str(e)) status_queue: queue.Queue[str] = queue.Queue() status = context.status if context else None diff --git a/sunbeam-python/sunbeam/utils.py b/sunbeam-python/sunbeam/utils.py index 0e419e431..aed2c4573 100644 --- a/sunbeam-python/sunbeam/utils.py +++ b/sunbeam-python/sunbeam/utils.py @@ -76,7 +76,7 @@ def get_fqdn(cidr: str | None = None) -> str: return fqdn except Exception as e: LOG.debug("Ignoring error in getting FQDN") - LOG.debug(e, exc_info=True) + LOG.debug("FQDN lookup failed: %r", e) # return hostname if fqdn is localhost return socket.gethostname() @@ -241,11 +241,11 @@ def __call__(self, *args, **kwargs): try: return self.main(*args, **kwargs) except SunbeamException as e: - LOG.debug(e, exc_info=True) + LOG.debug("SunbeamException caught: %r", e) LOG.error("Error: %s", e) sys.exit(1) except Exception as e: - LOG.debug(e, exc_info=True) + LOG.debug("Unexpected exception caught: %r", e) message = ( "An unexpected error has 
occurred." " Please see https://canonical-openstack.readthedocs-hosted.com/en/latest/how-to/troubleshooting/inspecting-the-cluster/" From ba30aaf96a326eb2112445472c3106379c97cb25 Mon Sep 17 00:00:00 2001 From: Himawan Winarto Date: Thu, 23 Apr 2026 23:32:33 -0400 Subject: [PATCH 4/5] docs: add logging style guide document Signed-off-by: Himawan Winarto --- docs/LOGGING_STYLE_GUIDE.md | 484 ++++++++++++++++++++++++++++++++++++ 1 file changed, 484 insertions(+) create mode 100644 docs/LOGGING_STYLE_GUIDE.md diff --git a/docs/LOGGING_STYLE_GUIDE.md b/docs/LOGGING_STYLE_GUIDE.md new file mode 100644 index 000000000..db3b4084e --- /dev/null +++ b/docs/LOGGING_STYLE_GUIDE.md @@ -0,0 +1,484 @@ +# Sunbeam Logging Style Guide + +## Overview + +The Sunbeam codebase enforces logging conventions through two mechanisms: + +1. **Automated linting** — Ruff rules `LOG` ([flake8-logging](https://docs.astral.sh/ruff/rules/#flake8-logging-log)) and `G` ([flake8-logging-format](https://docs.astral.sh/ruff/rules/#flake8-logging-format-g)) catch formatting violations at CI time. +2. **Style conventions** — This document covers semantic and stylistic rules that linters cannot enforce. These were established during the logging standardization effort (PR #740) and should be followed for all new code. + +Both mechanisms work together: the linter prevents mechanical mistakes (f-strings in log calls, deprecated APIs), while these conventions ensure logs are **descriptive**, **non-redundant**, and **traceback-appropriate**. + +## What the Linter Catches + +The `LOG` and `G` rule groups are enabled in `sunbeam-python/pyproject.toml`: + +```toml +[tool.ruff.lint] +select = [ + # ... other rules ... 
+ "LOG", # flake8-logging + "G", # flake8-logging-format +] +``` + +### `LOG` — flake8-logging + +| Rule | Description | +|------|-------------| +| `LOG001` | Direct use of `logging.WARN` (use `WARNING`) | +| `LOG002` | Use of `__cached__` as logger name | +| `LOG009` | Use of `logging.WARN()` method | +| `LOG015` | `LOG.exception()` outside of an exception handler | + +### `G` — flake8-logging-format + +| Rule | Description | +|------|-------------| +| `G001` | Logging uses `str.format()` — use lazy `%`-style args | +| `G002` | Logging uses `%` operator for inline formatting | +| `G003` | Logging uses `+` string concatenation | +| `G004` | Logging uses f-string — use lazy `%`-style args | +| `G010` | `logging.warn()` is deprecated, use `logging.warning()` | +| `G101` | Extra keys conflict with `LogRecord` attributes | +| `G201` | `LOG.exception()` with redundant `exc_info` set | +| `G202` | `LOG.exception()` with `exc_info=False` (use `LOG.error`) | + +### Running the Linter + +```bash +# Full lint check (includes LOG and G rules) +tox -e pep8 + +# Check specific rules only +.tox/pep8/bin/ruff check --select LOG,G sunbeam-python/ + +# Check a specific file +.tox/pep8/bin/ruff check --select LOG,G sunbeam-python/sunbeam/steps/juju.py +``` + +## What the Linter Does NOT Catch + +The following conventions must be enforced through code review. Each section below describes a rule, explains why it matters, and shows before/after examples from the codebase. + +### 1. Use `LOG.exception()` Only for Terminal Errors + +`LOG.exception()` automatically includes a full traceback. Only use it when the error **stops execution** — i.e., the exception is re-raised or causes a program exit. + +**Why:** Tracebacks in logs for handled errors create noise, make real errors harder to find, and alarm operators unnecessarily. 
+ +**Correct usage — error re-raises, traceback is valuable:** + +```python +# sunbeam/core/terraform.py +except subprocess.CalledProcessError as e: + LOG.exception("terraform init failed: %s", e.stderr) + raise TerraformException(str(e)) +``` + +```python +# sunbeam/commands/launch.py +except ValueError as e: + LOG.exception("Error resolving management ip from cidr") + raise click.ClickException(str(e)) from e +``` + +**Incorrect usage — error is handled gracefully, traceback is noise:** + +```python +# BAD +except TerraformException as e: + LOG.exception("Error configuring cloud") + return Result(ResultType.FAILED, str(e)) + +# GOOD — use LOG.warning instead +except TerraformException as e: + LOG.warning("Error configuring cloud: %r", e) + return Result(ResultType.FAILED, str(e)) +``` + +**Rule of thumb:** If the next line after the `LOG` call is `return Result(ResultType.FAILED, ...)`, use `LOG.warning`. If the next line is `raise`, use `LOG.exception`. + +### 2. Do Not Pass Bare Exceptions or `str(e)` as the Log Message + +Bare exception objects or `str(e)` produce uninformative log lines with no context about what operation failed. Always include a descriptive message and use `%r` for exceptions. + +**Why:** A log line like `"Connection refused"` gives no indication of which connection, to what, or during what operation. Descriptive messages make debugging significantly faster. 
+ +```python +# BAD — no context +LOG.debug(e) +LOG.warning(str(e)) +LOG.debug(e.stderr) + +# GOOD — descriptive context +LOG.debug("Failed to add manifest to cluster db: %r", e) +LOG.warning("Timed out waiting for certificates application: %r", e) +LOG.debug("Subprocess failed: %s: %s", e, e.stderr) +``` + +**Real codebase examples:** + +```python +# sunbeam/steps/juju.py — controller not found +except ControllerNotFoundException as e: + LOG.debug("Controller %s is not found: %r", self.controller, e) + +# sunbeam/commands/configure.py — terraform error +except TerraformException as e: + LOG.warning("Error getting Terraform output: %r", e) + return Result(ResultType.FAILED, str(e)) + +# sunbeam/utils.py — connection check +except Exception as e: + LOG.debug("Not able to connect to %s:%s server: %r", ip, port, e) +``` + +### 3. Avoid Redundant Log Calls for the Same Error + +Do not log the same exception at multiple levels. Pick one appropriate level with a single, descriptive message. + +**Why:** Duplicate log entries for the same error create confusion about whether multiple errors occurred. + +```python +# BAD — two log calls for one error +except subprocess.CalledProcessError as e: + LOG.exception("Error bootstrapping Juju") + LOG.warning("%s: %s", e, e.stderr) + return Result(ResultType.FAILED, str(e)) + +# GOOD — single descriptive message +except subprocess.CalledProcessError as e: + LOG.warning("Error bootstrapping Juju: %s: %s", e, e.stderr) + return Result(ResultType.FAILED, str(e)) +``` + +**Real codebase example:** + +```python +# sunbeam/steps/juju.py — handled error with context +except subprocess.CalledProcessError as e: + LOG.warning( + "Error determining whether to skip the bootstrap " + "process. Defaulting to not skip: %s: %s", + e, + e.stderr, + ) + return Result(ResultType.FAILED, str(e)) +``` + +### 4. 
Use `%r` for Exceptions, `%s` for Human-Readable Values + +- **`%r`** — use for exceptions and objects where the repr (including type name) is useful +- **`%s`** — use for strings, names, IPs, and other human-readable values + +**Why:** `%r` shows the exception type alongside the message (e.g., `ConnectionError('refused')` vs just `refused`), which is critical for debugging. `%s` is cleaner for values that are already human-readable. + +```python +# GOOD — %r for exception, %s for names +LOG.debug("Controller %s is not found: %r", self.controller, e) +LOG.warning("Timed out waiting for %s to be ready: %r", app_name, e) +LOG.debug("Running command %s", " ".join(cmd)) +LOG.debug("Unit %s is deployed on machine: %s", name, self.machine_id) +``` + +**Corollary:** Use `%r` instead of `'%s'` when quoting variable values: + +```python +# BAD +LOG.debug("Got value '%s' for key '%s'", value, key) + +# GOOD +LOG.debug("Got value %r for key %r", value, key) +``` + +### 5. Use Consistent, Descriptive Messages + +Log messages should clearly describe **what happened** and include relevant context (what entity, what operation). + +**Why:** Vague messages like `"Error occurred"` require digging through code to understand what they mean. Descriptive messages make logs self-documenting. 
+ +```python +# BAD — vague or inconsistent +LOG.debug("Demo setup not yet done") +LOG.debug("Error: Terraform state locked") +LOG.debug("Cluster already bootstrapped") + +# GOOD — precise and descriptive +LOG.debug("Demo setup is not yet done") +LOG.debug("Terraform state locked") +LOG.debug("Cluster is already bootstrapped") +``` + +#### Standard Message Patterns + +Use these patterns consistently across the codebase: + +| Pattern | Example | +|---------|---------| +| Entity not found | `LOG.debug("Model %s is not found: %r", model, e)` | +| Timeout waiting | `LOG.warning("Timed out waiting for %s: %r", app, e)` | +| Operation failed | `LOG.warning("Failed to create Watcher audit: %r", e)` | +| Command execution | `LOG.debug("Running command %s", " ".join(cmd))` | +| Command result | `LOG.debug("Command finished. stdout=%r, stderr=%r", out, err)` | +| Subprocess error | `LOG.warning("Error bootstrapping Juju: %s: %s", e, e.stderr)` | +| State description | `LOG.debug("Cluster is already bootstrapped")` | +| Connection check | `LOG.debug("Not able to connect to %s:%s server: %r", ip, port, e)` | + +### 6. Do Not Include Trailing Periods in Log Messages + +```python +# BAD +LOG.debug("DPDK disabled.") +LOG.debug("Determining DPDK candidate interfaces.") + +# GOOD +LOG.debug("DPDK disabled") +LOG.debug("Determining DPDK candidate interfaces") +``` + +### 7. Use Proper Casing for Product Names + +Product and project names should use their official casing. + +| Correct | Incorrect | +|---------|-----------| +| Juju | juju | +| MySQL | Mysql | +| Terraform | terraform (in messages) | +| OpenStack | Openstack | +| MicroCeph | microceph | +| MicroOVN | microovn | + +```python +# BAD +LOG.warning("Error getting users list from juju.") + +# GOOD +LOG.warning("Error getting users list from Juju") +``` + +### 8. 
Use `exc_info=True` Sparingly on `LOG.debug` + +`exc_info=True` on `LOG.debug` is acceptable when: +- The error is expected/recoverable and you want the traceback only in debug logs +- The exception is caught but doesn't need user-visible logging + +```python +# GOOD — expected failure, traceback only in debug +except ConfigItemNotFoundException: + LOG.debug("Failed to pull state", exc_info=True) + +# BAD — use %r instead when the traceback isn't needed +except Exception as e: + LOG.debug(e, exc_info=True) + +# GOOD +except Exception as e: + LOG.debug("FQDN lookup failed: %r", e) +``` + +### 9. Choosing the Right Log Level + +| Level | When to use | Example | +|-------|-------------|---------| +| `LOG.debug` | Internal state, command execution, expected/handled failures | `LOG.debug("Controller %s is not found: %r", name, e)` | +| `LOG.info` | User-relevant progress | `LOG.info("Applying DPDK configuration")` | +| `LOG.warning` | Handled errors that may affect behavior, timeouts, fallbacks | `LOG.warning("Error configuring cloud: %r", e)` | +| `LOG.error` | Errors shown to user at CLI level | `LOG.error("Error: %s", e)` | +| `LOG.exception` | **Only** when the error stops execution (re-raise or exit) | `LOG.exception("terraform init failed: %s", e.stderr)` | + +**Real codebase example — using multiple levels correctly:** + +```python +# sunbeam/utils.py — global error handler +try: + return self.main(*args, **kwargs) +except SunbeamException as e: + LOG.debug("SunbeamException caught: %r", e) # debug: internal context + LOG.error("Error: %s", e) # error: user-facing message + sys.exit(1) +except Exception as e: + LOG.debug("Unexpected exception caught: %r", e) # debug: internal context + LOG.warning(message) # warning: troubleshooting hint + LOG.error("Error: %s", e) # error: user-facing message + sys.exit(1) +``` + +## Quick Reference + +### Decision Tree: Which Log Level for Exceptions? + +``` +Exception caught +├── Re-raised or causes exit? 
+│ ├── Yes → LOG.exception("descriptive message: %s", e.detail) +│ │ raise NewException(...) from e +│ └── No (handled gracefully) → +│ ├── Expected/normal flow? → LOG.debug("context: %r", e) +│ └── May affect behavior? → LOG.warning("context: %r", e) +│ return Result(ResultType.FAILED, str(e)) +``` + +### Formatting Cheat Sheet + +| Scenario | Format | Example | +|----------|--------|---------| +| Exception object | `%r` | `LOG.debug("Failed: %r", e)` | +| Exception + stderr | `%s: %s` | `LOG.warning("Error: %s: %s", e, e.stderr)` | +| String value | `%s` | `LOG.debug("Model %s found", name)` | +| Quoting a value | `%r` | `LOG.debug("Got value %r", value)` | +| Command | `%s` | `LOG.debug("Running command %s", " ".join(cmd))` | +| stdout/stderr output | `%r` | `LOG.debug("stdout=%r, stderr=%r", out, err)` | + +### Error Logging Output Reference + +Understanding what each format specifier and log method produces helps choose the right one. + +**Standard exception:** + +```python +try: + raise ValueError("Bad thing") from Exception("original cause") +except ValueError as e: + LOG.debug("Error with %s: %s", "format", e) + LOG.debug("Error with %r: %r", "format", e) + LOG.debug(e, exc_info=True) + LOG.exception("Caught an exception") +``` + +``` +DEBUG Error with format: Bad thing +DEBUG Error with format: ValueError('Bad thing') +DEBUG Bad thing # bare e — no context, avoid this +Exception: original cause # exc_info=True adds full traceback + ... +ValueError: Bad thing +ERROR Caught an exception # LOG.exception — always ERROR + traceback +Exception: original cause + ... 
+ValueError: Bad thing +``` + +**`subprocess.CalledProcessError`:** + +```python +try: + subprocess.run(["juju", "show-controller", "nonexistent"], + check=True, capture_output=True, text=True) +except subprocess.CalledProcessError as e: + LOG.debug("e with %%s : %s", e) + LOG.debug("e with %%r : %r", e) + LOG.debug("Output : %s", e.output) + LOG.debug("Stderr : %s", e.stderr) + LOG.exception("Caught a CalledProcessError") +``` + +``` +DEBUG e with %s : Command '['juju', 'show-controller', 'nonexistent']' returned non-zero exit status 1. +DEBUG e with %r : CalledProcessError(1, ['juju', 'show-controller', 'nonexistent']) +DEBUG Output : +DEBUG Stderr : ERROR controller nonexistent not found +ERROR Caught a CalledProcessError +Traceback (most recent call last): + ... +subprocess.CalledProcessError: Command '['juju', ...]' returned non-zero exit status 1. +``` + +**Key takeaways:** +- `%s` on an exception gives the human-readable message +- `%r` on an exception gives the type + message (e.g., `ValueError('Bad thing')`) — preferred for debugging +- `%s` on `e.stderr` gives the actual error output from the subprocess — often the most useful detail +- `LOG.exception` always logs at `ERROR` level with full traceback — reserve for terminal errors +- `LOG.debug(e, exc_info=True)` logs at `DEBUG` with traceback but uses bare exception as message — avoid, use a descriptive message instead + +## Implementation Details + +### Logger Initialization + +All modules use a module-level logger: + +```python +import logging + +LOG = logging.getLogger(__name__) +``` + +### Files and Configuration + +| File | Purpose | +|------|---------| +| `sunbeam-python/pyproject.toml` | Ruff configuration with `LOG` and `G` rules | +| `sunbeam-python/tox.ini` | `pep8` environment runs `ruff check` | +| `sunbeam-python/sunbeam/core/terraform.py` | Reference for correct `LOG.exception` usage (5 calls, all re-raise) | +| `sunbeam-python/sunbeam/utils.py` | Reference for global error handler patterns 
| +| `sunbeam-python/sunbeam/steps/juju.py` | Reference for subprocess error logging patterns | + +## Code Review Checklist + +When reviewing logging changes, check for: + +- [ ] **`LOG.exception` only on re-raise/exit**: Does the exception handler re-raise or exit? If not, use `LOG.warning` or `LOG.debug` +- [ ] **No bare exceptions**: Is the log message descriptive? No `LOG.debug(e)` or `LOG.warning(str(e))` +- [ ] **No redundant calls**: Is the same error logged at multiple levels? +- [ ] **Correct format specifiers**: `%r` for exceptions, `%s` for human-readable strings +- [ ] **Descriptive messages**: Does the message explain what operation failed and on what entity? +- [ ] **No trailing periods**: Log messages should not end with `.` +- [ ] **Correct product names**: Juju, MySQL, Terraform, OpenStack (not juju, Mysql, etc.) +- [ ] **Appropriate level**: `debug` for internals, `warning` for handled errors, `exception` for terminal errors + +## Troubleshooting + +### Linter Passes but Style Is Wrong + +The `LOG` and `G` ruff rules only check formatting mechanics (f-strings, `.format()`, concatenation, deprecated APIs). They do **not** check: +- Whether `LOG.exception` is appropriate for the error handler +- Whether the message is descriptive +- Whether the log level matches the severity +- Whether multiple calls log the same error + +These must be caught during code review using this guide. + +### Common Anti-Patterns + +**Anti-pattern: `LOG.exception` + `return Result(FAILED)`** + +This is the most common issue. If the error is handled and execution continues, the traceback is misleading. 
+ +```python +# BAD — traceback implies a crash, but execution continues normally +except TerraformException as e: + LOG.exception("Error configuring cloud") + return Result(ResultType.FAILED, str(e)) +``` + +**Anti-pattern: `LOG.exception` + `LOG.warning` for the same error** + +This produces two log entries for one error — the traceback from `LOG.exception` and a separate warning. + +```python +# BAD — redundant logging +except subprocess.CalledProcessError as e: + LOG.exception("Error running command") + LOG.warning("%s: %s", e, e.stderr) +``` + +**Anti-pattern: bare exception as message** + +This loses all context about what operation was being attempted. + +```python +# BAD — which operation? which entity? +except Exception as e: + LOG.debug(e) +``` + +## Contributing + +When adding new logging calls: + +1. Follow the patterns documented here +2. Use the decision tree to select the correct log level +3. Include descriptive context in all messages (what operation, what entity) +4. Run `tox -e pep8` to verify linter rules pass +5. 
Self-review against the code review checklist above From f0f6188d801528ca2caca38dd6d90884017aaf68 Mon Sep 17 00:00:00 2001 From: Himawan Winarto Date: Wed, 8 Apr 2026 10:38:25 -0400 Subject: [PATCH 5/5] refactor: enable linting for python scripts in tools and CI/CD pipeline Signed-off-by: Himawan Winarto --- .github/scripts/tag-syncer.py | 17 ++++++++++--- sunbeam-python/pyproject.toml | 3 +++ sunbeam-python/tox.ini | 13 +++++++--- tools/add_charm_revisions.py | 46 +++++++++++++++++++++++++++++------ 4 files changed, 63 insertions(+), 16 deletions(-) diff --git a/.github/scripts/tag-syncer.py b/.github/scripts/tag-syncer.py index c1b724285..61b50848f 100644 --- a/.github/scripts/tag-syncer.py +++ b/.github/scripts/tag-syncer.py @@ -1,17 +1,24 @@ +# SPDX-FileCopyrightText: 2026 - Canonical Ltd +# SPDX-License-Identifier: Apache-2.0 + # /// script # requires-python = ">=3.12" # dependencies = [ # "launchpadlib>=2.1.0", # ] # /// +"""Helper script to sync git tags with snap builds in Launchpad. + +Usage: + python3 tag-syncer.py +""" import logging -import sys import subprocess +import sys from launchpadlib.launchpad import Launchpad - logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s", @@ -133,7 +140,7 @@ def process_build(build) -> bool: """Process a single build and create a tag if appropriate. Returns: - bool: True if build was processed successfully (tagged or skipped), False on error. + bool: True if build was processed successfully (tagged or skipped). 
""" if not is_build_ready_for_tagging(build): return True @@ -261,7 +268,9 @@ def main(*snap_recipes: str) -> None: if len(sys.argv) < 2: logger.error("Usage: tag-syncer.py ") logger.error( - "Example: tag-syncer.py ~openstack-snappers/snap-openstack/+snap/openstack-caracal-candidate ~openstack-snappers/+snap/openstack-main-edge" + "Example: tag-syncer.py" + " ~openstack-snappers/snap-openstack/+snap/openstack-caracal-candidate" + " ~openstack-snappers/+snap/openstack-main-edge" ) sys.exit(1) diff --git a/sunbeam-python/pyproject.toml b/sunbeam-python/pyproject.toml index f01c5d02a..dad2af7a6 100644 --- a/sunbeam-python/pyproject.toml +++ b/sunbeam-python/pyproject.toml @@ -810,6 +810,9 @@ extend-ignore = ["E203"] "N806", "S", # Disable bandit rules in tests ] +"../tools/*" = [ + "S310", # URL scheme is validated before urlopen +] "setup.py" = ["D100"] "**/__init__.py" = ["D104"] diff --git a/sunbeam-python/tox.ini b/sunbeam-python/tox.ini index 6170c1c39..304ee0e79 100644 --- a/sunbeam-python/tox.ini +++ b/sunbeam-python/tox.ini @@ -7,10 +7,12 @@ skipsdist = True ignore_basepython_conflict = True [vars] -src_path = {toxinidir}/ -tst_path = {toxinidir}/tests/ -all_path = {[vars]src_path} {[vars]tst_path} -uv_flags = --frozen --isolated --extra=dev +src_path = {toxinidir}/ +tst_path = {toxinidir}/tests/ +tool_path = {toxinidir}/../tools/ +cicd_path = {toxinidir}/../.github/scripts/ +all_path = {[vars]src_path} {[vars]tst_path} {[vars]tool_path} {[vars]cicd_path} +uv_flags = --frozen --isolated --extra=dev [testenv] usedevelop = True @@ -65,6 +67,9 @@ commands = # the unit tests pass the mypy check. # uv run {[vars]uv_flags} mypy {[vars]tst_path}/unit uv run {[vars]uv_flags} mypy {[vars]tst_path}/functional + uv run {[vars]uv_flags} mypy {[vars]tool_path} + # TODO(himax16): enable after solving custom dependency issues in CI/CD scripts. 
+ # uv run {[vars]uv_flags} mypy {[vars]cicd_path} [testenv:cover] setenv = diff --git a/tools/add_charm_revisions.py b/tools/add_charm_revisions.py index da5fb4fc9..c3d21e4d3 100644 --- a/tools/add_charm_revisions.py +++ b/tools/add_charm_revisions.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# SPDX-FileCopyrightText: 2026 - Canonical Ltd +# SPDX-License-Identifier: Apache-2.0 + """Add charm revisions to a sunbeam manifest file by querying the Charmhub API. Usage: @@ -8,9 +11,12 @@ """ import argparse +import json import sys +import urllib.error +import urllib.parse import urllib.request -import json + import yaml @@ -18,10 +24,19 @@ def fetch_revision(charm: str, channel: str) -> int | None: """Return the revision number for *charm* at *channel*, or None on failure.""" url = f"https://api.charmhub.io/v2/charms/info/{charm}?fields=channel-map" try: - req = urllib.request.Request(url, headers={"User-Agent": "add-charm-revisions/1.0"}) + req = urllib.request.Request( + url, headers={"User-Agent": "add-charm-revisions/1.0"} + ) with urllib.request.urlopen(req, timeout=15) as resp: + final_url = resp.geturl() + if urllib.parse.urlparse(final_url).scheme not in ("https",): + print( + f" WARNING: Insecure redirect to {final_url!r} for {charm!r}", + file=sys.stderr, + ) + return None data = json.loads(resp.read()) - except Exception as exc: + except urllib.error.HTTPError as exc: print(f" WARNING: could not fetch info for {charm!r}: {exc}", file=sys.stderr) return None @@ -52,7 +67,11 @@ def _walk(node): charms = node.get("charms") if isinstance(charms, dict): for name, charm in charms.items(): - if isinstance(charm, dict) and "channel" in charm and "revision" not in charm: + if ( + isinstance(charm, dict) + and "channel" in charm + and "revision" not in charm + ): pairs.append((name, charm["channel"])) for value in node.values(): if isinstance(value, dict): @@ -71,7 +90,11 @@ def inject_revisions(obj: dict, revisions: dict[tuple[str, str], int | None]) -> if 
isinstance(charms, dict): new_charms = {} for name, charm in charms.items(): - if isinstance(charm, dict) and "channel" in charm and "revision" not in charm: + if ( + isinstance(charm, dict) + and "channel" in charm + and "revision" not in charm + ): new_charm: dict = {} for key, value in charm.items(): new_charm[key] = value @@ -84,11 +107,15 @@ def inject_revisions(obj: dict, revisions: dict[tuple[str, str], int | None]) -> new_charms[name] = charm obj = {k: (new_charms if k == "charms" else v) for k, v in obj.items()} - return {k: inject_revisions(v, revisions) if isinstance(v, dict) else v for k, v in obj.items()} + return { + k: inject_revisions(v, revisions) if isinstance(v, dict) else v + for k, v in obj.items() + } def process_manifest(input_path: str, output_path: str | None) -> None: - with open(input_path) as fh: + """Read the manifest, fetch revisions, and write the updated manifest.""" + with open(input_path, "r") as fh: data = yaml.safe_load(fh) pairs = collect_charms(data) @@ -114,7 +141,9 @@ def process_manifest(input_path: str, output_path: str | None) -> None: file=sys.stderr, ) - output = yaml.dump(data, sort_keys=False, default_flow_style=False, allow_unicode=True) + output = yaml.dump( + data, sort_keys=False, default_flow_style=False, allow_unicode=True + ) if output_path: with open(output_path, "w") as fh: @@ -125,6 +154,7 @@ def process_manifest(input_path: str, output_path: str | None) -> None: def main() -> None: + """Main function to add charm revisions to a Sunbeam manifest.""" parser = argparse.ArgumentParser( description="Add Charmhub revisions to a sunbeam manifest YAML file." )