From 8096b0973f83389a9834ca49605b35c36c2c19fd Mon Sep 17 00:00:00 2001 From: Sinclert Pérez Date: Thu, 29 Jan 2026 09:57:26 +0100 Subject: [PATCH 01/40] [MISC] Revert ARM 24.04 work-around (#56) --- machines/spread.yaml | 7 +++---- .../test_upgrade_rollback_incompat.py/task.yaml | 2 +- .../test_upgrade_from_stable_2023_04_20.py/task.yaml | 2 +- .../test_upgrade_from_stable_2023_10_06.py/task.yaml | 2 +- .../test_upgrade_from_stable_2024_06_26.py/task.yaml | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/machines/spread.yaml b/machines/spread.yaml index a044689a2..b0eb0aaf1 100644 --- a/machines/spread.yaml +++ b/machines/spread.yaml @@ -76,9 +76,10 @@ backends: allocate: | sudo tee /etc/ssh/sshd_config.d/10-spread-github-ci.conf << 'EOF' PasswordAuthentication yes + PermitEmptyPasswords yes EOF - echo "runner:$SPREAD_PASSWORD" | sudo chpasswd + sudo passwd --delete "$USER" ADDRESS localhost # HACK: spread does not pass environment variables set on runner @@ -95,9 +96,7 @@ backends: systems: - ubuntu-24.04: username: runner - # TODO: Revert to ubuntu-24.04-arm once Kernel 6.14 issue is solved - # https://gitlab.com/apparmor/apparmor/-/issues/571 - - ubuntu-22.04-arm: + - ubuntu-24.04-arm: username: runner variants: - -juju29 diff --git a/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml b/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml index b7f18e347..d02a2ca7a 100644 --- a/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml +++ b/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml @@ -6,4 +6,4 @@ execute: | artifacts: - allure-results systems: - - -ubuntu-22.04-arm + - -ubuntu-24.04-arm diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml index 55e9a0230..2b83d6200 100644 --- a/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml +++ b/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml @@ -10,4 +10,4 @@ artifacts: backends: - -lxd-vm # This task requires charm built on different architecture from host systems: - - -ubuntu-22.04-arm + - -ubuntu-24.04-arm diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml index f87cf15f6..75e0d41c1 100644 --- a/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml +++ b/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml @@ -10,4 +10,4 @@ artifacts: backends: - -lxd-vm # This task requires charm built on different architecture from host systems: - - -ubuntu-22.04-arm + - -ubuntu-24.04-arm diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml index 1f56d753a..d60f1cf6a 100644 --- a/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml +++ b/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml @@ -10,4 +10,4 @@ artifacts: backends: - -lxd-vm # This task requires charm built on different architecture from host systems: - - -ubuntu-22.04-arm + - -ubuntu-24.04-arm From 92db53e55af96e3a63cb1f4756f410b1409f1702 Mon Sep 17 00:00:00 2001 From: Sinclert Pérez Date: Fri, 30 Jan 2026 10:13:12 
+0100 Subject: [PATCH 02/40] [DPE-8300] Branch out MySQL 8.0 (#59) --- .../lib/charms/mysql/v0/async_replication.py | 21 ------------------ kubernetes/lib/charms/mysql/v0/mysql.py | 15 ------------- kubernetes/src/charm.py | 12 ---------- kubernetes/src/mysql_k8s_helpers.py | 12 ---------- .../lib/charms/mysql/v0/async_replication.py | 22 ------------------- machines/lib/charms/mysql/v0/mysql.py | 15 ------------- machines/src/charm.py | 13 ----------- 7 files changed, 110 deletions(-) diff --git a/kubernetes/lib/charms/mysql/v0/async_replication.py b/kubernetes/lib/charms/mysql/v0/async_replication.py index 85d95516b..12787e77c 100644 --- a/kubernetes/lib/charms/mysql/v0/async_replication.py +++ b/kubernetes/lib/charms/mysql/v0/async_replication.py @@ -104,17 +104,11 @@ def role(self) -> ClusterSetInstanceState: """Current cluster set role of the unit, after the relation is established.""" is_replica = self._charm._mysql.is_cluster_replica() - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if is_replica: cluster_role = ClusterRole.REPLICA.lower() elif is_replica is False: cluster_role = ClusterRole.PRIMARY.lower() else: - # TODO: - # Uppercase when migrating to MySQL 8.4 - # (when breaking changes are allowed) cluster_role = "unset" instance_role = self._charm._mysql.get_member_role() @@ -124,9 +118,6 @@ def role(self) -> ClusterSetInstanceState: else: relation_side = RELATION_OFFER - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) return ClusterSetInstanceState(cluster_role, instance_role.lower(), relation_side) @property @@ -195,9 +186,6 @@ def on_async_relation_broken(self, event: RelationBrokenEvent): # noqa: C901 """Handle the async relation being broken from either side.""" # Remove the replica cluster, if this is the primary - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if ( self.role.cluster_role in (ClusterRole.REPLICA.lower(), "unset") and not self._charm.removing_unit @@ -242,9 +230,6 @@ def on_async_relation_broken(self, event: RelationBrokenEvent): # noqa: C901 # set flag to persist removed from cluster-set state self._charm.app_peer_data["removed-from-cluster-set"] = "true" - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) elif self.role.cluster_role == ClusterRole.PRIMARY.lower(): if self._charm.unit.is_leader(): # only leader units can remove replica clusters @@ -679,9 +664,6 @@ def replica_initialized(self) -> bool: def _check_version(self) -> bool: """Check if the MySQL version is compatible with the primary cluster.""" - # TODO: - # Remove `.split("-")[0]` when migrating to MySQL 8.4 - # (when breaking changes are allowed) remote_version = self.remote_relation_data.get("mysql-version").split("-")[0] local_version = self._charm._mysql.get_mysql_version() @@ -867,9 +849,6 @@ def _on_consumer_changed(self, event): # noqa: C901 ) logger.debug("Awaiting other units to join the cluster") - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) # set state flags to allow secondaries to join the cluster self._charm.unit_peer_data["member-state"] = InstanceState.ONLINE.lower() self._charm.unit_peer_data["member-role"] = InstanceRole.PRIMARY.lower() diff --git a/kubernetes/lib/charms/mysql/v0/mysql.py b/kubernetes/lib/charms/mysql/v0/mysql.py index 638d78606..ebd21bd92 100644 --- a/kubernetes/lib/charms/mysql/v0/mysql.py +++ 
b/kubernetes/lib/charms/mysql/v0/mysql.py @@ -166,9 +166,6 @@ def __init__( ROLE_BACKUP = "charmed_backup" ROLE_MAX_LENGTH = 32 -# TODO: -# Remove legacy role when migrating to MySQL 8.4 -# (when breaking changes are allowed) LEGACY_ROLE_ROUTER = "mysqlrouter" MODERN_ROLE_ROUTER = "charmed_router" @@ -583,9 +580,6 @@ def _get_cluster_status(self, event: ActionEvent) -> None: event.fail("Failed to read cluster status. See logs for more information.") return - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) status = json.dumps(status) status = status.lower() status = json.loads(status) @@ -677,9 +671,6 @@ def create_cluster(self) -> None: role = self._mysql.get_member_role() state = self._mysql.get_member_state() - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data.update({ "member-state": state.lower(), "member-role": role.lower(), @@ -1340,9 +1331,6 @@ def configure_mysql_system_users(self) -> None: def install_plugins(self, plugins: list[str]) -> None: """Install extra plugins.""" - # TODO: - # Remove this context-manager when migrating to MySQL 8.4 - # (when breaking changes are allowed) with self._read_only_disabled(): installed_plugins = self._instance_client_tcp.search_instance_plugins("%") @@ -1366,9 +1354,6 @@ def install_plugins(self, plugins: list[str]) -> None: def uninstall_plugins(self, plugins: list[str]) -> None: """Uninstall plugins.""" - # TODO: - # Remove this context-manager when migrating to MySQL 8.4 - # (when breaking changes are allowed) with self._read_only_disabled(): installed_plugins = self._instance_client_tcp.search_instance_plugins("%") diff --git a/kubernetes/src/charm.py b/kubernetes/src/charm.py index a95656212..1e753d9ba 100755 --- a/kubernetes/src/charm.py +++ b/kubernetes/src/charm.py @@ -412,9 +412,6 @@ def _create_cluster(self) -> None: def _get_primary_from_online_peer(self) -> Optional[str]: """Get the primary address from an online peer.""" for unit in self.peers.units: - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if self.peers.data[unit].get("member-state") == InstanceState.ONLINE.lower(): try: return self._mysql.get_cluster_primary_address( @@ -521,9 +518,6 @@ def join_unit_to_cluster(self) -> None: logger.info("waiting: failed to acquire lock when adding instance to cluster") return - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data["member-state"] = InstanceState.ONLINE.lower() self.unit.status = ActiveStatus(self.active_status_message) logger.info(f"Instance {instance_label} added to cluster") @@ -905,9 +899,6 @@ def _handle_potential_cluster_crash_scenario(self) -> bool: # noqa: C901 logger.info(f"Unit workload member-state is {state} with member-role {role}") - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data["member-state"] = state.lower() self.unit_peer_data["member-role"] = role.lower() @@ -927,9 +918,6 @@ def _handle_potential_cluster_crash_scenario(self) -> bool: # noqa: C901 self.peers.data[unit].get("member-state", "unknown") for unit in self.peers.units } - # TODO: - # Use InstanceState.OFFLINE when migrating to MySQL 8.4 - # (when breaking changes are allowed) # Add state 'offline' for this unit (self.peers.unit does not include this unit) if (all_states | {"offline"} == {"offline"} and self.unit.is_leader()) or ( 
only_single_uninitialized_node_across_cluster and all_states == {"waiting"} diff --git a/kubernetes/src/mysql_k8s_helpers.py b/kubernetes/src/mysql_k8s_helpers.py index 602b6415a..83fb57449 100644 --- a/kubernetes/src/mysql_k8s_helpers.py +++ b/kubernetes/src/mysql_k8s_helpers.py @@ -464,9 +464,6 @@ def _wait_until_unit_removed_from_cluster(self, unit_address: str) -> None: if unit_address in members_in_cluster: raise MySQLWaitUntilUnitRemovedFromClusterError("Remove member still in cluster") - # TODO: - # Remove when migrating to MySQL 8.4 - # (when breaking changes are allowed) def create_database_legacy(self, database_name: str) -> None: """Creates a database. @@ -489,9 +486,6 @@ def create_database_legacy(self, database_name: str) -> None: logger.exception(f"Failed to create database {database_name}", exc_info=e) raise MySQLCreateDatabaseError() from None - # TODO: - # Remove when migrating to MySQL 8.4 - # (when breaking changes are allowed) def create_user_legacy( self, username: str, password: str, label: str, hostname: str = "%" ) -> None: @@ -520,9 +514,6 @@ def create_user_legacy( logger.exception(f"Failed to create user {username}@{hostname}") raise MySQLCreateUserError() from None - # TODO: - # Remove when migrating to MySQL 8.4 - # (when breaking changes are allowed) def escalate_user_privileges(self, username: str, hostname: str = "%") -> None: """Escalates the provided user's privileges. @@ -561,9 +552,6 @@ def escalate_user_privileges(self, username: str, hostname: str = "%") -> None: logger.exception(f"Failed to escalate user privileges for {username}@{hostname}") raise MySQLEscalateUserPrivilegesError() from None - # TODO: - # Remove when migrating to MySQL 8.4 - # (when breaking changes are allowed) def delete_users_with_label(self, label_name: str, label_value: str) -> None: """Delete users with the provided label. 
diff --git a/machines/lib/charms/mysql/v0/async_replication.py b/machines/lib/charms/mysql/v0/async_replication.py index 85d95516b..0040fab41 100644 --- a/machines/lib/charms/mysql/v0/async_replication.py +++ b/machines/lib/charms/mysql/v0/async_replication.py @@ -104,17 +104,11 @@ def role(self) -> ClusterSetInstanceState: """Current cluster set role of the unit, after the relation is established.""" is_replica = self._charm._mysql.is_cluster_replica() - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if is_replica: cluster_role = ClusterRole.REPLICA.lower() elif is_replica is False: cluster_role = ClusterRole.PRIMARY.lower() else: - # TODO: - # Uppercase when migrating to MySQL 8.4 - # (when breaking changes are allowed) cluster_role = "unset" instance_role = self._charm._mysql.get_member_role() @@ -124,9 +118,6 @@ def role(self) -> ClusterSetInstanceState: else: relation_side = RELATION_OFFER - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) return ClusterSetInstanceState(cluster_role, instance_role.lower(), relation_side) @property @@ -194,10 +185,6 @@ def _on_promote_to_primary(self, event: ActionEvent) -> None: def on_async_relation_broken(self, event: RelationBrokenEvent): # noqa: C901 """Handle the async relation being broken from either side.""" # Remove the replica cluster, if this is the primary - - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if ( self.role.cluster_role in (ClusterRole.REPLICA.lower(), "unset") and not self._charm.removing_unit @@ -242,9 +229,6 @@ def on_async_relation_broken(self, event: RelationBrokenEvent): # noqa: C901 # set flag to persist removed from cluster-set state self._charm.app_peer_data["removed-from-cluster-set"] = "true" - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) elif self.role.cluster_role == ClusterRole.PRIMARY.lower(): if self._charm.unit.is_leader(): # only leader units can remove replica clusters @@ -679,9 +663,6 @@ def replica_initialized(self) -> bool: def _check_version(self) -> bool: """Check if the MySQL version is compatible with the primary cluster.""" - # TODO: - # Remove `.split("-")[0]` when migrating to MySQL 8.4 - # (when breaking changes are allowed) remote_version = self.remote_relation_data.get("mysql-version").split("-")[0] local_version = self._charm._mysql.get_mysql_version() @@ -867,9 +848,6 @@ def _on_consumer_changed(self, event): # noqa: C901 ) logger.debug("Awaiting other units to join the cluster") - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) # set state flags to allow secondaries to join the cluster self._charm.unit_peer_data["member-state"] = InstanceState.ONLINE.lower() self._charm.unit_peer_data["member-role"] = InstanceRole.PRIMARY.lower() diff --git a/machines/lib/charms/mysql/v0/mysql.py b/machines/lib/charms/mysql/v0/mysql.py index 638d78606..ebd21bd92 100644 --- a/machines/lib/charms/mysql/v0/mysql.py +++ b/machines/lib/charms/mysql/v0/mysql.py @@ -166,9 +166,6 @@ def __init__( ROLE_BACKUP = "charmed_backup" ROLE_MAX_LENGTH = 32 -# TODO: -# Remove legacy role when migrating to MySQL 8.4 -# (when breaking changes are allowed) LEGACY_ROLE_ROUTER = "mysqlrouter" MODERN_ROLE_ROUTER = "charmed_router" @@ -583,9 +580,6 @@ def _get_cluster_status(self, event: ActionEvent) -> None: event.fail("Failed to read cluster status. 
See logs for more information.") return - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) status = json.dumps(status) status = status.lower() status = json.loads(status) @@ -677,9 +671,6 @@ def create_cluster(self) -> None: role = self._mysql.get_member_role() state = self._mysql.get_member_state() - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data.update({ "member-state": state.lower(), "member-role": role.lower(), @@ -1340,9 +1331,6 @@ def configure_mysql_system_users(self) -> None: def install_plugins(self, plugins: list[str]) -> None: """Install extra plugins.""" - # TODO: - # Remove this context-manager when migrating to MySQL 8.4 - # (when breaking changes are allowed) with self._read_only_disabled(): installed_plugins = self._instance_client_tcp.search_instance_plugins("%") @@ -1366,9 +1354,6 @@ def install_plugins(self, plugins: list[str]) -> None: def uninstall_plugins(self, plugins: list[str]) -> None: """Uninstall plugins.""" - # TODO: - # Remove this context-manager when migrating to MySQL 8.4 - # (when breaking changes are allowed) with self._read_only_disabled(): installed_plugins = self._instance_client_tcp.search_instance_plugins("%") diff --git a/machines/src/charm.py b/machines/src/charm.py index 66d945e3f..b4dcdf930 100755 --- a/machines/src/charm.py +++ b/machines/src/charm.py @@ -441,9 +441,6 @@ def _handle_non_online_instance_status(self, state: str) -> bool: self.peers.data[unit].get("member-state", "unknown") for unit in self.peers.units } - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) all_states.add(InstanceState.OFFLINE.lower()) if all_states == {InstanceState.OFFLINE.lower()} and self.unit.is_leader(): @@ -584,10 +581,6 @@ def _on_update_status(self, _) -> None: # noqa: C901 state = "UNREACHABLE" logger.info(f"Unit workload member-state is {state} with member-role {role}") - - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data["member-role"] = role.lower() self.unit_peer_data["member-state"] = state.lower() @@ -906,9 +899,6 @@ def _is_unit_waiting_to_join_cluster(self) -> bool: def _get_primary_from_online_peer(self) -> str | None: """Get the primary address from an online peer.""" for unit in self.peers.units: - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) if self.peers.data[unit].get("member-state") == InstanceState.ONLINE.lower(): try: return self._mysql.get_cluster_primary_address( @@ -984,9 +974,6 @@ def join_unit_to_cluster(self) -> None: logger.info("Waiting to join the cluster, failed to acquire lock.") return - # TODO: - # Remove `.lower()` when migrating to MySQL 8.4 - # (when breaking changes are allowed) self.unit_peer_data["member-state"] = InstanceState.ONLINE.lower() self.unit.status = ActiveStatus(self.active_status_message) logger.info(f"Instance {instance_label} added to cluster") From cee95a22a0a5058fa5a1e1b2c4f82fb05265575c Mon Sep 17 00:00:00 2001 From: Sinclert Pérez Date: Tue, 3 Feb 2026 10:23:36 +0100 Subject: [PATCH 03/40] [MISC] 8.0 - Remove pytest-operator plugin (#61) --- kubernetes/poetry.lock | 314 ---------------- kubernetes/pyproject.toml | 3 +- kubernetes/tests/integration/conftest.py | 21 +- .../test_async_replication.py | 15 +- .../test_async_replication_upgrade.py | 12 +- .../test_primary_switchover.py | 4 - 
.../test_replication_data_consistency.py | 3 - .../test_replication_data_isolation.py | 2 - .../test_replication_logs_rotation.py | 3 - .../test_replication_reelection.py | 3 - .../test_replication_scaling.py | 3 - .../test_replication_unit_endpoints.py | 4 - .../test_replication_variables.py | 4 - .../test_self_healing_network_cut.py | 3 - .../test_self_healing_node_drain.py | 3 - .../test_self_healing_pod.py | 3 - .../test_self_healing_process_frozen.py | 3 - .../test_self_healing_process_killed.py | 3 - .../test_self_healing_restart_graceful.py | 3 - .../test_self_healing_setup_crash.py | 3 - .../test_self_healing_stop_all.py | 3 - .../test_self_healing_stop_primary.py | 3 - .../high_availability/test_upgrade.py | 5 - .../test_upgrade_from_stable.py | 4 - .../test_upgrade_rollback_incompat.py | 5 - .../integration/relations/test_database.py | 7 - .../integration/relations/test_mysql_root.py | 4 - .../roles/test_database_dba_role.py | 2 - .../roles/test_instance_dba_role.py | 3 - .../integration/roles/test_instance_roles.py | 3 - .../integration/test_backup_aws.py | 4 - .../integration/test_backup_ceph.py | 4 - .../integration/test_backup_gcp.py | 4 - .../integration/test_backup_pitr_aws.py | 3 - .../integration/test_backup_pitr_gcp.py | 3 - .../integration/integration/test_charm.py | 9 - .../test_cos_integration_bundle.py | 2 - .../integration/test_multi_relations.py | 5 - .../test_osm_integration_bundle.py | 2 - .../test_saturate_max_connections.py | 3 - .../tests/integration/integration/test_tls.py | 7 - .../test_architecture.py/task.yaml | 2 +- .../test_async_replication.py/task.yaml | 2 +- .../task.yaml | 2 +- .../integration/test_backup_aws.py/task.yaml | 2 +- .../integration/test_backup_ceph.py/task.yaml | 2 +- .../integration/test_backup_gcp.py/task.yaml | 2 +- .../test_backup_pitr_aws.py/task.yaml | 2 +- .../test_backup_pitr_gcp.py/task.yaml | 2 +- .../integration/test_charm.py/task.yaml | 2 +- .../test_cos_integration_bundle.py/task.yaml | 2 +- .../integration/test_database.py/task.yaml | 2 +- .../test_database_dba_role.py/task.yaml | 2 +- .../test_instance_dba_role.py/task.yaml | 2 +- .../test_instance_roles.py/task.yaml | 2 +- .../test_multi_relations.py/task.yaml | 2 +- .../integration/test_mysql_root.py/task.yaml | 2 +- .../test_osm_integration_bundle.py/task.yaml | 2 +- .../test_primary_switchover.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../test_replication_reelection.py/task.yaml | 2 +- .../test_replication_scaling.py/task.yaml | 2 +- .../task.yaml | 2 +- .../test_replication_variables.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../test_self_healing_node_drain.py/task.yaml | 2 +- .../test_self_healing_pod.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../test_self_healing_stop_all.py/task.yaml | 2 +- .../task.yaml | 2 +- .../spread/integration/test_tls.py/task.yaml | 2 +- .../integration/test_upgrade.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- kubernetes/tox.ini | 2 +- machines/poetry.lock | 354 +----------------- machines/pyproject.toml | 1 - machines/tests/integration/conftest.py | 21 +- .../test_async_replication.py | 15 +- .../test_async_replication_upgrade.py | 12 +- .../test_primary_switchover.py | 4 - .../test_replication_data_consistency.py | 3 - 
.../test_replication_data_isolation.py | 2 - .../test_replication_logs_rotation.py | 3 - .../test_replication_reelection.py | 3 - .../test_replication_scaling.py | 3 - .../test_replication_unit_endpoints.py | 3 - .../test_replication_variables.py | 3 - .../test_self_healing_network_cut.py | 3 - .../test_self_healing_process_frozen.py | 3 - .../test_self_healing_process_killed.py | 3 - .../test_self_healing_restart_forceful.py | 3 - .../test_self_healing_restart_graceful.py | 3 - .../test_self_healing_stop_all.py | 3 - .../test_self_healing_stop_primary.py | 3 - .../high_availability/test_upgrade.py | 5 - .../test_upgrade_rollback_incompat.py | 5 - .../test_upgrade_skip_pre_upgrade_check.py | 4 - .../integration/relations/test_database.py | 10 - .../integration/relations/test_db_router.py | 2 - .../relations/test_relation_mysql_legacy.py | 5 - .../integration/relations/test_shared_db.py | 2 - .../roles/test_database_dba_role.py | 3 - .../roles/test_instance_dba_role.py | 4 - .../integration/roles/test_instance_roles.py | 4 - .../integration/spaces/test_spaced_db.py | 1 - .../integration/test_backup_aws.py | 4 - .../integration/test_backup_ceph.py | 4 - .../integration/test_backup_gcp.py | 4 - .../integration/test_backup_pitr_aws.py | 2 - .../integration/test_backup_pitr_gcp.py | 2 - .../test_saturate_max_connections.py | 3 - .../tests/integration/integration/test_tls.py | 7 - .../integration/integration/test_vm_reboot.py | 6 - .../test_architecture.py/task.yaml | 2 +- .../test_async_replication.py/task.yaml | 2 +- .../task.yaml | 2 +- .../integration/test_backup_aws.py/task.yaml | 2 +- .../integration/test_backup_ceph.py/task.yaml | 2 +- .../integration/test_backup_gcp.py/task.yaml | 2 +- .../test_backup_pitr_aws.py/task.yaml | 2 +- .../test_backup_pitr_gcp.py/task.yaml | 2 +- .../integration/test_database.py/task.yaml | 2 +- .../test_database_dba_role.py/task.yaml | 2 +- .../integration/test_db_router.py/task.yaml | 2 +- .../test_instance_dba_role.py/task.yaml | 2 +- .../test_instance_roles.py/task.yaml | 2 +- .../test_primary_switchover.py/task.yaml | 2 +- .../test_relation_mysql_legacy.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../test_replication_reelection.py/task.yaml | 2 +- .../test_replication_scaling.py/task.yaml | 2 +- .../task.yaml | 2 +- .../test_replication_variables.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../test_self_healing_stop_all.py/task.yaml | 2 +- .../task.yaml | 2 +- .../integration/test_shared_db.py/task.yaml | 2 +- .../integration/test_spaced_db.py/task.yaml | 2 +- .../test_subordinate_charms.py/task.yaml | 2 +- .../spread/integration/test_tls.py/task.yaml | 2 +- .../integration/test_upgrade.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../integration/test_vm_reboot.py/task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- .../task.yaml | 2 +- machines/tox.ini | 2 +- 173 files changed, 108 insertions(+), 1100 deletions(-) diff --git a/kubernetes/poetry.lock b/kubernetes/poetry.lock index 7806cec47..63f53774c 100644 --- a/kubernetes/poetry.lock +++ b/kubernetes/poetry.lock @@ -71,25 +71,6 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", 
"uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.23)"] -[[package]] -name = "asttokens" -version = "2.4.1" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, - {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, -] - -[package.dependencies] -six = ">=1.12.0" - -[package.extras] -astroid = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\""] -test = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\"", "pytest"] - [[package]] name = "attrs" version = "23.2.0" @@ -575,18 +556,6 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -groups = ["integration"] -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - [[package]] name = "deprecated" version = "1.2.14" @@ -621,21 +590,6 @@ files = [ [package.extras] test = ["pytest (>=6)"] -[[package]] -name = "executing" -version = "2.0.1" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = ">=3.5" -groups = ["integration"] -files = [ - {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, - {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, -] - -[package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] - [[package]] name = "google-auth" version = "2.29.0" @@ -799,82 +753,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "ipdb" -version = "0.13.13" -description = "IPython-enabled pdb" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["integration"] -files = [ - {file = "ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4"}, - {file = "ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726"}, -] - -[package.dependencies] -decorator = {version = "*", markers = "python_version > \"3.6\""} -ipython = {version = ">=7.31.1", markers = "python_version > \"3.6\""} -tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < \"3.11\""} - -[[package]] -name = "ipython" -version = "8.25.0" -description = "IPython: Productive Interactive Computing" -optional = false -python-versions = ">=3.10" -groups = ["integration"] -files = [ - {file = "ipython-8.25.0-py3-none-any.whl", hash = "sha256:53eee7ad44df903a06655871cbab66d156a051fd86f3ec6750470ac9604ac1ab"}, - 
{file = "ipython-8.25.0.tar.gz", hash = "sha256:c6ed726a140b6e725b911528f80439c534fac915246af3efc39440a6b0f9d716"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} -prompt-toolkit = ">=3.0.41,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5.13.0" -typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} - -[package.extras] -all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] -black = ["black"] -doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli ; python_version < \"3.11\"", "typing-extensions"] -kernel = ["ipykernel"] -matplotlib = ["matplotlib"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] -test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] - -[[package]] -name = "jedi" -version = "0.19.1" -description = "An autocompletion tool for Python that can be used for text editors." -optional = false -python-versions = ">=3.6" -groups = ["integration"] -files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, -] - -[package.dependencies] -parso = ">=0.8.3,<0.9.0" - -[package.extras] -docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] - [[package]] name = "jinja2" version = "3.1.6" @@ -1137,21 +1015,6 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] -[[package]] -name = "matplotlib-inline" -version = "0.1.7" -description = "Inline Matplotlib backend for Jupyter" -optional = false -python-versions = ">=3.8" -groups = ["integration"] -files = [ - {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, - {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, -] - -[package.dependencies] -traitlets = "*" - [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1410,38 +1273,6 @@ all = ["gssapi (>=1.4.1) ; platform_system 
!= \"Windows\"", "invoke (>=2.0)", "p gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] invoke = ["invoke (>=2.0)"] -[[package]] -name = "parso" -version = "0.8.4" -description = "A Python Parser" -optional = false -python-versions = ">=3.6" -groups = ["integration"] -files = [ - {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, - {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, -] - -[package.extras] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["docopt", "pytest"] - -[[package]] -name = "pexpect" -version = "4.9.0" -description = "Pexpect allows easy control of interactive console applications." -optional = false -python-versions = "*" -groups = ["integration"] -markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" -files = [ - {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, - {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, -] - -[package.dependencies] -ptyprocess = ">=0.5" - [[package]] name = "pluggy" version = "1.5.0" @@ -1470,21 +1301,6 @@ files = [ {file = "poetry_core-1.9.0.tar.gz", hash = "sha256:fa7a4001eae8aa572ee84f35feb510b321bd652e5cf9293249d62853e1f935a2"}, ] -[[package]] -name = "prompt-toolkit" -version = "3.0.46" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -groups = ["integration"] -files = [ - {file = "prompt_toolkit-3.0.46-py3-none-any.whl", hash = "sha256:45abe60a8300f3c618b23c16c4bb98c6fc80af8ce8b17c7ae92db48db3ee63c1"}, - {file = "prompt_toolkit-3.0.46.tar.gz", hash = "sha256:869c50d682152336e23c4db7f74667639b5047494202ffe7670817053fd57795"}, -] - -[package.dependencies] -wcwidth = "*" - [[package]] name = "protobuf" version = "3.20.3" @@ -1517,34 +1333,6 @@ files = [ {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, ] -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -groups = ["integration"] -markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.2" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, -] - -[package.extras] -tests = ["pytest"] - [[package]] name = "pyasn1" version = "0.6.0" @@ -1638,21 +1426,6 @@ typing-extensions = ">=4.2.0" dotenv = ["python-dotenv (>=0.10.4)"] email = ["email-validator (>=1.0.3)"] -[[package]] -name = "pygments" -version = "2.18.0" -description = "Pygments is a syntax highlighting package 
written in Python." -optional = false -python-versions = ">=3.8" -groups = ["integration"] -files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, -] - -[package.extras] -windows-terminal = ["colorama (>=0.4.6)"] - [[package]] name = "pymacaroons" version = "0.13.0" @@ -1734,25 +1507,6 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] -[[package]] -name = "pytest-asyncio" -version = "0.21.2" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.7" -groups = ["integration"] -files = [ - {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, - {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, -] - -[package.dependencies] -pytest = ">=7.0.0" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] - [[package]] name = "pytest-mock" version = "3.14.0" @@ -1771,26 +1525,6 @@ pytest = ">=6.2.5" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] -[[package]] -name = "pytest-operator" -version = "0.28.0" -description = "Fixtures for Operators" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "pytest-operator-0.28.0.tar.gz", hash = "sha256:efac98697da71558790eb5d4c9d42f11f3d5fb43dff22a802aee69e1801edce8"}, - {file = "pytest_operator-0.28.0-py3-none-any.whl", hash = "sha256:b3cb5a8ebf838f890133a25ee520c25c8be259b54341e42e39f64a6d97735d9f"}, -] - -[package.dependencies] -ipdb = "*" -jinja2 = "*" -juju = "*" -pytest = "*" -pytest-asyncio = "*" -pyyaml = "*" - [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -2151,26 +1885,6 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "stack-data" -version = "0.6.3" -description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, - {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] - [[package]] name = "tenacity" version = "8.3.0" @@ -2212,22 +1926,6 @@ files = [ {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"}, ] -[[package]] -name = "traitlets" -version = "5.14.3" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.8" -groups = ["integration"] -files = [ - {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, - {file = "traitlets-5.14.3.tar.gz", 
hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] - [[package]] name = "typing-extensions" version = "4.12.1" @@ -2275,18 +1973,6 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "wcwidth" -version = "0.2.13" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, - {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, -] - [[package]] name = "websocket-client" version = "1.8.0" diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index 15dda08aa..27bd0b430 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -50,7 +50,7 @@ unit = [ ] integration = [ "pytest~=7.4", - "pytest-operator~=0.28.0", + "jinja2~=3.1", "juju~=3.6", "ops~=2.15", "mysql-connector-python~=9.1.0", @@ -62,7 +62,6 @@ integration = [ "kubernetes~=27.2.0", "allure-pytest~=2.13", "allure-pytest-default-results~=0.1.2", - "pytest-asyncio~=0.21.1", "jubilant-backports~=1.4", ] diff --git a/kubernetes/tests/integration/conftest.py b/kubernetes/tests/integration/conftest.py index 046abd0a8..d475d112b 100644 --- a/kubernetes/tests/integration/conftest.py +++ b/kubernetes/tests/integration/conftest.py @@ -52,22 +52,5 @@ def cloud_configs_gcp() -> tuple[dict[str, str], dict[str, str]]: @pytest.fixture(scope="module") -def juju(request: pytest.FixtureRequest): - """Pytest fixture that wraps :meth:`jubilant.with_model`. - - This adds command line parameter ``--keep-models`` (see help for details). 
- """ - model = request.config.getoption("--model") - keep_models = bool(request.config.getoption("--keep-models")) - - if model: - juju = jubilant_backports.Juju(model=model) # type: ignore - yield juju - log = juju.debug_log(limit=1000) - else: - with jubilant_backports.temp_model(keep=keep_models) as juju: - yield juju - log = juju.debug_log(limit=1000) - - if request.session.testsfailed: - print(log, end="") +def juju() -> jubilant_backports.Juju: + return jubilant_backports.Juju(model="testing") diff --git a/kubernetes/tests/integration/integration/high_availability/test_async_replication.py b/kubernetes/tests/integration/integration/high_availability/test_async_replication.py index 2b061bd74..5c172bf27 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_async_replication.py +++ b/kubernetes/tests/integration/integration/high_availability/test_async_replication.py @@ -33,13 +33,13 @@ @pytest.fixture(scope="module") -def first_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def first_model(juju: Juju) -> Generator: """Creates and return the first model.""" yield juju.model @pytest.fixture(scope="module") -def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def second_model(juju: Juju) -> Generator: """Creates and returns the second model.""" model_name = f"{juju.model}-other" @@ -47,8 +47,6 @@ def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: juju.add_model(model_name) yield model_name - if request.config.getoption("--keep-models"): - return logging.info(f"Destroying model: {model_name}") juju.destroy_model(model_name, destroy_storage=True, force=True) @@ -72,7 +70,6 @@ def continuous_writes(first_model: str) -> Generator: @juju3 -@pytest.mark.abort_on_fail def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> None: """Simple test to ensure that the MySQL application charms get deployed.""" configuration = {"profile": "testing"} @@ -113,7 +110,6 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No @juju3 -@pytest.mark.abort_on_fail def test_async_relate(first_model: str, second_model: str) -> None: """Relate the two MySQL clusters.""" logging.info("Creating offers in first model") @@ -142,7 +138,6 @@ def test_async_relate(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_deploy_router_and_app(first_model: str) -> None: """Deploy the router and the test application.""" logging.info("Deploying the router and test application") @@ -181,7 +176,6 @@ def test_deploy_router_and_app(first_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_create_replication(first_model: str, second_model: str) -> None: """Run the create-replication action and wait for the applications to settle.""" model_1 = Juju(model=first_model) @@ -206,7 +200,6 @@ def test_create_replication(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_data_replication(first_model: str, second_model: str, continuous_writes) -> None: """Test to write to primary, and read the same data back from replicas.""" logging.info("Testing data replication") @@ -218,7 +211,6 @@ def test_data_replication(first_model: str, second_model: str, continuous_writes @juju3 -@pytest.mark.abort_on_fail def test_standby_promotion(first_model: str, second_model: str, continuous_writes) -> None: """Test graceful promotion of a standby cluster to primary.""" model_2 = Juju(model=second_model) @@ -248,7 +240,6 @@ def 
test_standby_promotion(first_model: str, second_model: str, continuous_write @juju3 -@pytest.mark.abort_on_fail def test_failover(first_model: str, second_model: str) -> None: """Test switchover on primary cluster fail.""" logging.info("Freezing mysqld on primary cluster units") @@ -301,7 +292,6 @@ def test_failover(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_rejoin_invalidated_cluster( first_model: str, second_model: str, continuous_writes ) -> None: @@ -321,7 +311,6 @@ def test_rejoin_invalidated_cluster( @juju3 -@pytest.mark.abort_on_fail def test_unrelate_and_relate(first_model: str, second_model: str, continuous_writes) -> None: """Test removing and re-relating the two mysql clusters.""" model_1 = Juju(model=first_model) diff --git a/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py b/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py index 79ae9ac1f..a78c5031a 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py +++ b/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py @@ -35,13 +35,13 @@ @pytest.fixture(scope="module") -def first_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def first_model(juju: Juju) -> Generator: """Creates and return the first model.""" yield juju.model @pytest.fixture(scope="module") -def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def second_model(juju: Juju) -> Generator: """Creates and returns the second model.""" model_name = f"{juju.model}-other" @@ -49,8 +49,6 @@ def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: juju.add_model(model_name) yield model_name - if request.config.getoption("--keep-models"): - return logging.info(f"Destroying model: {model_name}") juju.destroy_model(model_name, destroy_storage=True, force=True) @@ -74,7 +72,6 @@ def continuous_writes(first_model: str) -> Generator: @juju3 -@pytest.mark.abort_on_fail def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> None: """Simple test to ensure that the MySQL application charms get deployed.""" configuration = {"profile": "testing"} @@ -115,7 +112,6 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No @juju3 -@pytest.mark.abort_on_fail def test_async_relate(first_model: str, second_model: str) -> None: """Relate the two MySQL clusters.""" logging.info("Creating offers in first model") @@ -144,7 +140,6 @@ def test_async_relate(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_deploy_test_app(first_model: str) -> None: """Deploy the test application.""" logging.info("Deploying the test application") @@ -170,7 +165,6 @@ def test_deploy_test_app(first_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_create_replication(first_model: str, second_model: str) -> None: """Run the create-replication action and wait for the applications to settle.""" model_1 = Juju(model=first_model) @@ -195,7 +189,6 @@ def test_create_replication(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_upgrade_from_edge( first_model: str, second_model: str, charm: str, continuous_writes ) -> None: @@ -211,7 +204,6 @@ def test_upgrade_from_edge( @juju3 -@pytest.mark.abort_on_fail def test_data_replication(first_model: str, second_model: str, continuous_writes) -> None: """Test to write 
     to primary, and read the same data back from replicas."""
     logging.info("Testing data replication")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py b/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py
index d95f0930c..4f9428072 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py
@@ -5,7 +5,6 @@
 import subprocess
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -26,7 +25,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -62,7 +60,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_cluster_switchover(juju: Juju) -> None:
     """Test that the primary node can be switched over."""
     logging.info("Testing cluster switchover...")
@@ -90,7 +87,6 @@ def test_cluster_switchover(juju: Juju) -> None:
     assert get_mysql_primary_unit(juju, app_name) == new_primary_unit, "Switchover failed"
 
 
-@pytest.mark.abort_on_fail
 def test_cluster_failover_after_majority_loss(juju: Juju) -> None:
     """Test the promote-to-primary command after losing the majority of nodes, with force flag."""
     app_name = get_app_name(juju, MYSQL_APP_NAME)
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py b/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py
index 5a7d70c56..4761bb008 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import generate_random_string
@@ -23,7 +22,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -59,7 +57,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_consistent_data_replication_across_cluster(juju: Juju) -> None:
     """Confirm that data is replicated from the primary node to all the replicas."""
     table_name = "data"
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py b/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py
index d6dadcad2..1830f51d2 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -21,7 +20,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py
index ea50262df..a899a89d7 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py
@@ -8,7 +8,6 @@
 from pathlib import Path
 
 import jubilant_backports
-import pytest
 from jubilant_backports import CLIError, Juju
 from tenacity import (
     Retrying,
@@ -32,7 +31,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -68,7 +66,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_log_rotation(juju: Juju) -> None:
     """Test the log rotation of text files."""
     log_types = ["error", "audit"]
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py b/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py
index c2ff84e48..a6c3b9729 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import generate_random_string
@@ -25,7 +24,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -61,7 +59,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_kill_primary_check_reelection(juju: Juju) -> None:
     """Confirm that a new primary is elected when the current primary is tear down."""
     check_mysql_units_writes_increment(juju, MYSQL_APP_NAME)
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py b/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py
index baa432ef6..ae0990e91 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import generate_random_string
@@ -23,7 +22,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -59,7 +57,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_scaling_without_data_loss(juju: Juju) -> None:
     """Test that data is preserved during scale up and scale down."""
     table_name = "instance_state_replication"
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py b/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py
index c04b2e5b9..7c6abed2b 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -26,7 +25,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster_1(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -66,7 +64,6 @@ def test_deploy_highly_available_cluster_1(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster_2(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -106,7 +103,6 @@ def test_deploy_highly_available_cluster_2(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_labeling_of_k8s_endpoints(juju: Juju) -> None:
     """Test the labeling of k8s endpoints when apps with same cluster-name deployed."""
     logging.info("Ensuring that the created k8s endpoints have correct addresses")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py b/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py
index b404a6b2b..820bd1cdd 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -23,8 +22,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.skip_if_deployed
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the mysql charm and deploy it."""
     logger.info(f"Deploying {APP_NAME}")
@@ -44,7 +41,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_custom_variables(juju: Juju) -> None:
     """Query database for custom variables."""
     app_units = get_app_units(juju, APP_NAME)
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py
index b3c942cc4..9ba12702b 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py
@@ -10,7 +10,6 @@
 from string import Template
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -30,7 +29,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -66,7 +64,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_network_cut_affecting_an_instance(juju: Juju, continuous_writes, chaos_mesh) -> None:
     """Test for a network cut affecting an instance."""
     logging.info("Ensuring that all instances have incrementing continuous writes")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py
index 356081f17..7504d803c 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 from lightkube.core.client import Client
 from lightkube.models.meta_v1 import ObjectMeta
@@ -27,7 +26,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -63,7 +61,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_pod_eviction_and_pvc_deletion(juju: Juju, continuous_writes) -> None:
     """Test behavior when node drains - pod is evicted and pvs are rotated."""
     logging.info("Ensuring that all instances have incrementing continuous writes")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py
index 4e5ef81b7..b1de5167d 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import generate_random_string
@@ -25,7 +24,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -61,7 +59,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_single_unit_pod_delete(juju: Juju) -> None:
     """Delete the pod in a single unit deployment and write data to new pod."""
     logging.info("Scale mysql application to 1 unit that is active")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
index 913fb4556..a8c0d3c73 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
@@ -5,7 +5,6 @@
 import random
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 from tenacity import (
     Retrying,
@@ -32,7 +31,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -68,7 +66,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_freeze_db_process(juju: Juju, continuous_writes) -> None:
     """Test to send a SIGSTOP to the primary db process and ensure that the cluster self heals."""
     logging.info("Ensuring that all instances have incrementing continuous writes")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py
index 6d9b37f7a..fece4062f 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py
@@ -5,7 +5,6 @@
 import time
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CONTAINER_NAME
@@ -31,7 +30,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -67,7 +65,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_kill_db_process(juju: Juju, continuous_writes) -> None:
     """Test to send a SIGKILL to the primary db process and ensure that the cluster self heals."""
     logging.info("Ensuring all units have continuous writes incrementing")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
index d19d41a3f..89ffb2a58 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import execute_queries_on_unit
@@ -27,7 +26,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -63,7 +61,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_cluster_manual_rejoin(juju: Juju, continuous_writes) -> None:
     """The cluster manual re-join test.
 
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py
index 2ee8289dd..58e59df40 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -19,7 +18,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_single_unit_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -40,7 +38,6 @@ def test_deploy_single_unit_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_crash_during_cluster_setup(juju: Juju, charm: str) -> None:
     """Test primary crash during startup.
 
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py
index 8abe331f8..a7b1f944e 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -25,7 +24,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -61,7 +59,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 async def test_graceful_full_cluster_crash(juju: Juju, continuous_writes) -> None:
     """Pause test.
 
diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
index 08673bc3b..ab2646233 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CONTAINER_NAME
@@ -25,7 +24,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -61,7 +59,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 async def test_graceful_crash_of_primary(juju: Juju, continuous_writes) -> None:
     """Test to send SIGTERM to primary instance and then verify recovery."""
     # Ensure continuous writes still incrementing for all units
diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade.py
index 48cedb79c..99f69d86a 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_upgrade.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade.py
@@ -9,7 +9,6 @@
 from pathlib import Path
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju, TaskError
 
 from ...helpers_ha import (
@@ -33,7 +32,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_latest(juju: Juju) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -70,7 +68,6 @@ def test_deploy_latest(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_pre_upgrade_check(juju: Juju) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
     mysql_leader = get_app_leader(juju, MYSQL_APP_NAME)
@@ -92,7 +89,6 @@ def test_pre_upgrade_check(juju: Juju) -> None:
     assert get_k8s_stateful_set_partitions(juju, MYSQL_APP_NAME) == 2, "Partition not set to 2"
 
 
-@pytest.mark.abort_on_fail
 def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None:
     """Update the second cluster."""
     logging.info("Ensure continuous writes are incrementing")
@@ -133,7 +129,6 @@ def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None:
     check_mysql_units_writes_increment(juju, MYSQL_APP_NAME)
 
 
-@pytest.mark.abort_on_fail
 def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None:
     """Test an upgrade failure and its rollback."""
     mysql_app_leader = get_app_leader(juju, MYSQL_APP_NAME)
diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py
index 0fefbcdec..b6084bebe 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py
@@ -5,7 +5,6 @@
 from contextlib import suppress
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju, TaskError
 
 from ...helpers_ha import (
@@ -27,7 +26,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_stable(juju: Juju) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -64,7 +62,6 @@ def test_deploy_stable(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_pre_upgrade_check(juju: Juju) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
     mysql_leader = get_app_leader(juju, MYSQL_APP_NAME)
@@ -86,7 +83,6 @@ def test_pre_upgrade_check(juju: Juju) -> None:
     assert get_k8s_stateful_set_partitions(juju, MYSQL_APP_NAME) == 2, "Partition not set to 2"
 
 
-@pytest.mark.abort_on_fail
 def test_upgrade_from_stable(juju: Juju, charm: str, continuous_writes) -> None:
     """Update the second cluster."""
     logging.info("Ensure continuous writes are incrementing")
diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
index 43b78563c..66cb3b88c 100644
--- a/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
+++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
@@ -9,7 +9,6 @@
 from pathlib import Path
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju, TaskError
 
 from ...helpers_ha import (
@@ -32,7 +31,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     juju.deploy(
@@ -59,7 +57,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_pre_upgrade_check(juju: Juju) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
     mysql_leader = get_app_leader(juju, MYSQL_APP_NAME)
@@ -71,7 +68,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_upgrade_to_failing(juju: Juju, charm: str) -> None:
     with InjectFailure(
         path="src/upgrade.py",
@@ -110,7 +106,6 @@ def test_upgrade_to_failing(juju: Juju, charm: str) -> None:
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_rollback(juju: Juju, charm: str) -> None:
     """Test upgrade rollback to a healthy revision."""
     mysql_app_leader = get_app_leader(juju, MYSQL_APP_NAME)
diff --git a/kubernetes/tests/integration/integration/relations/test_database.py b/kubernetes/tests/integration/integration/relations/test_database.py
index af2123958..6d0a98fd9 100644
--- a/kubernetes/tests/integration/integration/relations/test_database.py
+++ b/kubernetes/tests/integration/integration/relations/test_database.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ... import markers
@@ -24,8 +23,6 @@
 APPS = [DATABASE_APP_NAME, APPLICATION_APP_NAME]
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm):
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     juju.deploy(
@@ -46,7 +43,6 @@ def test_build_and_deploy(juju: Juju, charm):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relation_creation_eager(juju: Juju):
     """Relate charms before they have time to properly start.
 
@@ -72,7 +68,6 @@
     )
 
 
-@pytest.mark.abort_on_fail
 @markers.only_without_juju_secrets
 def test_relation_creation_databag(juju: Juju):
     """Relate charms and wait for the expected changes in status."""
@@ -85,7 +80,6 @@
     assert {"password", "username"} <= set(relation_data[0]["application-data"])
 
 
-@pytest.mark.abort_on_fail
 @markers.only_with_juju_secrets
 def test_relation_creation(juju: Juju):
     """Relate charms and wait for the expected changes in status."""
@@ -99,7 +93,6 @@
     assert "secret-user" in relation_data[0]["application-data"]
 
 
-@pytest.mark.abort_on_fail
 def test_relation_broken(juju: Juju):
     """Remove relation and wait for the expected changes in status."""
     juju.remove_relation(
diff --git a/kubernetes/tests/integration/integration/relations/test_mysql_root.py b/kubernetes/tests/integration/integration/relations/test_mysql_root.py
index f8ab50020..beaebfb56 100644
--- a/kubernetes/tests/integration/integration/relations/test_mysql_root.py
+++ b/kubernetes/tests/integration/integration/relations/test_mysql_root.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -20,8 +19,6 @@
 APPLICATION_ENDPOINT = "mysql"
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm):
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     juju.deploy(
@@ -45,7 +42,6 @@ def test_build_and_deploy(juju: Juju, charm):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relation_creation_eager(juju: Juju):
     """Relate charms before they have time to properly start.
 
diff --git a/kubernetes/tests/integration/integration/roles/test_database_dba_role.py b/kubernetes/tests/integration/integration/roles/test_database_dba_role.py
index 82b2777e5..2ff5deb95 100644
--- a/kubernetes/tests/integration/integration/roles/test_database_dba_role.py
+++ b/kubernetes/tests/integration/integration/roles/test_database_dba_role.py
@@ -24,7 +24,6 @@
 INTEGRATOR_APP_NAME = "data-integrator"
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -67,7 +66,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dba_role(juju: Juju):
     """Test the database-level DBA role."""
     juju.config(
diff --git a/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py b/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py
index 49640c979..bf00e02a6 100644
--- a/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py
+++ b/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py
@@ -3,7 +3,6 @@
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -22,7 +21,6 @@
 INTEGRATOR_APP_NAME = "data-integrator"
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -54,7 +52,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dba_role(juju: Juju):
     """Test the DBA predefined role."""
     juju.config(
diff --git a/kubernetes/tests/integration/integration/roles/test_instance_roles.py b/kubernetes/tests/integration/integration/roles/test_instance_roles.py
index f35cd8d87..2088fc2de 100644
--- a/kubernetes/tests/integration/integration/roles/test_instance_roles.py
+++ b/kubernetes/tests/integration/integration/roles/test_instance_roles.py
@@ -25,7 +25,6 @@
 INTEGRATOR_APP_NAME = "data-integrator"
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -68,7 +67,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_read_role(juju: Juju):
     """Test the charmed_read predefined role."""
     juju.config(
@@ -154,7 +152,6 @@ def test_charmed_read_role(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dml_role(juju: Juju):
     """Test the charmed_dml role."""
     juju.config(
diff --git a/kubernetes/tests/integration/integration/test_backup_aws.py b/kubernetes/tests/integration/integration/test_backup_aws.py
index 64a7a010a..a91769ebd 100644
--- a/kubernetes/tests/integration/integration/test_backup_aws.py
+++ b/kubernetes/tests/integration/integration/test_backup_aws.py
@@ -76,7 +76,6 @@ def clean_backups_from_buckets(cloud_configs_aws):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     juju.deploy(
@@ -122,7 +121,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_configs_aws) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -207,7 +205,6 @@ def test_backup(juju: Juju, cloud_configs_aws) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None:
     """Test to restore a backup to the same mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_aws
@@ -319,7 +316,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None:
     ), "cluster should migrate to blocked status after restore"
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None:
     """Test to restore a backup on a new mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_aws
diff --git a/kubernetes/tests/integration/integration/test_backup_ceph.py b/kubernetes/tests/integration/integration/test_backup_ceph.py
index 51f788c51..e197427b3 100644
--- a/kubernetes/tests/integration/integration/test_backup_ceph.py
+++ b/kubernetes/tests/integration/integration/test_backup_ceph.py
@@ -164,7 +164,6 @@ def clean_backups_from_buckets(cloud_credentials, cloud_configs):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     juju.deploy(
@@ -210,7 +209,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_credentials, cloud_configs) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -293,7 +291,6 @@ def test_backup(juju: Juju, cloud_credentials, cloud_configs) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) -> None:
     """Test to restore a backup to the same mysql cluster."""
     logger.info("Scaling mysql application to 1 unit")
@@ -403,7 +400,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) -> None:
     ), "cluster should migrate to blocked status after restore"
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_configs) -> None:
     """Test to restore a backup on a new mysql cluster."""
     logger.info("Deploying a new mysql cluster")
diff --git a/kubernetes/tests/integration/integration/test_backup_gcp.py b/kubernetes/tests/integration/integration/test_backup_gcp.py
index 26baee46f..c1c00f9aa 100644
--- a/kubernetes/tests/integration/integration/test_backup_gcp.py
+++ b/kubernetes/tests/integration/integration/test_backup_gcp.py
@@ -75,7 +75,6 @@ def clean_backups_from_buckets(cloud_configs_gcp):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     juju.deploy(
@@ -121,7 +120,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_configs_gcp) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -206,7 +204,6 @@ def test_backup(juju: Juju, cloud_configs_gcp) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None:
     """Test to restore a backup to the same mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_gcp
@@ -318,7 +315,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None:
     ), "cluster should migrate to blocked status after restore"
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None:
     """Test to restore a backup on a new mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_gcp
diff --git a/kubernetes/tests/integration/integration/test_backup_pitr_aws.py b/kubernetes/tests/integration/integration/test_backup_pitr_aws.py
index d56fe0796..f1c239d51 100644
--- a/kubernetes/tests/integration/integration/test_backup_pitr_aws.py
+++ b/kubernetes/tests/integration/integration/test_backup_pitr_aws.py
@@ -1,13 +1,11 @@
 # Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
-import pytest
 from jubilant_backports import Juju
 
 from .helpers_backups import build_and_deploy_operations, pitr_operations
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy_aws(
     juju: Juju, cloud_configs_aws: tuple[dict[str, str], dict[str, str]], charm
 ) -> None:
@@ -15,7 +13,6 @@ def test_build_and_deploy_aws(
     build_and_deploy_operations(juju, charm, cloud_configs_aws[0], cloud_configs_aws[1])
 
 
-@pytest.mark.abort_on_fail
 def test_pitr_aws(juju: Juju, cloud_configs_aws: tuple[dict[str, str], dict[str, str]]) -> None:
     """Pitr tests."""
     pitr_operations(juju, cloud_configs_aws[0], cloud_configs_aws[1])
diff --git a/kubernetes/tests/integration/integration/test_backup_pitr_gcp.py b/kubernetes/tests/integration/integration/test_backup_pitr_gcp.py
index 812c9ef09..ab5eb660b 100644
--- a/kubernetes/tests/integration/integration/test_backup_pitr_gcp.py
+++ b/kubernetes/tests/integration/integration/test_backup_pitr_gcp.py
@@ -1,13 +1,11 @@
 # Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
-import pytest
 from jubilant_backports import Juju
 
 from .helpers_backups import build_and_deploy_operations, pitr_operations
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy_gcp(
     juju: Juju, cloud_configs_gcp: tuple[dict[str, str], dict[str, str]], charm
 ) -> None:
@@ -15,7 +13,6 @@ def test_build_and_deploy_gcp(
     build_and_deploy_operations(juju, charm, cloud_configs_gcp[0], cloud_configs_gcp[1])
 
 
-@pytest.mark.abort_on_fail
 def test_pitr_gcp(juju: Juju, cloud_configs_gcp: tuple[dict[str, str], dict[str, str]]) -> None:
     """Pitr tests."""
     pitr_operations(juju, cloud_configs_gcp[0], cloud_configs_gcp[1])
diff --git a/kubernetes/tests/integration/integration/test_charm.py b/kubernetes/tests/integration/integration/test_charm.py
index e7889e854..3c54ebb82 100644
--- a/kubernetes/tests/integration/integration/test_charm.py
+++ b/kubernetes/tests/integration/integration/test_charm.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 import urllib3
 from jubilant_backports import Juju
 
@@ -33,8 +32,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.skip_if_deployed
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the mysql charm and deploy it."""
     logger.info(f"Deploying {APP_NAME}")
@@ -72,7 +69,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     assert output[0] == 3
 
 
-@pytest.mark.abort_on_fail
 def test_scale_up_after_scale_down(juju: Juju) -> None:
     """Confirm storage reuse works."""
     logger.info("Scale down to one unit")
@@ -88,7 +84,6 @@ def test_scale_up_after_scale_down(juju: Juju) -> None:
     assert (num_online, num_not_online) == (3, 0)
 
 
-@pytest.mark.abort_on_fail
 def test_scale_up_from_zero(juju: Juju) -> None:
     """Ensure scaling down to zero and back up works."""
     logger.info("Scaling down to 0 units")
@@ -106,7 +101,6 @@ def test_scale_up_from_zero(juju: Juju) -> None:
     assert (num_online, num_not_online) == (3, 0)
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation(juju: Juju):
     """Rotate password and confirm changes."""
     app_units = get_app_units(juju, APP_NAME)
@@ -140,7 +134,6 @@ def test_password_rotation(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation_silent(juju: Juju):
     """Rotate password and confirm changes."""
     app_units = get_app_units(juju, APP_NAME)
@@ -171,7 +164,6 @@ def test_password_rotation_silent(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation_root_user_implicit(juju: Juju):
     """Rotate password and confirm changes."""
     app_units = get_app_units(juju, APP_NAME)
@@ -198,7 +190,6 @@ def test_password_rotation_root_user_implicit(juju: Juju):
     assert updated_credentials["password"] == updated_root_credentials["password"]
 
 
-@pytest.mark.abort_on_fail
 def test_exporter_endpoints(juju: Juju) -> None:
     """Test that endpoints are running."""
     app_units = get_app_units(juju, APP_NAME)
diff --git a/kubernetes/tests/integration/integration/test_cos_integration_bundle.py b/kubernetes/tests/integration/integration/test_cos_integration_bundle.py
index a390ddcca..7d90fb7ac 100644
--- a/kubernetes/tests/integration/integration/test_cos_integration_bundle.py
+++ b/kubernetes/tests/integration/integration/test_cos_integration_bundle.py
@@ -7,7 +7,6 @@
 import jinja2
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ..helpers_ha import CHARM_METADATA, wait_for_apps_status
 
@@ -18,7 +17,6 @@
 TIMEOUT = 10 * 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_bundle_with_cos_integrations(juju: Juju, charm) -> None:
     """Test COS integrations formed before mysql is allocated and deployed."""
     bundle_template = jinja2.Template(
diff --git a/kubernetes/tests/integration/integration/test_multi_relations.py b/kubernetes/tests/integration/integration/test_multi_relations.py
index ba5bf815f..1a66bf481 100644
--- a/kubernetes/tests/integration/integration/test_multi_relations.py
+++ b/kubernetes/tests/integration/integration/test_multi_relations.py
@@ -3,7 +3,6 @@
 # See LICENSE file for licensing details.
 
 import jubilant_backports
-import pytest
 from jubilant_backports import CLIError, Juju
 from tenacity import RetryError, Retrying, retry_if_exception_type, stop_after_attempt, wait_fixed
 
@@ -14,7 +13,6 @@
 SCALE_UNITS = 3
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm):
     """Build the charm and deploy 1 units to ensure a cluster is formed."""
     config = {"profile": "testing"}
@@ -83,7 +81,6 @@ def test_build_and_deploy(juju: Juju, charm):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relate_all(juju: Juju):
     """Relate all the applications to the database."""
     for idx in range(SCALE_APPS):
@@ -105,7 +102,6 @@ def test_relate_all(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_scale_out(juju: Juju):
     """Scale database and routers."""
     retry_if_cli_error(lambda: juju.add_unit(MYSQL_APP_NAME, num_units=SCALE_UNITS - 1))
@@ -123,7 +119,6 @@ def test_scale_out(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_scale_in(juju: Juju):
     """Scale database and routers."""
     retry_if_cli_error(lambda: juju.remove_unit(MYSQL_APP_NAME, num_units=SCALE_UNITS - 1))
diff --git a/kubernetes/tests/integration/integration/test_osm_integration_bundle.py b/kubernetes/tests/integration/integration/test_osm_integration_bundle.py
index 57f0bca10..bb7f14195 100644
--- a/kubernetes/tests/integration/integration/test_osm_integration_bundle.py
+++ b/kubernetes/tests/integration/integration/test_osm_integration_bundle.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed
 
@@ -141,7 +140,6 @@ def test_deploy_and_relate_osm_bundle(juju: Juju, charm) -> None:
     juju.integrate("osm-pol:mysql", f"{APP_NAME}:mysql-root")
 
 
-@pytest.mark.abort_on_fail
 @markers.juju3
 @markers.amd64_only  # kafka-k8s charm not available for arm64
 def test_osm_pol_operations(juju: Juju) -> None:
diff --git a/kubernetes/tests/integration/integration/test_saturate_max_connections.py b/kubernetes/tests/integration/integration/test_saturate_max_connections.py
index e1282929f..ff941104e 100644
--- a/kubernetes/tests/integration/integration/test_saturate_max_connections.py
+++ b/kubernetes/tests/integration/integration/test_saturate_max_connections.py
@@ -18,7 +18,6 @@
 CONNECTIONS = 10
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 1 units to ensure a cluster is formed."""
     juju.deploy(
@@ -32,7 +31,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_and_relate_test_app(juju: Juju) -> None:
     config = {"auto_start_writes": False, "sleep_interval": "500"}
     logger.info("Deploying test app")
@@ -55,7 +53,6 @@ def test_deploy_and_relate_test_app(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_saturate_max_connections(juju: Juju) -> None:
     app_unit_name = get_app_units(juju, TEST_APP_NAME)[0]
     mysql_unit_name = get_app_units(juju, MYSQL_APP_NAME)[0]
diff --git a/kubernetes/tests/integration/integration/test_tls.py b/kubernetes/tests/integration/integration/test_tls.py
index 8ebf69daf..a72ef1d97 100644
--- a/kubernetes/tests/integration/integration/test_tls.py
+++ b/kubernetes/tests/integration/integration/test_tls.py
@@ -6,7 +6,6 @@
 from time import sleep
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CLUSTER_ADMIN_USERNAME, CONTAINER_NAME, TLS_SSL_CERT_FILE
@@ -48,8 +47,6 @@
 config = {}
 
 
-@pytest.mark.skip_if_deployed
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     # Set model configuration
@@ -72,7 +69,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_connection_before_tls(juju: Juju) -> None:
     """Ensure connections (with and without ssl) are possible before relating with TLS operator."""
     app_units = get_app_units(juju, APP_NAME)
@@ -100,7 +96,6 @@
     )
 
 
-@pytest.mark.abort_on_fail
 def test_enable_tls(juju: Juju) -> None:
     """Test for encryption enablement when relation to TLS charm."""
     app_units = get_app_units(juju, APP_NAME)
@@ -143,7 +138,6 @@
     assert get_tls_ca(juju, app_units[0]), "❌ No CA found after TLS relation"
 
 
-@pytest.mark.abort_on_fail
 def test_rotate_tls_key(juju: Juju) -> None:
     """Verify rotating tls private keys restarts cluster with new certificates.
 
@@ -193,7 +187,6 @@
     )
 
 
-@pytest.mark.abort_on_fail
 def test_disable_tls(juju: Juju) -> None:
     # Remove the relation
     app_units = get_app_units(juju, APP_NAME)
diff --git a/kubernetes/tests/spread/integration/test_architecture.py/task.yaml b/kubernetes/tests/spread/integration/test_architecture.py/task.yaml
index 9a1bacc35..90181d0f6 100644
--- a/kubernetes/tests/spread/integration/test_architecture.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_architecture.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_architecture.py
 environment:
   TEST_MODULE: test_architecture.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/kubernetes/tests/spread/integration/test_async_replication.py/task.yaml b/kubernetes/tests/spread/integration/test_async_replication.py/task.yaml
index 2a424dd97..a1f1bc174 100644
--- a/kubernetes/tests/spread/integration/test_async_replication.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_async_replication.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_async_replication.py
 environment:
   TEST_MODULE: high_availability/test_async_replication.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 variants:
diff --git a/kubernetes/tests/spread/integration/test_async_replication_upgrade.py/task.yaml b/kubernetes/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
index 8874e2b29..5e7bc75fc 100644
--- a/kubernetes/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_async_replication_upgrade.py
 environment:
   TEST_MODULE: high_availability/test_async_replication_upgrade.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 variants:
diff --git a/kubernetes/tests/spread/integration/test_backup_aws.py/task.yaml b/kubernetes/tests/spread/integration/test_backup_aws.py/task.yaml
index dd385cc2e..aefca2e42 100644
--- a/kubernetes/tests/spread/integration/test_backup_aws.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_backup_aws.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_aws.py
 environment:
   TEST_MODULE: test_backup_aws.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/kubernetes/tests/spread/integration/test_backup_ceph.py/task.yaml b/kubernetes/tests/spread/integration/test_backup_ceph.py/task.yaml
index d9935971c..3d542ad9f 100644
--- a/kubernetes/tests/spread/integration/test_backup_ceph.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_backup_ceph.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_backup_ceph.py
 environment:
   TEST_MODULE: test_backup_ceph.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_backup_gcp.py/task.yaml b/kubernetes/tests/spread/integration/test_backup_gcp.py/task.yaml
index d932d0d92..1f2ce92d5 100644
--- a/kubernetes/tests/spread/integration/test_backup_gcp.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_backup_gcp.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_gcp.py
 environment:
   TEST_MODULE: test_backup_gcp.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/kubernetes/tests/spread/integration/test_backup_pitr_aws.py/task.yaml b/kubernetes/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
index 0f13196ec..525bc318a 100644
--- a/kubernetes/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_backup_pitr_aws.py
 environment:
   TEST_MODULE: test_backup_pitr_aws.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml b/kubernetes/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
index 9fb49f41d..0402e587b 100644
--- a/kubernetes/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_backup_pitr_gcp.py
 environment:
   TEST_MODULE: test_backup_pitr_gcp.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_charm.py/task.yaml b/kubernetes/tests/spread/integration/test_charm.py/task.yaml
index e62cc4aea..6f000bc94 100644
--- a/kubernetes/tests/spread/integration/test_charm.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_charm.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_charm.py
 environment:
   TEST_MODULE: test_charm.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_cos_integration_bundle.py/task.yaml b/kubernetes/tests/spread/integration/test_cos_integration_bundle.py/task.yaml
index a9a102edf..fbe3007ff 100644
--- a/kubernetes/tests/spread/integration/test_cos_integration_bundle.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_cos_integration_bundle.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_cos_integration_bundle.py
 environment:
   TEST_MODULE: test_cos_integration_bundle.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_database.py/task.yaml b/kubernetes/tests/spread/integration/test_database.py/task.yaml
index c8de1abca..549bc776c 100644
--- a/kubernetes/tests/spread/integration/test_database.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_database.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_database.py
 environment:
   TEST_MODULE: relations/test_database.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_database_dba_role.py/task.yaml b/kubernetes/tests/spread/integration/test_database_dba_role.py/task.yaml
index 0b4e5f5ad..5d60a8282 100644
--- a/kubernetes/tests/spread/integration/test_database_dba_role.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_database_dba_role.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_database_dba_role.py
 environment:
   TEST_MODULE: roles/test_database_dba_role.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_instance_dba_role.py/task.yaml b/kubernetes/tests/spread/integration/test_instance_dba_role.py/task.yaml
index 780946c6f..addeea2e7 100644
--- a/kubernetes/tests/spread/integration/test_instance_dba_role.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_instance_dba_role.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_instance_dba_role.py
 environment:
   TEST_MODULE: roles/test_instance_dba_role.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_instance_roles.py/task.yaml b/kubernetes/tests/spread/integration/test_instance_roles.py/task.yaml
index 1d970b44e..6709bc81b 100644
--- a/kubernetes/tests/spread/integration/test_instance_roles.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_instance_roles.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_instance_roles.py
 environment:
   TEST_MODULE: roles/test_instance_roles.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_multi_relations.py/task.yaml b/kubernetes/tests/spread/integration/test_multi_relations.py/task.yaml
index d24f5d675..6ca820d5c 100644
--- a/kubernetes/tests/spread/integration/test_multi_relations.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_multi_relations.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_multi_relations.py
 environment:
   TEST_MODULE: test_multi_relations.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_mysql_root.py/task.yaml b/kubernetes/tests/spread/integration/test_mysql_root.py/task.yaml
index e20171f03..55330f07f 100644
--- a/kubernetes/tests/spread/integration/test_mysql_root.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_mysql_root.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_mysql_root.py
 environment:
   TEST_MODULE: relations/test_mysql_root.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 systems:
diff --git a/kubernetes/tests/spread/integration/test_osm_integration_bundle.py/task.yaml b/kubernetes/tests/spread/integration/test_osm_integration_bundle.py/task.yaml
index e58c4aabb..bbedc13b3 100644
--- a/kubernetes/tests/spread/integration/test_osm_integration_bundle.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_osm_integration_bundle.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_osm_integration_bundle.py
 environment:
   TEST_MODULE: test_osm_integration_bundle.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 variants:
diff --git a/kubernetes/tests/spread/integration/test_primary_switchover.py/task.yaml b/kubernetes/tests/spread/integration/test_primary_switchover.py/task.yaml
index b3ab09e0a..350f2c037 100644
--- a/kubernetes/tests/spread/integration/test_primary_switchover.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_primary_switchover.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_primary_switchover
 environment:
   TEST_MODULE: high_availability/test_primary_switchover.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_data_consistency.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_data_consistency.py/task.yaml
index 285997cd0..c0429dd9e 100644
--- a/kubernetes/tests/spread/integration/test_replication_data_consistency.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_data_consistency.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_data_consistency.py
 environment:
   TEST_MODULE: high_availability/test_replication_data_consistency.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_data_isolation.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_data_isolation.py/task.yaml
index cdbe969c5..6fb68b6a3 100644
--- a/kubernetes/tests/spread/integration/test_replication_data_isolation.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_data_isolation.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_data_isolation.py
 environment:
   TEST_MODULE: high_availability/test_replication_data_isolation.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_logs_rotation.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
index 9a518c529..82edf7da7 100644
--- a/kubernetes/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_logs_rotation.py
 environment:
   TEST_MODULE: high_availability/test_replication_logs_rotation.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_reelection.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_reelection.py/task.yaml
index 3ef7a35ed..0fb53e797 100644
--- a/kubernetes/tests/spread/integration/test_replication_reelection.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_reelection.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_reelection.py
 environment:
   TEST_MODULE: high_availability/test_replication_reelection.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_scaling.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_scaling.py/task.yaml
index a00f52ea0..1e9b8d43e 100644
--- a/kubernetes/tests/spread/integration/test_replication_scaling.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_scaling.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_scaling.py
 environment:
   TEST_MODULE: high_availability/test_replication_scaling.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
index 81d88864d..976530e92 100644
--- a/kubernetes/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_unit_endpoints.py
 environment:
   TEST_MODULE: high_availability/test_replication_unit_endpoints.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_replication_variables.py/task.yaml b/kubernetes/tests/spread/integration/test_replication_variables.py/task.yaml
index ee7c96b23..4fd87c9d6 100644
--- a/kubernetes/tests/spread/integration/test_replication_variables.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_replication_variables.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_variables.py
 environment:
   TEST_MODULE: high_availability/test_replication_variables.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_saturate_max_connections.py/task.yaml b/kubernetes/tests/spread/integration/test_saturate_max_connections.py/task.yaml
index ff95a562c..9aec1554a 100644
--- a/kubernetes/tests/spread/integration/test_saturate_max_connections.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_saturate_max_connections.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_saturate_max_connections.py
 environment:
   TEST_MODULE: test_saturate_max_connections.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_network_cut.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
index 9fcce7948..6fb0178cd 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_network_cut.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_network_cut.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_node_drain.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_node_drain.py/task.yaml
index ea6f73090..44bca0174 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_node_drain.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_node_drain.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_node_drain.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_node_drain.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_pod.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_pod.py/task.yaml
index 120cccaea..4bafd3710 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_pod.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_pod.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_pod.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_pod.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
index e93ab2abe..73c415634 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_process_frozen.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_process_frozen.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_process_killed.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
index dbd8acae8..f13f0f23c 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_process_killed.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_process_killed.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
index 212647a63..5f10d654f 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_restart_graceful.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_restart_graceful.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_setup_crash.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_setup_crash.py/task.yaml
index b15cadd41..36c191608 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_setup_crash.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_setup_crash.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_setup_crash.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_setup_crash.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_stop_all.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
index 1c0729970..34ead76c9 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_stop_all.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_stop_all.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml b/kubernetes/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
index af71ab7c9..e243b1835 100644
--- a/kubernetes/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_stop_primary.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_stop_primary.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/kubernetes/tests/spread/integration/test_tls.py/task.yaml b/kubernetes/tests/spread/integration/test_tls.py/task.yaml
index a2b993ead..defb25c09 100644
--- a/kubernetes/tests/spread/integration/test_tls.py/task.yaml
+++ b/kubernetes/tests/spread/integration/test_tls.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_tls.py
 environment:
   TEST_MODULE:
test_tls.py execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results diff --git a/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml b/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml index 83af4dc2a..67280c6cd 100644 --- a/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml +++ b/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml @@ -2,6 +2,6 @@ summary: test_upgrade.py environment: TEST_MODULE: high_availability/test_upgrade.py execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results diff --git a/kubernetes/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml b/kubernetes/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml index d02a2ca7a..4cae0b857 100644 --- a/kubernetes/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml +++ b/kubernetes/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml @@ -2,7 +2,7 @@ summary: test_upgrade_rollback_incompat.py environment: TEST_MODULE: high_availability/test_upgrade_rollback_incompat.py execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results systems: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2023_09_12.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2023_09_12.py/task.yaml index 1cc98db41..91d5393e9 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2023_09_12.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2023_09_12.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 99 CHARM_REVISION_ARM64: execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_01_08.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_01_08.py/task.yaml index 91e9c480a..787eb42df 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_01_08.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_01_08.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 113 CHARM_REVISION_ARM64: execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_03_18.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_03_18.py/task.yaml index 175aa4b5d..c8a999e3f 100644 --- 
a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_03_18.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_03_18.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 127 CHARM_REVISION_ARM64: execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml index 069bb56c6..2daf314ac 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 153 CHARM_REVISION_ARM64: execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_09_04.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_09_04.py/task.yaml index 2b7b1e9f3..87e24c483 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_09_04.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2024_09_04.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 180 CHARM_REVISION_ARM64: 181 execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_01_17.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_01_17.py/task.yaml index ef6d54101..0a99a2ed1 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_01_17.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_01_17.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 210 CHARM_REVISION_ARM64: 211 execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_03_11.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_03_11.py/task.yaml index eafca6208..a7e17ed59 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_03_11.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_03_11.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 240 CHARM_REVISION_ARM64: 241 execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git 
a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_06_23.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_06_23.py/task.yaml index 4694e1844..a1177842c 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_06_23.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2025_06_23.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 255 CHARM_REVISION_ARM64: 254 execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tests/spread/release/test_upgrade_from_stable_2026_01_21.py/task.yaml b/kubernetes/tests/spread/release/test_upgrade_from_stable_2026_01_21.py/task.yaml index 5e4001c75..692219a62 100644 --- a/kubernetes/tests/spread/release/test_upgrade_from_stable_2026_01_21.py/task.yaml +++ b/kubernetes/tests/spread/release/test_upgrade_from_stable_2026_01_21.py/task.yaml @@ -5,7 +5,7 @@ environment: CHARM_REVISION_AMD64: 343 CHARM_REVISION_ARM64: 344 execute: | - tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" + tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" artifacts: - allure-results backends: diff --git a/kubernetes/tox.ini b/kubernetes/tox.ini index 93c803848..173d2200c 100644 --- a/kubernetes/tox.ini +++ b/kubernetes/tox.ini @@ -80,4 +80,4 @@ pass_env = commands_pre = poetry install --only integration commands = - poetry run pytest -v --tb native --log-cli-level=INFO -s --ignore={[vars]tests_path}/unit/ {posargs} + poetry run pytest -v -x --tb native --log-cli-level=INFO -s --ignore={[vars]tests_path}/unit/ {posargs} diff --git a/machines/poetry.lock b/machines/poetry.lock index 744ea46b3..73b04c410 100644 --- a/machines/poetry.lock +++ b/machines/poetry.lock @@ -71,37 +71,6 @@ doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] -[[package]] -name = "appnope" -version = "0.1.3" -description = "Disable App Nap on macOS >= 10.9" -optional = false -python-versions = "*" -groups = ["integration"] -markers = "sys_platform == \"darwin\"" -files = [ - {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, - {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, -] - -[[package]] -name = "asttokens" -version = "2.2.1" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, - {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, -] - -[package.dependencies] -six = "*" - -[package.extras] -test = ["astroid", "pytest"] - [[package]] 
name = "attrs" version = "23.1.0" @@ -121,18 +90,6 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib- tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.1.1) ; platform_python_implementation == \"CPython\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version < \"3.11\"", "pytest-xdist[psutil]"] -[[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, -] - [[package]] name = "backoff" version = "2.2.1" @@ -608,18 +565,6 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi (>=2024)", "cryptography-vectors (==44.0.2)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -groups = ["integration"] -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - [[package]] name = "deprecated" version = "1.2.14" @@ -654,21 +599,6 @@ files = [ [package.extras] test = ["pytest (>=6)"] -[[package]] -name = "executing" -version = "1.2.0" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"}, - {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"}, -] - -[package.extras] -tests = ["asttokens", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] - [[package]] name = "google-auth" version = "2.29.0" @@ -830,89 +760,13 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "ipdb" -version = "0.13.13" -description = "IPython-enabled pdb" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["integration"] -files = [ - {file = "ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4"}, - {file = "ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726"}, -] - -[package.dependencies] -decorator = {version = "*", markers = "python_version > \"3.6\""} -ipython = {version = ">=7.31.1", markers = "python_version > \"3.6\""} -tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < \"3.11\""} - -[[package]] -name = "ipython" -version = "8.14.0" -description = "IPython: Productive Interactive Computing" -optional = false -python-versions = ">=3.9" -groups = ["integration"] -files = [ - {file = "ipython-8.14.0-py3-none-any.whl", 
hash = "sha256:248aca623f5c99a6635bc3857677b7320b9b8039f99f070ee0d20a5ca5a8e6bf"}, - {file = "ipython-8.14.0.tar.gz", hash = "sha256:1d197b907b6ba441b692c48cf2a3a2de280dc0ac91a3405b39349a50272ca0a1"}, -] - -[package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5" - -[package.extras] -all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] -black = ["black"] -doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] -kernel = ["ipykernel"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] - -[[package]] -name = "jedi" -version = "0.19.0" -description = "An autocompletion tool for Python that can be used for text editors." -optional = false -python-versions = ">=3.6" -groups = ["integration"] -files = [ - {file = "jedi-0.19.0-py2.py3-none-any.whl", hash = "sha256:cb8ce23fbccff0025e9386b5cf85e892f94c9b822378f8da49970471335ac64e"}, - {file = "jedi-0.19.0.tar.gz", hash = "sha256:bcf9894f1753969cbac8022a8c2eaee06bfa3724e4192470aaffe7eb6272b0c4"}, -] - -[package.dependencies] -parso = ">=0.8.3,<0.9.0" - -[package.extras] -docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] - [[package]] name = "jinja2" version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["main", "integration"] +groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -1103,7 +957,7 @@ version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" -groups = ["main", "integration"] +groups = ["main"] files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, @@ -1167,21 +1021,6 @@ files = [ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] -[[package]] -name = "matplotlib-inline" -version = "0.1.6" -description = "Inline Matplotlib backend for Jupyter" -optional = false -python-versions = ">=3.5" -groups = ["integration"] -files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, -] - -[package.dependencies] -traitlets = "*" - [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1442,50 +1281,6 @@ ed25519 = ["bcrypt (>=3.1.3)", "pynacl (>=1.0.1)"] gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] invoke = ["invoke (>=1.3)"] -[[package]] -name = "parso" -version = "0.8.3" -description = "A Python Parser" -optional = false -python-versions = ">=3.6" -groups = ["integration"] -files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, -] - -[package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] - -[[package]] -name = "pexpect" -version = "4.9.0" -description = "Pexpect allows easy control of interactive console applications." 
-optional = false -python-versions = "*" -groups = ["integration"] -markers = "sys_platform != \"win32\"" -files = [ - {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, - {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, -] - -[package.dependencies] -ptyprocess = ">=0.5" - -[[package]] -name = "pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, -] - [[package]] name = "pluggy" version = "1.6.0" @@ -1514,21 +1309,6 @@ files = [ {file = "poetry_core-1.7.0.tar.gz", hash = "sha256:8f679b83bd9c820082637beca1204124d5d2a786e4818da47ec8acefd0353b74"}, ] -[[package]] -name = "prompt-toolkit" -version = "3.0.39" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -groups = ["integration"] -files = [ - {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, - {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, -] - -[package.dependencies] -wcwidth = "*" - [[package]] name = "protobuf" version = "3.20.3" @@ -1561,34 +1341,6 @@ files = [ {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, ] -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -groups = ["integration"] -markers = "sys_platform != \"win32\"" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.2" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, -] - -[package.extras] -tests = ["pytest"] - [[package]] name = "pyasn1" version = "0.5.0" @@ -1682,21 +1434,6 @@ typing-extensions = ">=4.2.0" dotenv = ["python-dotenv (>=0.10.4)"] email = ["email-validator (>=1.0.3)"] -[[package]] -name = "pygments" -version = "2.15.1" -description = "Pygments is a syntax highlighting package written in Python." 
-optional = false -python-versions = ">=3.7" -groups = ["integration"] -files = [ - {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, - {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, -] - -[package.extras] -plugins = ["importlib-metadata ; python_version < \"3.8\""] - [[package]] name = "pyhcl" version = "0.4.5" @@ -1790,25 +1527,6 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] -[[package]] -name = "pytest-asyncio" -version = "0.21.2" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.7" -groups = ["integration"] -files = [ - {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, - {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, -] - -[package.dependencies] -pytest = ">=7.0.0" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] - [[package]] name = "pytest-mock" version = "3.11.1" @@ -1827,26 +1545,6 @@ pytest = ">=5.0" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] -[[package]] -name = "pytest-operator" -version = "0.35.0" -description = "Fixtures for Operators" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "pytest-operator-0.35.0.tar.gz", hash = "sha256:ed963dc013fc576e218081e95197926b7c98116c1fb5ab234269cf72e0746d5b"}, - {file = "pytest_operator-0.35.0-py3-none-any.whl", hash = "sha256:026715faba7a0d725ca386fe05a45cfc73746293d8d755be6d2a67ca252267f5"}, -] - -[package.dependencies] -ipdb = "*" -jinja2 = "*" -juju = "*" -pytest = "*" -pytest-asyncio = "<0.23" -pyyaml = "*" - [[package]] name = "python-dateutil" version = "2.8.2" @@ -2212,26 +1910,6 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "stack-data" -version = "0.6.2" -description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"}, - {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] - [[package]] name = "tenacity" version = "8.2.2" @@ -2272,22 +1950,6 @@ files = [ {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"}, ] -[[package]] -name = "traitlets" -version = "5.9.0" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.7" -groups = ["integration"] -files = [ - {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, - {file = "traitlets-5.9.0.tar.gz", hash = 
"sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] - [[package]] name = "typing-extensions" version = "4.12.2" @@ -2334,18 +1996,6 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "wcwidth" -version = "0.2.6" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -groups = ["integration"] -files = [ - {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, - {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, -] - [[package]] name = "websocket-client" version = "1.6.1" diff --git a/machines/pyproject.toml b/machines/pyproject.toml index 674c27523..154a016c0 100644 --- a/machines/pyproject.toml +++ b/machines/pyproject.toml @@ -50,7 +50,6 @@ unit = [ ] integration = [ "pytest~=7.4", - "pytest-operator~=0.35.0", "juju~=3.6", "mysql-connector-python~=9.1", "tenacity~=8.2", diff --git a/machines/tests/integration/conftest.py b/machines/tests/integration/conftest.py index 59bd81c1d..9f724a8bb 100644 --- a/machines/tests/integration/conftest.py +++ b/machines/tests/integration/conftest.py @@ -52,22 +52,5 @@ def cloud_configs_gcp() -> tuple[dict[str, str], dict[str, str]]: @pytest.fixture(scope="module") -def juju(request: pytest.FixtureRequest): - """Pytest fixture that wraps :meth:`jubilant.with_model`. - - This adds command line parameter ``--keep-models`` (see help for details). - """ - model = request.config.getoption("--model") - keep_models = bool(request.config.getoption("--keep-models")) - - if model: - juju = jubilant_backports.Juju(model=model) # type: ignore - yield juju - log = juju.debug_log(limit=1000) - else: - with jubilant_backports.temp_model(keep=keep_models) as juju: - yield juju - log = juju.debug_log(limit=1000) - - if request.session.testsfailed: - print(log, end="") +def juju() -> jubilant_backports.Juju: + return jubilant_backports.Juju(model="testing") diff --git a/machines/tests/integration/integration/high_availability/test_async_replication.py b/machines/tests/integration/integration/high_availability/test_async_replication.py index b0cc875d7..e508bff3b 100644 --- a/machines/tests/integration/integration/high_availability/test_async_replication.py +++ b/machines/tests/integration/integration/high_availability/test_async_replication.py @@ -29,13 +29,13 @@ @pytest.fixture(scope="module") -def first_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def first_model(juju: Juju) -> Generator: """Creates and return the first model.""" yield juju.model @pytest.fixture(scope="module") -def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def second_model(juju: Juju) -> Generator: """Creates and returns the second model.""" model_name = f"{juju.model}-other" @@ -43,8 +43,6 @@ def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: juju.add_model(model_name) yield model_name - if request.config.getoption("--keep-models"): - return logging.info(f"Destroying model: {model_name}") juju.destroy_model(model_name, destroy_storage=True, force=True) @@ -68,7 +66,6 @@ def continuous_writes(first_model: str) -> Generator: @juju3 -@pytest.mark.abort_on_fail def 
test_build_and_deploy(first_model: str, second_model: str, charm: str) -> None: """Simple test to ensure that the MySQL application charms get deployed.""" configuration = {"profile": "testing"} @@ -106,7 +103,6 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No @juju3 -@pytest.mark.abort_on_fail def test_async_relate(first_model: str, second_model: str) -> None: """Relate the two MySQL clusters.""" logging.info("Creating offers in first model") @@ -135,7 +131,6 @@ def test_async_relate(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_deploy_router_and_app(first_model: str) -> None: """Deploy the router and the test application.""" logging.info("Deploying the router and test application") @@ -174,7 +169,6 @@ def test_deploy_router_and_app(first_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_create_replication(first_model: str, second_model: str) -> None: """Run the create-replication action and wait for the applications to settle.""" model_1 = Juju(model=first_model) @@ -199,7 +193,6 @@ def test_create_replication(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_data_replication(first_model: str, second_model: str, continuous_writes) -> None: """Test to write to primary, and read the same data back from replicas.""" logging.info("Testing data replication") @@ -211,7 +204,6 @@ def test_data_replication(first_model: str, second_model: str, continuous_writes @juju3 -@pytest.mark.abort_on_fail def test_standby_promotion(first_model: str, second_model: str, continuous_writes) -> None: """Test graceful promotion of a standby cluster to primary.""" model_2 = Juju(model=second_model) @@ -241,7 +233,6 @@ def test_standby_promotion(first_model: str, second_model: str, continuous_write @juju3 -@pytest.mark.abort_on_fail def test_failover(first_model: str, second_model: str) -> None: """Test switchover on primary cluster fail.""" logging.info("Freezing mysqld on primary cluster units") @@ -284,7 +275,6 @@ def test_failover(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_rejoin_invalidated_cluster( first_model: str, second_model: str, continuous_writes ) -> None: @@ -304,7 +294,6 @@ def test_rejoin_invalidated_cluster( @juju3 -@pytest.mark.abort_on_fail def test_unrelate_and_relate(first_model: str, second_model: str, continuous_writes) -> None: """Test removing and re-relating the two mysql clusters.""" model_1 = Juju(model=first_model) diff --git a/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py b/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py index 6f8665640..4cc89ff9c 100644 --- a/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py +++ b/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py @@ -30,13 +30,13 @@ @pytest.fixture(scope="module") -def first_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def first_model(juju: Juju) -> Generator: """Creates and return the first model.""" yield juju.model @pytest.fixture(scope="module") -def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: +def second_model(juju: Juju) -> Generator: """Creates and returns the second model.""" model_name = f"{juju.model}-other" @@ -44,8 +44,6 @@ def second_model(juju: Juju, request: pytest.FixtureRequest) -> Generator: juju.add_model(model_name) yield 
model_name - if request.config.getoption("--keep-models"): - return logging.info(f"Destroying model: {model_name}") juju.destroy_model(model_name, destroy_storage=True, force=True) @@ -69,7 +67,6 @@ def continuous_writes(first_model: str) -> Generator: @juju3 -@pytest.mark.abort_on_fail def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> None: """Simple test to ensure that the MySQL application charms get deployed.""" configuration = {"profile": "testing"} @@ -107,7 +104,6 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No @juju3 -@pytest.mark.abort_on_fail def test_async_relate(first_model: str, second_model: str) -> None: """Relate the two MySQL clusters.""" logging.info("Creating offers in first model") @@ -136,7 +132,6 @@ def test_async_relate(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_deploy_test_app(first_model: str) -> None: """Deploy the test application.""" logging.info("Deploying the test application") @@ -162,7 +157,6 @@ def test_deploy_test_app(first_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_create_replication(first_model: str, second_model: str) -> None: """Run the create-replication action and wait for the applications to settle.""" model_1 = Juju(model=first_model) @@ -187,7 +181,6 @@ def test_create_replication(first_model: str, second_model: str) -> None: @juju3 -@pytest.mark.abort_on_fail def test_upgrade_from_edge( first_model: str, second_model: str, charm: str, continuous_writes ) -> None: @@ -203,7 +196,6 @@ def test_upgrade_from_edge( @juju3 -@pytest.mark.abort_on_fail def test_data_replication(first_model: str, second_model: str, continuous_writes) -> None: """Test to write to primary, and read the same data back from replicas.""" logging.info("Testing data replication") diff --git a/machines/tests/integration/integration/high_availability/test_primary_switchover.py b/machines/tests/integration/integration/high_availability/test_primary_switchover.py index 6c36fc31c..43a2cde7e 100644 --- a/machines/tests/integration/integration/high_availability/test_primary_switchover.py +++ b/machines/tests/integration/integration/high_availability/test_primary_switchover.py @@ -5,7 +5,6 @@ import subprocess import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers_ha import ( @@ -25,7 +24,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -60,7 +58,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_cluster_switchover(juju: Juju) -> None: """Test that the primary node can be switched over.""" logging.info("Testing cluster switchover...") @@ -88,7 +85,6 @@ def test_cluster_switchover(juju: Juju) -> None: assert get_mysql_primary_unit(juju, app_name) == new_primary_unit, "Switchover failed" -@pytest.mark.abort_on_fail def test_cluster_failover_after_majority_loss(juju: Juju) -> None: """Test the promote-to-primary command after losing the majority of nodes, with force flag.""" app_name = get_app_name(juju, "mysql") diff --git a/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py b/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py index a7e390c77..87dea7677 100644 --- 
a/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py +++ b/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py @@ -4,7 +4,6 @@ import logging import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers import generate_random_string @@ -22,7 +21,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -57,7 +55,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_consistent_data_replication_across_cluster(juju: Juju) -> None: """Confirm that data is replicated from the primary node to all the replicas.""" table_name = "data" diff --git a/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py b/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py index b3708f2ed..3f36fa408 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py +++ b/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py @@ -5,7 +5,6 @@ from time import sleep import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers_ha import ( @@ -21,7 +20,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") diff --git a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py index 06c592913..341deffad 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py +++ b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py @@ -6,7 +6,6 @@ from pathlib import Path import jubilant_backports -import pytest from jubilant_backports import Juju from tenacity import ( Retrying, @@ -28,7 +27,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -63,7 +61,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_log_rotation(juju: Juju) -> None: """Test the log rotation of text files.""" log_types = ["error", "audit"] diff --git a/machines/tests/integration/integration/high_availability/test_replication_reelection.py b/machines/tests/integration/integration/high_availability/test_replication_reelection.py index 244f0f152..a4d36a0ce 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_reelection.py +++ b/machines/tests/integration/integration/high_availability/test_replication_reelection.py @@ -4,7 +4,6 @@ import logging import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers import generate_random_string @@ -24,7 +23,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and 
application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -59,7 +57,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_kill_primary_check_reelection(juju: Juju) -> None: """Confirm that a new primary is elected when the current primary is tear down.""" check_mysql_units_writes_increment(juju, MYSQL_APP_NAME) diff --git a/machines/tests/integration/integration/high_availability/test_replication_scaling.py b/machines/tests/integration/integration/high_availability/test_replication_scaling.py index 7e5b9d072..6fd131aeb 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_scaling.py +++ b/machines/tests/integration/integration/high_availability/test_replication_scaling.py @@ -4,7 +4,6 @@ import logging import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers import generate_random_string @@ -23,7 +22,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -58,7 +56,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_scaling_without_data_loss(juju: Juju) -> None: """Test that data is preserved during scale up and scale down.""" table_name = "instance_state_replication" diff --git a/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py b/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py index 91ca439ba..eaf6703a5 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py +++ b/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py @@ -5,7 +5,6 @@ import time import jubilant_backports -import pytest import urllib3 from jubilant_backports import Juju from tenacity import ( @@ -33,7 +32,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -68,7 +66,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_exporter_endpoints(juju: Juju) -> None: """Test that endpoints are running.""" http_client = urllib3.PoolManager() diff --git a/machines/tests/integration/integration/high_availability/test_replication_variables.py b/machines/tests/integration/integration/high_availability/test_replication_variables.py index 05016a60a..4b5705529 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_variables.py +++ b/machines/tests/integration/integration/high_availability/test_replication_variables.py @@ -4,7 +4,6 @@ import logging import jubilant_backports -import pytest from jubilant_backports import Juju from ...helpers_ha import ( @@ -19,7 +18,6 @@ MINUTE_SECS = 60 -@pytest.mark.abort_on_fail def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: """Simple test to ensure that the MySQL and application charms get deployed.""" logging.info("Deploying MySQL cluster") @@ -54,7 +52,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ) -@pytest.mark.abort_on_fail def test_custom_variables(juju: Juju) -> None: """Query 
     database for custom variables."""
     for unit in get_app_units(juju, MYSQL_APP_NAME):
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py b/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py
index d4ca2ae77..8bb88c582 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py
@@ -6,7 +6,6 @@
 import subprocess
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 from tenacity import (
     Retrying,
@@ -39,7 +38,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -74,7 +72,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_network_cut(juju: Juju, continuous_writes) -> None:
     """Completely cut and restore network."""
     mysql_units = get_app_units(juju, MYSQL_APP_NAME)
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py b/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
index 5d1d55315..7da835506 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CLUSTER_ADMIN_USERNAME
@@ -32,7 +31,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -67,7 +65,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_freeze_db_process(juju: Juju, continuous_writes) -> None:
     """Freeze and unfreeze process and check for auto cluster recovery."""
     # Ensure continuous writes still incrementing for all units
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py b/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py
index 8c2212ed6..efc88a326 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import generate_random_string
@@ -26,7 +25,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -61,7 +59,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_kill_db_process(juju: Juju, continuous_writes) -> None:
     """Kill mysqld process and check for auto cluster recovery."""
     # Ensure continuous writes still incrementing for all units
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py b/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py
index 8664e4894..3ced5df38 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import SERVER_CONFIG_USERNAME
@@ -33,7 +32,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -68,7 +66,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_sst_test(juju: Juju, continuous_writes):
     """Test a forceful restart with deleted data and without transaction logs (forced clone)."""
     # Ensure continuous writes still incrementing for all units
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py b/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
index fd01fe916..bc970080f 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import SERVER_CONFIG_USERNAME
@@ -27,7 +26,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -62,7 +60,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_cluster_manual_rejoin(juju: Juju, continuous_writes) -> None:
     """The cluster manual re-join test.
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py b/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py
index c7cc1638d..6f422c094 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CLUSTER_ADMIN_USERNAME
@@ -33,7 +32,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -68,7 +66,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_cluster_pause(juju: Juju, continuous_writes) -> None:
     """Pause test.
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py b/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
index 6732a606a..dacb3cfe6 100644
--- a/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
+++ b/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py
@@ -5,7 +5,6 @@
 import random
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CLUSTER_ADMIN_USERNAME, SERVER_CONFIG_USERNAME
@@ -35,7 +34,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -70,7 +68,6 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_replicate_data_on_restart(juju: Juju, continuous_writes) -> None:
     """Stop server, write data, start and validate replication."""
     # Ensure continuous writes still incrementing for all units
diff --git a/machines/tests/integration/integration/high_availability/test_upgrade.py b/machines/tests/integration/integration/high_availability/test_upgrade.py
index 6c988fedd..47a6bf67b 100644
--- a/machines/tests/integration/integration/high_availability/test_upgrade.py
+++ b/machines/tests/integration/integration/high_availability/test_upgrade.py
@@ -8,7 +8,6 @@
 from pathlib import Path
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -27,7 +26,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_latest(juju: Juju) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -62,7 +60,6 @@ def test_deploy_latest(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_pre_upgrade_check(juju: Juju) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
     mysql_leader = get_app_leader(juju, MYSQL_APP_NAME)
@@ -81,7 +78,6 @@ def test_pre_upgrade_check(juju: Juju) -> None:
     assert mysql_primary == mysql_leader, "Primary unit not set to leader"
 
 
-@pytest.mark.abort_on_fail
 def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None:
     """Update the second cluster."""
     logging.info("Ensure continuous writes are incrementing")
@@ -106,7 +102,6 @@ def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None:
     check_mysql_units_writes_increment(juju, MYSQL_APP_NAME)
 
 
-@pytest.mark.abort_on_fail
 def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None:
     """Test an upgrade failure and its rollback."""
     mysql_app_leader = get_app_leader(juju, MYSQL_APP_NAME)
diff --git a/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py b/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
index 6fd0295cb..611cdb0fb 100644
--- a/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
+++ b/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py
@@ -10,7 +10,6 @@
 from pathlib import Path
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -33,7 +32,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm: str) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     snap_revisions = Path("snap_revisions.json")
@@ -86,7 +84,6 @@ def test_build_and_deploy(juju: Juju, charm: str) -> None:
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_pre_upgrade_check(juju: Juju) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
     mysql_leader = get_app_leader(juju, MYSQL_APP_NAME)
@@ -98,7 +95,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_upgrade_to_failing(juju: Juju, charm: str, continuous_writes) -> None:
     logging.info("Ensure continuous_writes")
     check_mysql_units_writes_increment(juju, MYSQL_APP_NAME)
@@ -135,7 +131,6 @@
 # TODO: remove AMD64 marker after next incompatible MySQL server version is released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @amd64_only
-@pytest.mark.abort_on_fail
 def test_rollback(juju: Juju, charm: str, continuous_writes) -> None:
     """Test upgrade rollback to a healthy revision."""
     relation_data = get_relation_data(juju, MYSQL_APP_NAME, "upgrade")
diff --git a/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py b/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py
index c00804865..6c5ef74f1 100644
--- a/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py
+++ b/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py
@@ -4,7 +4,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -20,7 +19,6 @@
 MINUTE_SECS = 60
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_stable(juju: Juju) -> None:
     """Simple test to ensure that the MySQL and application charms get deployed."""
     logging.info("Deploying MySQL cluster")
@@ -56,7 +54,6 @@ def test_deploy_stable(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_refresh_without_pre_upgrade_check(juju: Juju, charm: str) -> None:
     """Test updating from stable channel."""
     logging.info("Refresh the charm")
@@ -75,7 +72,6 @@ def test_refresh_without_pre_upgrade_check(juju: Juju, charm: str) -> None:
     check_mysql_units_writes_increment(juju, MYSQL_APP_NAME)
 
 
-@pytest.mark.abort_on_fail
 def test_rollback_without_pre_upgrade_check(juju: Juju, charm: str) -> None:
     """Test refresh back to stable channel."""
     # Early Jubilant 1.X.Y versions do not support the `switch` option
diff --git a/machines/tests/integration/integration/relations/test_database.py b/machines/tests/integration/integration/relations/test_database.py
index fb3872c23..ef44967da 100644
--- a/machines/tests/integration/integration/relations/test_database.py
+++ b/machines/tests/integration/integration/relations/test_database.py
@@ -6,7 +6,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import DB_RELATION_NAME, PASSWORD_LENGTH, ROOT_USERNAME, SERVER_CONFIG_USERNAME
@@ -39,8 +38,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm):
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     juju.deploy(
@@ -69,7 +66,6 @@ def test_build_and_deploy(juju: Juju, charm):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation(juju: Juju):
     """Rotate password and confirm changes."""
     # get primary unit first, need that to invoke set-password action
@@ -97,7 +93,6 @@ def test_password_rotation(juju: Juju):
     assert len(output) > 0, "query with new password failed, no databases found"
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation_silent(juju: Juju):
     """Rotate password and confirm changes."""
     # get primary unit first, need that to invoke set-password action
@@ -122,7 +117,6 @@ def test_password_rotation_silent(juju: Juju):
     assert len(output) > 0, "query with new password failed, no databases found"
 
 
-@pytest.mark.abort_on_fail
 def test_password_rotation_root_user(juju: Juju):
     """Rotate password for root user and confirm changes."""
     # get primary unit first, need that to invoke set-password action
@@ -137,7 +131,6 @@ def test_password_rotation_root_user(juju: Juju):
     assert updated_credentials["password"] != old_credentials["password"]
 
 
-@pytest.mark.abort_on_fail
 @markers.only_without_juju_secrets
 def test_relation_creation_databag(juju: Juju):
     """Relate charms and wait for the expected changes in status."""
@@ -158,7 +151,6 @@
     assert {"password", "username"} <= set(relation_data[0]["application-data"])
 
 
-@pytest.mark.abort_on_fail
 @markers.only_with_juju_secrets
 def test_relation_creation(juju: Juju):
     """Relate charms and wait for the expected changes in status (using juju secrets)."""
@@ -175,7 +167,6 @@
     assert "secret-user" in relation_data[0]["application-data"]
 
 
-@pytest.mark.abort_on_fail
 def test_read_only_endpoints(juju: Juju):
     """Check read-only-endpoints are correctly updated."""
     relation_data = get_relation_data(juju, DATABASE_APP_NAME, DB_RELATION_NAME)
@@ -213,7 +204,6 @@
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relation_broken(juju: Juju):
     """Remove relation and wait for the expected changes in status."""
     juju.remove_relation(f"{APPLICATION_APP_NAME}:{ENDPOINT}", f"{DATABASE_APP_NAME}:{ENDPOINT}")
diff --git a/machines/tests/integration/integration/relations/test_db_router.py b/machines/tests/integration/integration/relations/test_db_router.py
index 55c0e6bad..8b861f78b 100644
--- a/machines/tests/integration/integration/relations/test_db_router.py
+++ b/machines/tests/integration/integration/relations/test_db_router.py
@@ -6,7 +6,6 @@
 import random
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -32,7 +31,6 @@
 FAST_WAIT_TIMEOUT = 30 * 60
 
 
-@pytest.mark.abort_on_fail
 def test_keystone_bundle_db_router(juju: Juju, charm) -> None:
     """Deploy the keystone bundle to test the 'db-router' relation."""
     juju.deploy(
diff --git a/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py b/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py
index a212fb0c2..32e0c3d57 100644
--- a/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py
+++ b/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import (
@@ -31,8 +30,6 @@
 TIMEOUT = 15 * 60
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     logger.info(
@@ -67,7 +64,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relation_creation(juju: Juju):
     """Relate charms and wait for the expected changes in status."""
     # Configure a user and database to be used for the relation
@@ -92,7 +88,6 @@ def test_relation_creation(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_relation_broken(juju: Juju):
     """Remove relation and wait for the expected changes in status."""
     # store database credentials for test access later
diff --git a/machines/tests/integration/integration/relations/test_shared_db.py b/machines/tests/integration/integration/relations/test_shared_db.py
index ec156e0e3..172085d45 100644
--- a/machines/tests/integration/integration/relations/test_shared_db.py
+++ b/machines/tests/integration/integration/relations/test_shared_db.py
@@ -7,7 +7,6 @@
 from time import sleep
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers_ha import (
@@ -32,7 +31,6 @@
 FAST_WAIT_TIMEOUT = 15 * 60
 
 
-@pytest.mark.abort_on_fail
 def test_keystone_bundle_shared_db(juju: Juju, charm) -> None:
     """Deploy the keystone bundle to test the 'shared-db' relation.
diff --git a/machines/tests/integration/integration/roles/test_database_dba_role.py b/machines/tests/integration/integration/roles/test_database_dba_role.py
index ed2877fe5..f81f0ba60 100644
--- a/machines/tests/integration/integration/roles/test_database_dba_role.py
+++ b/machines/tests/integration/integration/roles/test_database_dba_role.py
@@ -26,8 +26,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -62,7 +60,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dba_role(juju: Juju):
     """Test the database-level DBA role."""
     juju.config(f"{INTEGRATOR_APP_NAME}1", {"database-name": "preserved", "extra-user-roles": ""})
diff --git a/machines/tests/integration/integration/roles/test_instance_dba_role.py b/machines/tests/integration/integration/roles/test_instance_dba_role.py
index 895323f46..54091fdf2 100644
--- a/machines/tests/integration/integration/roles/test_instance_dba_role.py
+++ b/machines/tests/integration/integration/roles/test_instance_dba_role.py
@@ -5,7 +5,6 @@
 import logging
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ...helpers import execute_queries_on_unit
@@ -25,8 +24,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -52,7 +49,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dba_role(juju: Juju):
     """Test the instance-level DBA role."""
     # configure integrator and relate
diff --git a/machines/tests/integration/integration/roles/test_instance_roles.py b/machines/tests/integration/integration/roles/test_instance_roles.py
index adee68471..26685341c 100644
--- a/machines/tests/integration/integration/roles/test_instance_roles.py
+++ b/machines/tests/integration/integration/roles/test_instance_roles.py
@@ -27,8 +27,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.abort_on_fail
-@pytest.mark.skip_if_deployed
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql and data-integrator charms get deployed."""
     juju.deploy(
@@ -63,7 +61,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_read_role(juju: Juju):
     """Test the instance-level charmed_read role."""
     juju.config(
@@ -143,7 +140,6 @@ def test_charmed_read_role(juju: Juju):
     )
 
 
-@pytest.mark.abort_on_fail
 def test_charmed_dml_role(juju: Juju):
     """Test the instance-level charmed_dml role."""
     juju.config(
diff --git a/machines/tests/integration/integration/spaces/test_spaced_db.py b/machines/tests/integration/integration/spaces/test_spaced_db.py
index fbad10ae8..add5e4725 100644
--- a/machines/tests/integration/integration/spaces/test_spaced_db.py
+++ b/machines/tests/integration/integration/spaces/test_spaced_db.py
@@ -22,7 +22,6 @@
 logger = logging.getLogger(__name__)
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, lxd_spaces, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     juju.deploy(
diff --git a/machines/tests/integration/integration/test_backup_aws.py b/machines/tests/integration/integration/test_backup_aws.py
index 9f7444de2..263a0ef1f 100644
--- a/machines/tests/integration/integration/test_backup_aws.py
+++ b/machines/tests/integration/integration/test_backup_aws.py
@@ -72,7 +72,6 @@ def clean_backups_from_buckets(cloud_configs_aws):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     logger.info("Deploying mysql")
@@ -117,7 +116,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_configs_aws) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -202,7 +200,6 @@ def test_backup(juju: Juju, cloud_configs_aws) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None:
     """Test to restore a backup to the same mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_aws
@@ -317,7 +314,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None:
     scale_app_units(juju, DATABASE_APP_NAME, 0)
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None:
     """Test to restore a backup on a new mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_aws
diff --git a/machines/tests/integration/integration/test_backup_ceph.py b/machines/tests/integration/integration/test_backup_ceph.py
index 76e0839e7..78e94d373 100644
--- a/machines/tests/integration/integration/test_backup_ceph.py
+++ b/machines/tests/integration/integration/test_backup_ceph.py
@@ -162,7 +162,6 @@ def clean_backups_from_buckets(cloud_configs_ceph):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     logger.info("Deploying mysql")
@@ -207,7 +206,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_configs_ceph) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -292,7 +290,6 @@ def test_backup(juju: Juju, cloud_configs_ceph) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_configs_ceph) -> None:
     """Test to restore a backup to the same mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_ceph
@@ -407,7 +404,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_ceph) -> None:
     scale_app_units(juju, DATABASE_APP_NAME, 0)  # TODO: is this needed?
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None:
     """Test to restore a backup on a new mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_ceph
diff --git a/machines/tests/integration/integration/test_backup_gcp.py b/machines/tests/integration/integration/test_backup_gcp.py
index 590b26b04..a18b50e10 100644
--- a/machines/tests/integration/integration/test_backup_gcp.py
+++ b/machines/tests/integration/integration/test_backup_gcp.py
@@ -72,7 +72,6 @@ def clean_backups_from_buckets(cloud_configs_gcp):
             bucket_object.delete()
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     logger.info("Deploying mysql")
@@ -117,7 +116,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_backup(juju: Juju, cloud_configs_gcp) -> None:
     """Test to create a backup and list backups."""
     global backup_id, value_before_backup, value_after_backup
@@ -202,7 +200,6 @@ def test_backup(juju: Juju, cloud_configs_gcp) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None:
     """Test to restore a backup to the same mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_gcp
@@ -317,7 +314,6 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None:
     scale_app_units(juju, DATABASE_APP_NAME, 0)
 
 
-@pytest.mark.abort_on_fail
 def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None:
     """Test to restore a backup on a new mysql cluster."""
     cloud_configs, cloud_credentials = cloud_configs_gcp
diff --git a/machines/tests/integration/integration/test_backup_pitr_aws.py b/machines/tests/integration/integration/test_backup_pitr_aws.py
index 7ebcbda98..b208db190 100644
--- a/machines/tests/integration/integration/test_backup_pitr_aws.py
+++ b/machines/tests/integration/integration/test_backup_pitr_aws.py
@@ -1,13 +1,11 @@
 # Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
-import pytest
 from jubilant_backports import Juju
 
 from .helpers_backups import build_and_deploy_operations, pitr_operations
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy_aws(
     juju: Juju, cloud_configs_aws: tuple[dict[str, str], dict[str, str]], charm
 ) -> None:
diff --git a/machines/tests/integration/integration/test_backup_pitr_gcp.py b/machines/tests/integration/integration/test_backup_pitr_gcp.py
index d648c4274..de87bab4d 100644
--- a/machines/tests/integration/integration/test_backup_pitr_gcp.py
+++ b/machines/tests/integration/integration/test_backup_pitr_gcp.py
@@ -1,13 +1,11 @@
 # Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
-import pytest
 from jubilant_backports import Juju
 
 from .helpers_backups import build_and_deploy_operations, pitr_operations
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy_gcp(
     juju: Juju, cloud_configs_gcp: tuple[dict[str, str], dict[str, str]], charm
 ) -> None:
diff --git a/machines/tests/integration/integration/test_saturate_max_connections.py b/machines/tests/integration/integration/test_saturate_max_connections.py
index 0bd87f2db..50f1afea4 100644
--- a/machines/tests/integration/integration/test_saturate_max_connections.py
+++ b/machines/tests/integration/integration/test_saturate_max_connections.py
@@ -18,7 +18,6 @@
 CONNECTIONS = 10
 
 
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 1 units to ensure a cluster is formed."""
     juju.deploy(
@@ -31,7 +30,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_deploy_and_relate_test_app(juju: Juju) -> None:
     config = {"auto_start_writes": False, "sleep_interval": "500"}
     logger.info("Deploying test app")
@@ -54,7 +52,6 @@ def test_deploy_and_relate_test_app(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_saturate_max_connections(juju: Juju) -> None:
     app_unit_name = get_app_units(juju, TEST_APP_NAME)[0]
     mysql_unit_name = get_app_units(juju, MYSQL_APP_NAME)[0]
diff --git a/machines/tests/integration/integration/test_tls.py b/machines/tests/integration/integration/test_tls.py
index 248ed3157..97b913a9e 100644
--- a/machines/tests/integration/integration/test_tls.py
+++ b/machines/tests/integration/integration/test_tls.py
@@ -6,7 +6,6 @@
 from time import sleep
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from constants import CLUSTER_ADMIN_USERNAME, TLS_SSL_CERT_FILE
@@ -46,8 +45,6 @@
 config = {}
 
 
-@pytest.mark.skip_if_deployed
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     logger.info(f"Deploying {APP_NAME}")
@@ -69,7 +66,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_connection_before_tls(juju: Juju) -> None:
     """Ensure connections (with and without ssl) are possible before relating with TLS operator."""
     app_units = get_app_units(juju, APP_NAME)
@@ -97,7 +93,6 @@ def test_connection_before_tls(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_enable_tls(juju: Juju) -> None:
     """Test for encryption enablement when relation to TLS charm."""
     app_units = get_app_units(juju, APP_NAME)
@@ -147,7 +142,6 @@ def test_enable_tls(juju: Juju) -> None:
     assert get_tls_ca(juju, app_units[0]), "❌ No CA found after TLS relation"
 
 
-@pytest.mark.abort_on_fail
 def test_rotate_tls_key(juju: Juju) -> None:
     """Verify rotating tls private keys restarts cluster with new certificates.
@@ -202,7 +196,6 @@ def test_rotate_tls_key(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_disable_tls(juju: Juju) -> None:
     # Remove the relation
     app_units = get_app_units(juju, APP_NAME)
diff --git a/machines/tests/integration/integration/test_vm_reboot.py b/machines/tests/integration/integration/test_vm_reboot.py
index 0849875f5..2058a23c2 100644
--- a/machines/tests/integration/integration/test_vm_reboot.py
+++ b/machines/tests/integration/integration/test_vm_reboot.py
@@ -6,7 +6,6 @@
 from time import sleep
 
 import jubilant_backports
-import pytest
 from jubilant_backports import Juju
 
 from ..helpers_ha import MINUTE_SECS, get_app_units, get_unit_machine
@@ -19,8 +18,6 @@
 TIMEOUT = 15 * MINUTE_SECS
 
 
-@pytest.mark.skip_if_deployed
-@pytest.mark.abort_on_fail
 def test_build_and_deploy(juju: Juju, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     logger.info(f"Deploying {APP_NAME}")
@@ -42,7 +39,6 @@ def test_build_and_deploy(juju: Juju, charm) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_reboot_1_of_3_units(juju: Juju) -> None:
     """Reboot a single unit and ensure it comes back online."""
     app_units = get_app_units(juju, APP_NAME)
@@ -61,7 +57,6 @@ def test_reboot_1_of_3_units(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_reboot_2_of_3_units(juju: Juju) -> None:
     """Reboot 2 units and ensure they come back online."""
     app_units = get_app_units(juju, APP_NAME)
@@ -80,7 +75,6 @@ def test_reboot_2_of_3_units(juju: Juju) -> None:
     )
 
 
-@pytest.mark.abort_on_fail
 def test_reboot_3_of_3_units(juju: Juju) -> None:
     """Reboot all 3 units and ensure they come back online."""
     app_units = get_app_units(juju, APP_NAME)
diff --git a/machines/tests/spread/integration/test_architecture.py/task.yaml b/machines/tests/spread/integration/test_architecture.py/task.yaml
index 9a1bacc35..90181d0f6 100644
--- a/machines/tests/spread/integration/test_architecture.py/task.yaml
+++ b/machines/tests/spread/integration/test_architecture.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_architecture.py
 environment:
   TEST_MODULE: test_architecture.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_async_replication.py/task.yaml b/machines/tests/spread/integration/test_async_replication.py/task.yaml
index 2a424dd97..a1f1bc174 100644
--- a/machines/tests/spread/integration/test_async_replication.py/task.yaml
+++ b/machines/tests/spread/integration/test_async_replication.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_async_replication.py
 environment:
   TEST_MODULE: high_availability/test_async_replication.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 variants:
diff --git a/machines/tests/spread/integration/test_async_replication_upgrade.py/task.yaml b/machines/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
index 8874e2b29..5e7bc75fc 100644
--- a/machines/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
+++ b/machines/tests/spread/integration/test_async_replication_upgrade.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_async_replication_upgrade.py
 environment:
   TEST_MODULE: high_availability/test_async_replication_upgrade.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 variants:
diff --git a/machines/tests/spread/integration/test_backup_aws.py/task.yaml b/machines/tests/spread/integration/test_backup_aws.py/task.yaml
index dd385cc2e..aefca2e42 100644
--- a/machines/tests/spread/integration/test_backup_aws.py/task.yaml
+++ b/machines/tests/spread/integration/test_backup_aws.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_aws.py
 environment:
   TEST_MODULE: test_backup_aws.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_backup_ceph.py/task.yaml b/machines/tests/spread/integration/test_backup_ceph.py/task.yaml
index d9935971c..3d542ad9f 100644
--- a/machines/tests/spread/integration/test_backup_ceph.py/task.yaml
+++ b/machines/tests/spread/integration/test_backup_ceph.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_backup_ceph.py
 environment:
   TEST_MODULE: test_backup_ceph.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_backup_gcp.py/task.yaml b/machines/tests/spread/integration/test_backup_gcp.py/task.yaml
index d932d0d92..1f2ce92d5 100644
--- a/machines/tests/spread/integration/test_backup_gcp.py/task.yaml
+++ b/machines/tests/spread/integration/test_backup_gcp.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_gcp.py
 environment:
   TEST_MODULE: test_backup_gcp.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_backup_pitr_aws.py/task.yaml b/machines/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
index cf7a0672e..8107c0dbc 100644
--- a/machines/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
+++ b/machines/tests/spread/integration/test_backup_pitr_aws.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_pitr_aws.py
 environment:
   TEST_MODULE: test_backup_pitr_aws.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml b/machines/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
index 50bb35062..897e483d0 100644
--- a/machines/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
+++ b/machines/tests/spread/integration/test_backup_pitr_gcp.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_backup_pitr_gcp.py
 environment:
   TEST_MODULE: test_backup_pitr_gcp.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_database.py/task.yaml b/machines/tests/spread/integration/test_database.py/task.yaml
index c8de1abca..549bc776c 100644
--- a/machines/tests/spread/integration/test_database.py/task.yaml
+++ b/machines/tests/spread/integration/test_database.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_database.py
 environment:
   TEST_MODULE: relations/test_database.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_database_dba_role.py/task.yaml b/machines/tests/spread/integration/test_database_dba_role.py/task.yaml
index 0b4e5f5ad..5d60a8282 100644
--- a/machines/tests/spread/integration/test_database_dba_role.py/task.yaml
+++ b/machines/tests/spread/integration/test_database_dba_role.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_database_dba_role.py
 environment:
   TEST_MODULE: roles/test_database_dba_role.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_db_router.py/task.yaml b/machines/tests/spread/integration/test_db_router.py/task.yaml
index 0758900b4..ed76e17f2 100644
--- a/machines/tests/spread/integration/test_db_router.py/task.yaml
+++ b/machines/tests/spread/integration/test_db_router.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_db_router.py
 environment:
   TEST_MODULE: relations/test_db_router.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_instance_dba_role.py/task.yaml b/machines/tests/spread/integration/test_instance_dba_role.py/task.yaml
index 780946c6f..addeea2e7 100644
--- a/machines/tests/spread/integration/test_instance_dba_role.py/task.yaml
+++ b/machines/tests/spread/integration/test_instance_dba_role.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_instance_dba_role.py
 environment:
   TEST_MODULE: roles/test_instance_dba_role.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_instance_roles.py/task.yaml b/machines/tests/spread/integration/test_instance_roles.py/task.yaml
index 1d970b44e..6709bc81b 100644
--- a/machines/tests/spread/integration/test_instance_roles.py/task.yaml
+++ b/machines/tests/spread/integration/test_instance_roles.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_instance_roles.py
 environment:
   TEST_MODULE: roles/test_instance_roles.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_primary_switchover.py/task.yaml b/machines/tests/spread/integration/test_primary_switchover.py/task.yaml
index b3ab09e0a..350f2c037 100644
--- a/machines/tests/spread/integration/test_primary_switchover.py/task.yaml
+++ b/machines/tests/spread/integration/test_primary_switchover.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_primary_switchover
 environment:
   TEST_MODULE: high_availability/test_primary_switchover.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_relation_mysql_legacy.py/task.yaml b/machines/tests/spread/integration/test_relation_mysql_legacy.py/task.yaml
index 74f59da01..8e92c5d50 100644
--- a/machines/tests/spread/integration/test_relation_mysql_legacy.py/task.yaml
+++ b/machines/tests/spread/integration/test_relation_mysql_legacy.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_relation_mysql_legacy.py
 environment:
   TEST_MODULE: relations/test_relation_mysql_legacy.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_data_consistency.py/task.yaml b/machines/tests/spread/integration/test_replication_data_consistency.py/task.yaml
index 285997cd0..c0429dd9e 100644
--- a/machines/tests/spread/integration/test_replication_data_consistency.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_data_consistency.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_data_consistency.py
 environment:
   TEST_MODULE: high_availability/test_replication_data_consistency.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_data_isolation.py/task.yaml b/machines/tests/spread/integration/test_replication_data_isolation.py/task.yaml
index cdbe969c5..6fb68b6a3 100644
--- a/machines/tests/spread/integration/test_replication_data_isolation.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_data_isolation.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_data_isolation.py
 environment:
   TEST_MODULE: high_availability/test_replication_data_isolation.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_logs_rotation.py/task.yaml b/machines/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
index 9a518c529..82edf7da7 100644
--- a/machines/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_logs_rotation.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_logs_rotation.py
 environment:
   TEST_MODULE: high_availability/test_replication_logs_rotation.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_reelection.py/task.yaml b/machines/tests/spread/integration/test_replication_reelection.py/task.yaml
index 3ef7a35ed..0fb53e797 100644
--- a/machines/tests/spread/integration/test_replication_reelection.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_reelection.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_reelection.py
 environment:
   TEST_MODULE: high_availability/test_replication_reelection.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_scaling.py/task.yaml b/machines/tests/spread/integration/test_replication_scaling.py/task.yaml
index a00f52ea0..1e9b8d43e 100644
--- a/machines/tests/spread/integration/test_replication_scaling.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_scaling.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_scaling.py
 environment:
   TEST_MODULE: high_availability/test_replication_scaling.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml b/machines/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
index 81d88864d..976530e92 100644
--- a/machines/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_unit_endpoints.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_unit_endpoints.py
 environment:
   TEST_MODULE: high_availability/test_replication_unit_endpoints.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_replication_variables.py/task.yaml b/machines/tests/spread/integration/test_replication_variables.py/task.yaml
index ee7c96b23..4fd87c9d6 100644
--- a/machines/tests/spread/integration/test_replication_variables.py/task.yaml
+++ b/machines/tests/spread/integration/test_replication_variables.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_replication_variables.py
 environment:
   TEST_MODULE: high_availability/test_replication_variables.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_saturate_max_connections.py/task.yaml b/machines/tests/spread/integration/test_saturate_max_connections.py/task.yaml
index ff95a562c..9aec1554a 100644
--- a/machines/tests/spread/integration/test_saturate_max_connections.py/task.yaml
+++ b/machines/tests/spread/integration/test_saturate_max_connections.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_saturate_max_connections.py
 environment:
   TEST_MODULE: test_saturate_max_connections.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_network_cut.py/task.yaml b/machines/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
index 9fcce7948..6fb0178cd 100644
--- a/machines/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_network_cut.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_network_cut.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_network_cut.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml b/machines/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
index e93ab2abe..73c415634 100644
--- a/machines/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_process_frozen.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_process_frozen.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_process_frozen.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_process_killed.py/task.yaml b/machines/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
index dbd8acae8..f13f0f23c 100644
--- a/machines/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_process_killed.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_process_killed.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_process_killed.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_restart_forceful.py/task.yaml b/machines/tests/spread/integration/test_self_healing_restart_forceful.py/task.yaml
index c0aa052df..fe69a72a8 100644
--- a/machines/tests/spread/integration/test_self_healing_restart_forceful.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_restart_forceful.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_restart_forceful.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_restart_forceful.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml b/machines/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
index 212647a63..5f10d654f 100644
--- a/machines/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_restart_graceful.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_restart_graceful.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_restart_graceful.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_stop_all.py/task.yaml b/machines/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
index 1c0729970..34ead76c9 100644
--- a/machines/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_stop_all.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_stop_all.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_stop_all.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml b/machines/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
index af71ab7c9..e243b1835 100644
--- a/machines/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
+++ b/machines/tests/spread/integration/test_self_healing_stop_primary.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_self_healing_stop_primary.py
 environment:
   TEST_MODULE: high_availability/test_self_healing_stop_primary.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_shared_db.py/task.yaml b/machines/tests/spread/integration/test_shared_db.py/task.yaml
index 82d008e67..99597d51e 100644
--- a/machines/tests/spread/integration/test_shared_db.py/task.yaml
+++ b/machines/tests/spread/integration/test_shared_db.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_shared_db.py
 environment:
   TEST_MODULE: relations/test_shared_db.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_spaced_db.py/task.yaml b/machines/tests/spread/integration/test_spaced_db.py/task.yaml
index a54d1b54b..d02e70aa6 100644
--- a/machines/tests/spread/integration/test_spaced_db.py/task.yaml
+++ b/machines/tests/spread/integration/test_spaced_db.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_spaced_db.py
 environment:
   TEST_MODULE: spaces/test_spaced_db.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_subordinate_charms.py/task.yaml b/machines/tests/spread/integration/test_subordinate_charms.py/task.yaml
index e59194f85..e04484bea 100644
--- a/machines/tests/spread/integration/test_subordinate_charms.py/task.yaml
+++ b/machines/tests/spread/integration/test_subordinate_charms.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_subordinate_charms.py
 environment:
   TEST_MODULE: test_subordinate_charms.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/integration/test_tls.py/task.yaml b/machines/tests/spread/integration/test_tls.py/task.yaml
index a2b993ead..defb25c09 100644
--- a/machines/tests/spread/integration/test_tls.py/task.yaml
+++ b/machines/tests/spread/integration/test_tls.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_tls.py
 environment:
   TEST_MODULE: test_tls.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_upgrade.py/task.yaml b/machines/tests/spread/integration/test_upgrade.py/task.yaml
index 83af4dc2a..67280c6cd 100644
--- a/machines/tests/spread/integration/test_upgrade.py/task.yaml
+++ b/machines/tests/spread/integration/test_upgrade.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_upgrade.py
 environment:
   TEST_MODULE: high_availability/test_upgrade.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml b/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml
index d02a2ca7a..4cae0b857 100644
--- a/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml
+++ b/machines/tests/spread/integration/test_upgrade_rollback_incompat.py/task.yaml
@@ -2,7 +2,7 @@ summary: test_upgrade_rollback_incompat.py
 environment:
   TEST_MODULE: high_availability/test_upgrade_rollback_incompat.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 systems:
diff --git a/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml b/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml
index 999bc05e0..8f2dc4820 100644
--- a/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml
+++ b/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_upgrade_skip_pre_upgrade_check.py
 environment:
   TEST_MODULE: high_availability/test_upgrade_skip_pre_upgrade_check.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/integration/test_vm_reboot.py/task.yaml b/machines/tests/spread/integration/test_vm_reboot.py/task.yaml
index e0d7863ff..4788cda93 100644
--- a/machines/tests/spread/integration/test_vm_reboot.py/task.yaml
+++ b/machines/tests/spread/integration/test_vm_reboot.py/task.yaml
@@ -2,6 +2,6 @@ summary: test_vm_reboot.py
 environment:
   TEST_MODULE: test_vm_reboot.py
 execute: |
-  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml
index 2b83d6200..0a4bb30a0 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2023_04_20.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 151
   CHARM_REVISION_ARM64:
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml
index 75e0d41c1..bd7384051 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2023_10_06.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 196
   CHARM_REVISION_ARM64:
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml
index d60f1cf6a..24bc1067b 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2024_06_26.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 240
   CHARM_REVISION_ARM64:
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2024_12_02.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2024_12_02.py/task.yaml
index 08a17214a..cfa2cf0c6 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2024_12_02.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2024_12_02.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 313
   CHARM_REVISION_ARM64: 312
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2025_03_31.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2025_03_31.py/task.yaml
index 7beb4e45d..56806f79e 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2025_03_31.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2025_03_31.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 366
   CHARM_REVISION_ARM64: 367
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tests/spread/release/test_upgrade_from_stable_2026_01_22.py/task.yaml b/machines/tests/spread/release/test_upgrade_from_stable_2026_01_22.py/task.yaml
index a321dca10..ce788a909 100644
--- a/machines/tests/spread/release/test_upgrade_from_stable_2026_01_22.py/task.yaml
+++ b/machines/tests/spread/release/test_upgrade_from_stable_2026_01_22.py/task.yaml
@@ -4,7 +4,7 @@ environment:
   CHARM_REVISION_AMD64: 444
   CHARM_REVISION_ARM64: 442
 execute: |
-  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results"
 artifacts:
   - allure-results
 backends:
diff --git a/machines/tox.ini b/machines/tox.ini
index 080f0b356..156826ce3 100644
--- a/machines/tox.ini
+++ b/machines/tox.ini
@@ -86,4 +86,4 @@ pass_env =
 commands_pre =
     poetry install --only integration
 commands =
-    poetry run pytest -v --tb native --log-cli-level=INFO -s --ignore={[vars]tests_path}/unit/ {posargs}
+    poetry run pytest -v -x --tb native --log-cli-level=INFO -s --ignore={[vars]tests_path}/unit/ {posargs}

From 43fb89e5cf93e38a0a9389300e84e48dc4327dfe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Luis=20Cano=20Rodr=C3=ADguez?=
=?UTF-8?q?Juan=20Luis=20Cano=20Rodr=C3=ADguez?= Date: Wed, 4 Feb 2026 19:10:10 +0100 Subject: [PATCH 04/40] [DPE-9376] Add MySQL 8.0.44 release details (#64) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add MySQL 8.0.44 release details Added release information for MySQL version 8.0.44 and updated links. Fix gh-63 Signed-off-by: Juan Luis Cano Rodríguez * Update releases.md with new release information Added details for releases 442-444 and updated links. Signed-off-by: Juan Luis Cano Rodríguez * Add release details for revisions 342-344 Added release information for revisions 342-344, including architecture support for amd64, arm64, and s390x. Signed-off-by: Juan Luis Cano Rodríguez --------- Signed-off-by: Juan Luis Cano Rodríguez --- kubernetes/docs/reference/releases.md | 16 ++++++++++++++-- machines/docs/reference/releases.md | 17 ++++++++++++++--- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/kubernetes/docs/reference/releases.md b/kubernetes/docs/reference/releases.md index 6b3178a1a..b074c97aa 100644 --- a/kubernetes/docs/reference/releases.md +++ b/kubernetes/docs/reference/releases.md @@ -8,6 +8,7 @@ To see all releases and commits, check the [Charmed MySQL K8s Releases page on G | Release | MySQL version | Juju version | [TLS encryption](/how-to/enable-tls)* | [COS monitoring](/how-to/monitoring-cos/enable-monitoring) | [Minor version upgrades](/how-to/refresh/single-cluster/refresh-single-cluster) | [Cluster-cluster replication](/how-to/cluster-cluster-replication/deploy) | [Point-in-time recovery](point-in-time-recovery) |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [342], [343], [344] | 8.0.44 | `3.5.4+` | ![check] | ![check] | ![check] | ![check] | ![check] | | [254], [255] | 8.0.41 | `3.5.4+` | ![check] | ![check] | ![check] | ![check] | ![check] | | [240], [241] | 8.0.41 | `3.5.4+` | ![check] | ![check] | ![check] | ![check] | | | [210], [211] | 8.0.39 | `3.5.4+` | ![check] | ![check] | ![check] | ![check] | | @@ -30,6 +31,16 @@ Several [revisions](https://documentation.ubuntu.com/juju/3.6/reference/charm/#c > > See: [`juju set-constraints`](https://juju.is/docs/juju/juju-set-constraints), [`juju info`](https://juju.is/docs/juju/juju-info) +### Release 342-344 + +| Revision | amd64 | arm64 | s390x | Ubuntu 22.04 LTS +|:--------:|:-----:|:-----:|:-----:|:-----:| +|[343] | ![check] | | | ![check] | +|[344] | | ![check] | | ![check] | +|[342] | | | ![check] | ![check] | + +[details=Older releases] + ### Release 254-255 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS @@ -37,8 +48,6 @@ Several [revisions](https://documentation.ubuntu.com/juju/3.6/reference/charm/#c |[254] || ![check] | ![check] | |[255] | ![check]| | ![check] | -[details=Older releases] - ### Release 240-241 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS @@ -92,6 +101,9 @@ Several [revisions](https://documentation.ubuntu.com/juju/3.6/reference/charm/#c [/details] +[344]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev342 +[343]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev342 +[342]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev342 [255]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev255 [254]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev255 [240]: https://github.com/canonical/mysql-k8s-operator/releases/tag/rev240 diff --git a/machines/docs/reference/releases.md b/machines/docs/reference/releases.md index e4488a470..37ba6aeec 100644 --- 
a/machines/docs/reference/releases.md +++ b/machines/docs/reference/releases.md @@ -16,6 +16,7 @@ For a given release, this table shows: | Release | MySQL version | Juju version | [TLS encryption](/how-to/enable-tls)* | [COS monitoring](/how-to/monitoring-cos/enable-monitoring) | [In-place upgrades](/how-to/refresh/single-cluster/refresh-single-cluster) | [Cluster-cluster replication](/how-to/cluster-cluster-replication/deploy) | |:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [442], [443], [444] | 8.0.44 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | | [366], [367] | 8.0.41 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | | [312], [313] | 8.0.39 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | | [240] | 8.0.36 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | @@ -35,6 +36,16 @@ Several [revisions](https://documentation.ubuntu.com/juju/3.6/reference/charm/#c > If you deploy a specific revision, **you must make sure it matches your base and architecture** via the tables below or with [`juju info`](https://juju.is/docs/juju/juju-info) +### Release 442-444 + +| Revision | amd64 | arm64 | s390x | Ubuntu 22.04 LTS +|:--------:|:-----:|:-----:|:-----:|:-----:| +|[444] | ![check] | | | ![check] | +|[442] | | ![check] | | ![check] | +|[443] | | | ![check] | ![check] | + +[details=Older releases] + ### Release 366-367 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS |:--------:|:-----:|:-----:|:-----:| |[366] |![check] | | ![check] | |[367] | | ![check]| ![check] | -[details=Older releases] - ### Release 312-313 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS |:--------:|:-----:|:-----:|:-----:| |[313] |![check] | | ![check] | |[312] | | ![check]| ![check] | - ### Release 240 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS |:--------:|:-----:|:-----:|:-----:| @@ -78,6 +86,9 @@ +[444]: https://github.com/canonical/mysql-operator/releases/tag/rev442 +[443]: https://github.com/canonical/mysql-operator/releases/tag/rev442 +[442]: https://github.com/canonical/mysql-operator/releases/tag/rev442 [367]: https://github.com/canonical/mysql-operator/releases/tag/rev366 [366]: https://github.com/canonical/mysql-operator/releases/tag/rev366 [313]: https://github.com/canonical/mysql-operator/releases/tag/rev312 From 2217f03f3acf1d5324e5d8a2922f97fabf696d6e Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Mon, 9 Feb 2026 14:22:22 -0300 Subject: [PATCH 05/40] guard against mismatch between peers and cluster topology on initial setup (#66) --- kubernetes/lib/charms/mysql/v0/mysql.py | 7 ++++++- machines/lib/charms/mysql/v0/mysql.py | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/kubernetes/lib/charms/mysql/v0/mysql.py b/kubernetes/lib/charms/mysql/v0/mysql.py index ebd21bd92..465134ea1 100644 --- a/kubernetes/lib/charms/mysql/v0/mysql.py +++ b/kubernetes/lib/charms/mysql/v0/mysql.py @@ -136,7 +136,7 @@ def __init__( # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 100 +LIBPATCH = 101 PYDEPS = ["mysql_shell_client ~= 0.6"] @@ -884,6 +884,11 @@ def get_cluster_endpoints(self, relation_name: str) -> tuple[str, str, str]: if v["status"] == InstanceState.RECOVERING: continue + # Early calls for endpoint update can be run before the unit has joined the relation, + # so we skip when the unit (k) is not in the unit_labels dict + if k not in unit_labels: + continue + + address =
f"{self.get_unit_address(unit_labels[k], relation_name)}:3306" if v["status"] != InstanceState.ONLINE: diff --git a/machines/lib/charms/mysql/v0/mysql.py b/machines/lib/charms/mysql/v0/mysql.py index ebd21bd92..465134ea1 100644 --- a/machines/lib/charms/mysql/v0/mysql.py +++ b/machines/lib/charms/mysql/v0/mysql.py @@ -136,7 +136,7 @@ def __init__( # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 100 +LIBPATCH = 101 PYDEPS = ["mysql_shell_client ~= 0.6"] @@ -884,6 +884,11 @@ def get_cluster_endpoints(self, relation_name: str) -> tuple[str, str, str]: if v["status"] == InstanceState.RECOVERING: continue + # Early calls for endpoint update can be run before unit joined the relation + # so we skip when unit (k) not unit_labels dict + if k not in unit_labels: + continue + address = f"{self.get_unit_address(unit_labels[k], relation_name)}:3306" if v["status"] != InstanceState.ONLINE: From 353873541c94fc23783fc02b92806976c7291dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sinclert=20P=C3=A9rez?= Date: Tue, 10 Feb 2026 09:08:07 +0100 Subject: [PATCH 06/40] [MISC] Remove legacy pytest-asyncio config (#69) --- kubernetes/pyproject.toml | 1 - machines/pyproject.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index 27bd0b430..b98f4b35b 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -85,7 +85,6 @@ show_missing = true minversion = "6.0" log_cli_level = "INFO" markers = ["juju3", "only_with_juju_secrets", "only_without_juju_secrets"] -asyncio_mode = "auto" # Linting tools configuration [tool.ruff] diff --git a/machines/pyproject.toml b/machines/pyproject.toml index 154a016c0..d32a067e9 100644 --- a/machines/pyproject.toml +++ b/machines/pyproject.toml @@ -80,7 +80,6 @@ show_missing = true [tool.pytest.ini_options] minversion = "6.0" log_cli_level = "INFO" -asyncio_mode = "auto" # Formatting tools configuration [tool.ruff] From ba9416fa131f99b1fbc1e8bda0284523611140a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sinclert=20P=C3=A9rez?= Date: Tue, 10 Feb 2026 10:03:58 +0100 Subject: [PATCH 07/40] [MISC] Migrate to ops[tracing] package (#74) --- .../tempo_coordinator_k8s/v0/charm_tracing.py | 1090 ----------------- .../tempo_coordinator_k8s/v0/tracing.py | 987 --------------- kubernetes/poetry.lock | 286 +---- kubernetes/pyproject.toml | 4 +- kubernetes/src/charm.py | 38 +- kubernetes/src/constants.py | 2 - kubernetes/tests/unit/conftest.py | 7 - .../tempo_coordinator_k8s/v0/charm_tracing.py | 1090 ----------------- machines/poetry.lock | 285 +---- machines/pyproject.toml | 4 +- machines/src/charm.py | 32 +- machines/tests/unit/conftest.py | 7 - 12 files changed, 133 insertions(+), 3699 deletions(-) delete mode 100644 kubernetes/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py delete mode 100644 kubernetes/lib/charms/tempo_coordinator_k8s/v0/tracing.py delete mode 100644 machines/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py diff --git a/kubernetes/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/kubernetes/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py deleted file mode 100644 index a9b6deeb6..000000000 --- a/kubernetes/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ /dev/null @@ -1,1090 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. - -"""This charm library contains utilities to instrument your Charm with opentelemetry tracing data collection. - -(yes! charm code, not workload code!) 
- -This means that, if your charm is related to, for example, COS' Tempo charm, you will be able to inspect -in real time from the Grafana dashboard the execution flow of your charm. - -# Quickstart -Fetch the following charm libs: - - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing - -Then edit your charm code to include: - -```python -# import the necessary charm libs -from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config -from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing - -# decorate your charm class with charm_tracing: -@charm_tracing( - # forward-declare the instance attributes that the instrumentor will look up to obtain the - # tempo endpoint and server certificate - tracing_endpoint="tracing_endpoint", - server_cert="server_cert" -) -class MyCharm(CharmBase): - _path_to_cert = "/path/to/cert.crt" - # path to cert file **in the charm container**. Its presence will be used to determine whether - # the charm is ready to use tls for encrypting charm traces. If your charm does not support tls, - # you can ignore this and pass None to charm_tracing_config. - # If you do support TLS, you'll need to make sure that the server cert is copied to this location - # and kept up to date so the instrumentor can use it. - - def __init__(self, ...): - ... - self.tracing = TracingEndpointRequirer(self, ...) - self.tracing_endpoint, self.server_cert = charm_tracing_config(self.tracing, self._path_to_cert) -``` - -# Detailed usage -To use this library, you need to do two things: -1) decorate your charm class with - -`@trace_charm(tracing_endpoint="my_tracing_endpoint")` - -2) add to your charm a "my_tracing_endpoint" (you can name this attribute whatever you like) -**property**, **method** or **instance attribute** that returns an otlp http/https endpoint url. -If you are using the ``charms.tempo_coordinator_k8s.v0.tracing.TracingEndpointRequirer`` as -``self.tracing = TracingEndpointRequirer(self)``, the implementation could be: - -``` - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.get_endpoint("otlp_http") - else: - return None -``` - -At this point your charm will be automatically instrumented so that: -- charm execution starts a trace, containing - - every event as a span (including custom events) - - every charm method call (except dunders) as a span - -We recommend that you scale up your tracing provider and relate it to an ingress so that your tracing requests -go through the ingress and get load balanced across all units. Otherwise, if the provider's leader goes down, your tracing goes down. - - -## TLS support -If your charm integrates with a TLS provider which is also trusted by the tracing provider (the Tempo charm), -you can configure ``charm_tracing`` to use TLS by passing a ``server_cert`` parameter to the decorator. - -If your charm is not trusting the same CA as the Tempo endpoint it is sending traces to, -you'll need to implement a cert-transfer relation to obtain the CA certificate from the same -CA that Tempo is using. - -For example: -``` -from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert" -) -class MyCharm(CharmBase): - self._server_cert = "/path/to/server.crt" - ... 
- - def on_tls_changed(self, e) -> Optional[str]: - # update the server cert on the charm container for charm tracing - Path(self._server_cert).write_text(self.get_server_cert()) - - def on_tls_broken(self, e) -> Optional[str]: - # remove the server cert so charm_tracing won't try to use tls anymore - Path(self._server_cert).unlink() -``` - - -## More fine-grained manual instrumentation -if you wish to add more spans to the trace, you can do so by getting a hold of the tracer like so: -``` -import opentelemetry -... -def get_tracer(self) -> opentelemetry.trace.Tracer: - return opentelemetry.trace.get_tracer(type(self).__name__) -``` - -By default, the tracer is named after the charm type. If you wish to override that, you can pass -a different ``service_name`` argument to ``trace_charm``. - -See the official opentelemetry Python SDK documentation for usage: -https://opentelemetry-python.readthedocs.io/en/latest/ - - -## Caching traces -The `trace_charm` machinery will buffer any traces collected during charm execution and store them -to a file on the charm container until a tracing backend becomes available. At that point, it will -flush them to the tracing receiver. - -By default, the buffer is configured to start dropping old traces if any of these conditions apply: - -- the storage size exceeds 10 MiB -- the number of buffered events exceeds 100 - -You can configure this by, for example: - -```python -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert", - # only cache up to 42 events - buffer_max_events=42, - # only cache up to 42 MiB - buffer_max_size_mib=42, # minimum 10! -) -class MyCharm(CharmBase): - ... -``` - -Note that setting `buffer_max_events` to 0 will effectively disable the buffer. - -The path of the buffer file is by default in the charm's execution root, which for k8s charms means -that in case of pod churn, the cache will be lost. The recommended solution is to use an existing storage -(or add a new one) such as: - -```yaml -storage: - data: - type: filesystem - location: /charm-traces -``` - -and then configure the `@trace_charm` decorator to use it as path for storing the buffer: -```python -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert", - # store traces to a PVC so they're not lost on pod restart. - buffer_path="/charm-traces/buffer.file", -) -class MyCharm(CharmBase): - ... -``` - -## Upgrading from `tempo_k8s.v0` - -If you are upgrading from `tempo_k8s.v0.charm_tracing` (note that since then, the charm library moved to -`tempo_coordinator_k8s.v0.charm_tracing`), you need to take the following steps (assuming you already -have the newest version of the library in your charm): -1) If you need the dependency for your tests, add the following dependency to your charm project -(or, if your project had a dependency on `opentelemetry-exporter-otlp-proto-grpc` only because -of `charm_tracing` v0, you can replace it with): - -`opentelemetry-exporter-otlp-proto-http>=1.21.0`. - -2) Update the charm method referenced to from ``@trace`` and ``@trace_charm``, -to return from ``TracingEndpointRequirer.get_endpoint("otlp_http")`` instead of ``grpc_http``. -For example: - -``` - from charms.tempo_k8s.v0.charm_tracing import trace_charm - - @trace_charm( - tracing_endpoint="my_tracing_endpoint", - ) - class MyCharm(CharmBase): - - ... 
- - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.otlp_grpc_endpoint() # OLD API, DEPRECATED. - else: - return None -``` - -needs to be replaced with: - -``` - from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - - @trace_charm( - tracing_endpoint="my_tracing_endpoint", - ) - class MyCharm(CharmBase): - - ... - - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.get_endpoint("otlp_http") # NEW API, use this. - else: - return None -``` - -3) If you were passing a certificate (str) using `server_cert`, you need to change it to -provide an *absolute* path to the certificate file instead. -""" -import typing - -from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( - encode_spans, -) -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter - - -def _remove_stale_otel_sdk_packages(): - """Hack to remove stale opentelemetry sdk packages from the charm's python venv. - - See https://github.com/canonical/grafana-agent-operator/issues/146 and - https://bugs.launchpad.net/juju/+bug/2058335 for more context. This patch can be removed after - this juju issue is resolved and sufficient time has passed to expect most users of this library - have migrated to the patched version of juju. When this patch is removed, un-ignore rule E402 for this file in the pyproject.toml (see setting - [tool.ruff.lint.per-file-ignores] in pyproject.toml). - - This only has an effect if executed on an upgrade-charm event. - """ - # all imports are local to keep this function standalone, side-effect-free, and easy to revert later - import os - - if os.getenv("JUJU_DISPATCH_PATH") != "hooks/upgrade-charm": - return - - import logging - import shutil - from collections import defaultdict - - from importlib_metadata import distributions - - otel_logger = logging.getLogger("charm_tracing_otel_patcher") - otel_logger.debug("Applying _remove_stale_otel_sdk_packages patch on charm upgrade") - # group by name all distributions starting with "opentelemetry_" - otel_distributions = defaultdict(list) - for distribution in distributions(): - name = distribution._normalized_name # type: ignore - if name.startswith("opentelemetry_"): - otel_distributions[name].append(distribution) - - otel_logger.debug(f"Found {len(otel_distributions)} opentelemetry distributions") - - # If we have multiple distributions with the same name, remove any that have 0 associated files - for name, distributions_ in otel_distributions.items(): - if len(distributions_) <= 1: - continue - - otel_logger.debug(f"Package {name} has multiple ({len(distributions_)}) distributions.") - for distribution in distributions_: - if not distribution.files: # Not None or empty list - path = distribution._path # type: ignore - otel_logger.info(f"Removing empty distribution of {name} at {path}.") - shutil.rmtree(path) - - otel_logger.debug("Successfully applied _remove_stale_otel_sdk_packages patch. ") - - -# apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm. -# it could be trouble if someone ever decides to implement their own tracer parallel to -# ours and before the charm has inited. We assume they won't. 
-_remove_stale_otel_sdk_packages() - -import functools -import inspect -import logging -import os -from contextlib import contextmanager -from contextvars import Context, ContextVar, copy_context -from pathlib import Path -from typing import ( - Any, - Callable, - Generator, - List, - Optional, - Sequence, - Type, - TypeVar, - Union, - cast, -) - -import opentelemetry -import ops -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider -from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor, - SpanExporter, - SpanExportResult, -) -from opentelemetry.trace import INVALID_SPAN, Tracer -from opentelemetry.trace import get_current_span as otlp_get_current_span -from opentelemetry.trace import ( - get_tracer, - get_tracer_provider, - set_span_in_context, - set_tracer_provider, -) -from ops.charm import CharmBase -from ops.framework import Framework - -# The unique Charmhub library identifier, never change it -LIBID = "01780f1e588c42c3976d26780fdf9b89" - -# Increment this major API version when introducing breaking changes -LIBAPI = 0 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version - -LIBPATCH = 5 - -PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] - -logger = logging.getLogger("tracing") -dev_logger = logging.getLogger("tracing-dev") - -# set this to 0 if you are debugging/developing this library source -dev_logger.setLevel(logging.ERROR) - -_CharmType = Type[CharmBase] # the type CharmBase and any subclass thereof -_C = TypeVar("_C", bound=_CharmType) -_T = TypeVar("_T", bound=type) -_F = TypeVar("_F", bound=Type[Callable]) -tracer: ContextVar[Tracer] = ContextVar("tracer") -_GetterType = Union[Callable[[_CharmType], Optional[str]], property] - -CHARM_TRACING_ENABLED = "CHARM_TRACING_ENABLED" -BUFFER_DEFAULT_CACHE_FILE_NAME = ".charm_tracing_buffer.raw" -# we store the buffer as raw otlp-native protobuf (bytes) since it's hard to serialize/deserialize it in -# any portable format. Json dumping is supported, but loading isn't. -# cfr: https://github.com/open-telemetry/opentelemetry-python/issues/1003 - -BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB = 10 -_BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN = 10 -BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH = 100 -_MiB_TO_B = 2**20 # megabyte to byte conversion rate -_OTLP_SPAN_EXPORTER_TIMEOUT = 1 -"""Timeout in seconds that the OTLP span exporter has to push traces to the backend.""" - - -class _Buffer: - """Handles buffering for spans emitted while no tracing backend is configured or available. - - Use the max_event_history_length_buffering param of @trace_charm to tune - the amount of memory that this will hog on your units. - - The buffer is formatted as a bespoke byte dump (protobuf limitation). - We cannot store them as json because that is not well-supported by the sdk - (see https://github.com/open-telemetry/opentelemetry-python/issues/3364). 
- """ - - _SPANSEP = b"__CHARM_TRACING_BUFFER_SPAN_SEP__" - - def __init__(self, db_file: Path, max_event_history_length: int, max_buffer_size_mib: int): - self._db_file = db_file - self._max_event_history_length = max_event_history_length - self._max_buffer_size_mib = max(max_buffer_size_mib, _BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN) - - # set by caller - self.exporter: Optional[OTLPSpanExporter] = None - - def save(self, spans: typing.Sequence[ReadableSpan]): - """Save the spans collected by this exporter to the cache file. - - This method should be as fail-safe as possible. - """ - if self._max_event_history_length < 1: - dev_logger.debug("buffer disabled: max history length < 1") - return - - current_history_length = len(self.load()) - new_history_length = current_history_length + len(spans) - if (diff := self._max_event_history_length - new_history_length) < 0: - self.drop(diff) - self._save(spans) - - def _serialize(self, spans: Sequence[ReadableSpan]) -> bytes: - # encode because otherwise we can't json-dump them - return encode_spans(spans).SerializeToString() - - def _save(self, spans: Sequence[ReadableSpan], replace: bool = False): - dev_logger.debug(f"saving {len(spans)} new spans to buffer") - old = [] if replace else self.load() - new = self._serialize(spans) - - try: - # if the buffer exceeds the size limit, we start dropping old spans until it does - - while len((new + self._SPANSEP.join(old))) > (self._max_buffer_size_mib * _MiB_TO_B): - if not old: - # if we've already dropped all spans and still we can't get under the - # size limit, we can't save this span - logger.error( - f"span exceeds total buffer size limit ({self._max_buffer_size_mib}MiB); " - f"buffering FAILED" - ) - return - - old = old[1:] - logger.warning( - f"buffer size exceeds {self._max_buffer_size_mib}MiB; dropping older spans... " - f"Please increase the buffer size, disable buffering, or ensure the spans can be flushed." - ) - - self._db_file.write_bytes(new + self._SPANSEP.join(old)) - except Exception: - logger.exception("error buffering spans") - - def load(self) -> List[bytes]: - """Load currently buffered spans from the cache file. - - This method should be as fail-safe as possible. - """ - if not self._db_file.exists(): - dev_logger.debug("buffer file not found. buffer empty.") - return [] - try: - spans = self._db_file.read_bytes().split(self._SPANSEP) - except Exception: - logger.exception(f"error parsing {self._db_file}") - return [] - return spans - - def drop(self, n_spans: Optional[int] = None): - """Drop some currently buffered spans from the cache file.""" - current = self.load() - if n_spans: - dev_logger.debug(f"dropping {n_spans} spans from buffer") - new = current[n_spans:] - else: - dev_logger.debug("emptying buffer") - new = [] - - self._db_file.write_bytes(self._SPANSEP.join(new)) - - def flush(self) -> Optional[bool]: - """Export all buffered spans to the given exporter, then clear the buffer. - - Returns whether the flush was successful, and None if there was nothing to flush. 
- """ - if not self.exporter: - dev_logger.debug("no exporter set; skipping buffer flush") - return False - - buffered_spans = self.load() - if not buffered_spans: - dev_logger.debug("nothing to flush; buffer empty") - return None - - errors = False - for span in buffered_spans: - try: - out = self.exporter._export(span) # type: ignore - if not (200 <= out.status_code < 300): - # take any 2xx status code as a success - errors = True - except ConnectionError: - dev_logger.debug( - "failed exporting buffered span; backend might be down or still starting" - ) - errors = True - except Exception: - logger.exception("unexpected error while flushing span batch from buffer") - errors = True - - if not errors: - self.drop() - else: - logger.error("failed flushing spans; buffer preserved") - return not errors - - @property - def is_empty(self): - """Utility to check whether the buffer has any stored spans. - - This is more efficient than attempting a load() given how large the buffer might be. - """ - return (not self._db_file.exists()) or (self._db_file.stat().st_size == 0) - - -class _OTLPSpanExporter(OTLPSpanExporter): - """Subclass of OTLPSpanExporter to configure the max retry timeout, so that it fails a bit faster.""" - - # The issue we're trying to solve is that the model takes AGES to settle if e.g. tls is misconfigured, - # as every hook of a charm_tracing-instrumented charm takes about a minute to exit, as the charm can't - # flush the traces and keeps retrying for 'too long' - - _MAX_RETRY_TIMEOUT = 4 - # we give the exporter 4 seconds in total to succeed pushing the traces to tempo - # if it fails, we'll be caching the data in the buffer and flush it the next time, so there's no data loss risk. - # this means 2/3 retries (hard to guess from the implementation) and up to ~7 seconds total wait - - -class _BufferedExporter(InMemorySpanExporter): - def __init__(self, buffer: _Buffer) -> None: - super().__init__() - self._buffer = buffer - - def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: - self._buffer.save(spans) - return super().export(spans) - - def force_flush(self, timeout_millis: int = 0) -> bool: - # parent implementation is fake, so the timeout_millis arg is not doing anything. - result = super().force_flush(timeout_millis) - self._buffer.save(self.get_finished_spans()) - return result - - -def is_enabled() -> bool: - """Whether charm tracing is enabled.""" - return os.getenv(CHARM_TRACING_ENABLED, "1") == "1" - - -@contextmanager -def charm_tracing_disabled(): - """Contextmanager to temporarily disable charm tracing. - - For usage in tests. - """ - previous = os.getenv(CHARM_TRACING_ENABLED, "1") - os.environ[CHARM_TRACING_ENABLED] = "0" - yield - os.environ[CHARM_TRACING_ENABLED] = previous - - -def get_current_span() -> Union[Span, None]: - """Return the currently active Span, if there is one, else None. - - If you'd rather keep your logic unconditional, you can use opentelemetry.trace.get_current_span, - which will return an object that behaves like a span but records no data. 
- """ - span = otlp_get_current_span() - if span is INVALID_SPAN: - return None - return cast(Span, span) - - -def _get_tracer_from_context(ctx: Context) -> Optional[ContextVar]: - tracers = [v for v in ctx if v is not None and v.name == "tracer"] - if tracers: - return tracers[0] - return None - - -def _get_tracer() -> Optional[Tracer]: - """Find tracer in context variable and as a fallback locate it in the full context.""" - try: - return tracer.get() - except LookupError: - # fallback: this course-corrects for a user error where charm_tracing symbols are imported - # from different paths (typically charms.tempo_coordinator_k8s... and lib.charms.tempo_coordinator_k8s...) - try: - ctx: Context = copy_context() - if context_tracer := _get_tracer_from_context(ctx): - logger.warning( - "Tracer not found in `tracer` context var. " - "Verify that you're importing all `charm_tracing` symbols from the same module path. \n" - "For example, DO" - ": `from charms.lib...charm_tracing import foo, bar`. \n" - "DONT: \n" - " \t - `from charms.lib...charm_tracing import foo` \n" - " \t - `from lib...charm_tracing import bar` \n" - "For more info: https://python-notes.curiousefficiency.org/en/latest/python" - "_concepts/import_traps.html#the-double-import-trap" - ) - return context_tracer.get() - else: - return None - except LookupError: - return None - - -@contextmanager -def _span(name: str) -> Generator[Optional[Span], Any, Any]: - """Context to create a span if there is a tracer, otherwise do nothing.""" - if tracer := _get_tracer(): - with tracer.start_as_current_span(name) as span: - yield cast(Span, span) - else: - yield None - - -class TracingError(RuntimeError): - """Base class for errors raised by this module.""" - - -class UntraceableObjectError(TracingError): - """Raised when an object you're attempting to instrument cannot be autoinstrumented.""" - - -def _get_tracing_endpoint( - tracing_endpoint_attr: str, - charm_instance: object, - charm_type: type, -): - _tracing_endpoint = getattr(charm_instance, tracing_endpoint_attr) - if callable(_tracing_endpoint): - tracing_endpoint = _tracing_endpoint() - else: - tracing_endpoint = _tracing_endpoint - - if tracing_endpoint is None: - return - - elif not isinstance(tracing_endpoint, str): - raise TypeError( - f"{charm_type.__name__}.{tracing_endpoint_attr} should resolve to a tempo endpoint (string); " - f"got {tracing_endpoint} instead." - ) - - dev_logger.debug(f"Setting up span exporter to endpoint: {tracing_endpoint}/v1/traces") - return f"{tracing_endpoint}/v1/traces" - - -def _get_server_cert( - server_cert_attr: str, - charm_instance: ops.CharmBase, - charm_type: Type[ops.CharmBase], -): - _server_cert = getattr(charm_instance, server_cert_attr) - if callable(_server_cert): - server_cert = _server_cert() - else: - server_cert = _server_cert - - if server_cert is None: - logger.warning( - f"{charm_type}.{server_cert_attr} is None; sending traces over INSECURE connection." - ) - return - elif not Path(server_cert).is_absolute(): - raise ValueError( - f"{charm_type}.{server_cert_attr} should resolve to a valid tls cert absolute path (string | Path)); " - f"got {server_cert} instead." 
- ) - return server_cert - - -def _setup_root_span_initializer( - charm_type: _CharmType, - tracing_endpoint_attr: str, - server_cert_attr: Optional[str], - service_name: Optional[str], - buffer_path: Optional[Path], - buffer_max_events: int, - buffer_max_size_mib: int, -): - """Patch the charm's initializer.""" - original_init = charm_type.__init__ - - @functools.wraps(original_init) - def wrap_init(self: CharmBase, framework: Framework, *args, **kwargs): - # we're using 'self' here because this is charm init code, makes sense to read what's below - # from the perspective of the charm. Self.unit.name... - - original_init(self, framework, *args, **kwargs) - # we call this from inside the init context instead of, say, _autoinstrument, because we want it to - # be checked on a per-charm-instantiation basis, not on a per-type-declaration one. - if not is_enabled(): - # this will only happen during unittesting, hopefully, so it's fine to log a - # bit more verbosely - logger.info("Tracing DISABLED: skipping root span initialization") - return - - original_event_context = framework._event_context - # default service name isn't just app name because it could conflict with the workload service name - _service_name = service_name or f"{self.app.name}-charm" - - unit_name = self.unit.name - resource = Resource.create( - attributes={ - "service.name": _service_name, - "compose_service": _service_name, - "charm_type": type(self).__name__, - # juju topology - "juju_unit": unit_name, - "juju_application": self.app.name, - "juju_model": self.model.name, - "juju_model_uuid": self.model.uuid, - } - ) - provider = TracerProvider(resource=resource) - - # if anything goes wrong with retrieving the endpoint, we let the exception bubble up. - tracing_endpoint = _get_tracing_endpoint(tracing_endpoint_attr, self, charm_type) - - buffer_only = False - # whether we're only exporting to buffer, or also to the otlp exporter. - - if not tracing_endpoint: - # tracing is off if tracing_endpoint is None - # however we can buffer things until tracing comes online - buffer_only = True - - server_cert: Optional[Union[str, Path]] = ( - _get_server_cert(server_cert_attr, self, charm_type) if server_cert_attr else None - ) - - if (tracing_endpoint and tracing_endpoint.startswith("https://")) and not server_cert: - logger.error( - "Tracing endpoint is https, but no server_cert has been passed." - "Please point @trace_charm to a `server_cert` attr. " - "This might also mean that the tracing provider is related to a " - "certificates provider, but this application is not (yet). " - "In that case, you might just have to wait a bit for the certificates " - "integration to settle. This span will be buffered." 
- ) - buffer_only = True - - buffer = _Buffer( - db_file=buffer_path or Path() / BUFFER_DEFAULT_CACHE_FILE_NAME, - max_event_history_length=buffer_max_events, - max_buffer_size_mib=buffer_max_size_mib, - ) - previous_spans_buffered = not buffer.is_empty - - exporters: List[SpanExporter] = [] - if buffer_only: - # we have to buffer because we're missing necessary backend configuration - dev_logger.debug("buffering mode: ON") - exporters.append(_BufferedExporter(buffer)) - - else: - dev_logger.debug("buffering mode: FALLBACK") - # in principle, we have the right configuration to be pushing traces, - # but if we fail for whatever reason, we will put everything in the buffer - # and retry the next time - otlp_exporter = _OTLPSpanExporter( - endpoint=tracing_endpoint, - certificate_file=str(Path(server_cert).absolute()) if server_cert else None, - timeout=_OTLP_SPAN_EXPORTER_TIMEOUT, # give individual requests 1 second to succeed - ) - exporters.append(otlp_exporter) - exporters.append(_BufferedExporter(buffer)) - buffer.exporter = otlp_exporter - - for exporter in exporters: - processor = BatchSpanProcessor(exporter) - provider.add_span_processor(processor) - - set_tracer_provider(provider) - _tracer = get_tracer(_service_name) # type: ignore - _tracer_token = tracer.set(_tracer) - - dispatch_path = os.getenv("JUJU_DISPATCH_PATH", "") # something like hooks/install - event_name = dispatch_path.split("/")[1] if "/" in dispatch_path else dispatch_path - root_span_name = f"{unit_name}: {event_name} event" - span = _tracer.start_span(root_span_name, attributes={"juju.dispatch_path": dispatch_path}) - - # all these shenanigans are to work around the fact that the opentelemetry tracing API is built - # on the assumption that spans will be used as contextmanagers. - # Since we don't (as we need to close the span on framework.commit), - # we need to manually set the root span as current. - ctx = set_span_in_context(span) - - # log a trace id, so we can pick it up from the logs (and jhack) to look it up in tempo. - root_trace_id = hex(span.get_span_context().trace_id)[2:] # strip 0x prefix - logger.debug(f"Starting root trace with id={root_trace_id!r}.") - - span_token = opentelemetry.context.attach(ctx) # type: ignore - - @contextmanager - def wrap_event_context(event_name: str): - dev_logger.debug(f"entering event context: {event_name}") - # when the framework enters an event context, we create a span. - with _span("event: " + event_name) as event_context_span: - if event_context_span: - # todo: figure out how to inject event attrs in here - event_context_span.add_event(event_name) - yield original_event_context(event_name) - - framework._event_context = wrap_event_context # type: ignore - - original_close = framework.close - - @functools.wraps(original_close) - def wrap_close(): - dev_logger.debug("tearing down tracer and flushing traces") - span.end() - opentelemetry.context.detach(span_token) # type: ignore - tracer.reset(_tracer_token) - tp = cast(TracerProvider, get_tracer_provider()) - flush_successful = tp.force_flush(timeout_millis=1000) # don't block for too long - - if buffer_only: - # if we're in buffer_only mode, it means we couldn't even set up the exporter for - # tempo as we're missing some data. 
- # so attempting to flush the buffer doesn't make sense - dev_logger.debug("tracing backend unavailable: all spans pushed to buffer") - - else: - dev_logger.debug("tracing backend found: attempting to flush buffer...") - - # if we do have an exporter for tempo, and we could send traces to it, - # we can attempt to flush the buffer as well. - if not flush_successful: - logger.error("flushing FAILED: unable to push traces to backend.") - else: - dev_logger.debug("flush succeeded.") - - # the backend has accepted the spans generated during this event, - if not previous_spans_buffered: - # if the buffer was empty to begin with, any spans we collected now can be discarded - buffer.drop() - dev_logger.debug("buffer dropped: this trace has been sent already") - else: - # if the buffer was nonempty, we can attempt to flush it - dev_logger.debug("attempting buffer flush...") - buffer_flush_successful = buffer.flush() - if buffer_flush_successful: - dev_logger.debug("buffer flush OK") - elif buffer_flush_successful is None: - # TODO is this even possible? - dev_logger.debug("buffer flush OK; empty: nothing to flush") - else: - # this situation is pretty weird, I'm not even sure it can happen, - # because it would mean that we did manage - # to push traces directly to the tempo exporter (flush_successful), - # but the buffer flush failed to push to the same exporter! - logger.error("buffer flush FAILED") - - tp.shutdown() - original_close() - - framework.close = wrap_close - return - - charm_type.__init__ = wrap_init # type: ignore - - -def trace_charm( - tracing_endpoint: str, - server_cert: Optional[str] = None, - service_name: Optional[str] = None, - extra_types: Sequence[type] = (), - buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH, - buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB, - buffer_path: Optional[Union[str, Path]] = None, -) -> Callable[[_T], _T]: - """Autoinstrument the decorated charm with tracing telemetry. - - Use this function to get out-of-the-box traces for all events emitted on this charm and all - method calls on instances of this class. - - Usage: - >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - >>> from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer - >>> from ops import CharmBase - >>> - >>> @trace_charm( - >>> tracing_endpoint="tempo_otlp_http_endpoint", - >>> ) - >>> class MyCharm(CharmBase): - >>> - >>> def __init__(self, framework: Framework): - >>> ... - >>> self.tracing = TracingEndpointRequirer(self) - >>> - >>> @property - >>> def tempo_otlp_http_endpoint(self) -> Optional[str]: - >>> if self.tracing.is_ready(): - >>> return self.tracing.otlp_http_endpoint() - >>> else: - >>> return None - >>> - - :param tracing_endpoint: name of a method, property or attribute on the charm type that returns an - optional (fully resolvable) tempo url to which the charm traces will be pushed. - If None, tracing will be effectively disabled. - :param server_cert: name of a method, property or attribute on the charm type that returns an - optional absolute path to a CA certificate file to be used when sending traces to a remote server. - If it returns None, an _insecure_ connection will be used. To avoid errors in transient - situations where the endpoint is already https but there is no certificate on disk yet, it - is recommended to disable tracing (by returning None from the tracing_endpoint) altogether - until the cert has been written to disk. 
- :param service_name: service name tag to attach to all traces generated by this charm. - Defaults to the juju application name this charm is deployed under. - :param extra_types: pass any number of types that you also wish to autoinstrument. - For example, charm libs, relation endpoint wrappers, workload abstractions, ... - :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering. - :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped. - Minimum 10MiB. - :param buffer_path: path to buffer file to use for saving buffered spans. - """ - - def _decorator(charm_type: _T) -> _T: - """Autoinstrument the wrapped charmbase type.""" - _autoinstrument( - charm_type, - tracing_endpoint_attr=tracing_endpoint, - server_cert_attr=server_cert, - service_name=service_name, - extra_types=extra_types, - buffer_path=Path(buffer_path) if buffer_path else None, - buffer_max_size_mib=buffer_max_size_mib, - buffer_max_events=buffer_max_events, - ) - return charm_type - - return _decorator - - -def _autoinstrument( - charm_type: _T, - tracing_endpoint_attr: str, - server_cert_attr: Optional[str] = None, - service_name: Optional[str] = None, - extra_types: Sequence[type] = (), - buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH, - buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB, - buffer_path: Optional[Path] = None, -) -> _T: - """Set up tracing on this charm class. - - Use this function to get out-of-the-box traces for all events emitted on this charm and all - method calls on instances of this class. - - Usage: - - >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import _autoinstrument - >>> from ops.main import main - >>> _autoinstrument( - >>> MyCharm, - >>> tracing_endpoint_attr="tempo_otlp_http_endpoint", - >>> service_name="MyCharm", - >>> extra_types=(Foo, Bar) - >>> ) - >>> main(MyCharm) - - :param charm_type: the CharmBase subclass to autoinstrument. - :param tracing_endpoint_attr: name of a method, property or attribute on the charm type that returns an - optional (fully resolvable) tempo url to which the charm traces will be pushed. - If None, tracing will be effectively disabled. - :param server_cert_attr: name of a method, property or attribute on the charm type that returns an - optional absolute path to a CA certificate file to be used when sending traces to a remote server. - If it returns None, an _insecure_ connection will be used. To avoid errors in transient - situations where the endpoint is already https but there is no certificate on disk yet, it - is recommended to disable tracing (by returning None from the tracing_endpoint) altogether - until the cert has been written to disk. - :param service_name: service name tag to attach to all traces generated by this charm. - Defaults to the juju application name this charm is deployed under. - :param extra_types: pass any number of types that you also wish to autoinstrument. - For example, charm libs, relation endpoint wrappers, workload abstractions, ... - :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering. - :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped. - Minimum 10MiB. - :param buffer_path: path to buffer file to use for saving buffered spans. 
- """ - dev_logger.debug(f"instrumenting {charm_type}") - _setup_root_span_initializer( - charm_type, - tracing_endpoint_attr, - server_cert_attr=server_cert_attr, - service_name=service_name, - buffer_path=buffer_path, - buffer_max_events=buffer_max_events, - buffer_max_size_mib=buffer_max_size_mib, - ) - trace_type(charm_type) - for type_ in extra_types: - trace_type(type_) - - return charm_type - - -def trace_type(cls: _T) -> _T: - """Set up tracing on this class. - - Use this decorator to get out-of-the-box traces for all method calls on instances of this class. - It assumes that this class is only instantiated after a charm type decorated with `@trace_charm` - has been instantiated. - """ - dev_logger.debug(f"instrumenting {cls}") - for name, method in inspect.getmembers(cls, predicate=inspect.isfunction): - dev_logger.debug(f"discovered {method}") - - if method.__name__.startswith("__"): - dev_logger.debug(f"skipping {method} (dunder)") - continue - - # the span title in the general case should be: - # method call: MyCharmWrappedMethods.b - # if the method has a name (functools.wrapped or regular method), let - # _trace_callable use its default algorithm to determine what name to give the span. - trace_method_name = None - try: - qualname_c0 = method.__qualname__.split(".")[0] - if not hasattr(cls, method.__name__): - # if the callable doesn't have a __name__ (probably a decorated method), - # it probably has a bad qualname too (such as my_decorator..wrapper) which is not - # great for finding out what the trace is about. So we use the method name instead and - # add a reference to the decorator name. Result: - # method call: @my_decorator(MyCharmWrappedMethods.b) - trace_method_name = f"@{qualname_c0}({cls.__name__}.{name})" - except Exception: # noqa: failsafe - pass - - new_method = trace_method(method, name=trace_method_name) - - if isinstance(inspect.getattr_static(cls, name), staticmethod): - new_method = staticmethod(new_method) - setattr(cls, name, new_method) - - return cls - - -def trace_method(method: _F, name: Optional[str] = None) -> _F: - """Trace this method. - - A span will be opened when this method is called and closed when it returns. - """ - return _trace_callable(method, "method", name=name) - - -def trace_function(function: _F, name: Optional[str] = None) -> _F: - """Trace this function. - - A span will be opened when this function is called and closed when it returns. - """ - return _trace_callable(function, "function", name=name) - - -def _trace_callable(callable: _F, qualifier: str, name: Optional[str] = None) -> _F: - dev_logger.debug(f"instrumenting {callable}") - - # sig = inspect.signature(callable) - @functools.wraps(callable) - def wrapped_function(*args, **kwargs): # type: ignore - name_ = name or getattr( - callable, "__qualname__", getattr(callable, "__name__", str(callable)) - ) - with _span(f"{qualifier} call: {name_}"): # type: ignore - return callable(*args, **kwargs) # type: ignore - - # wrapped_function.__signature__ = sig - return wrapped_function # type: ignore - - -def trace(obj: Union[Type, Callable]): - """Trace this object and send the resulting spans to Tempo. - - It will dispatch to ``trace_type`` if the decorated object is a class, otherwise - ``trace_function``. 
- """ - if isinstance(obj, type): - if issubclass(obj, CharmBase): - raise ValueError( - "cannot use @trace on CharmBase subclasses: use @trace_charm instead " - "(we need some arguments!)" - ) - return trace_type(obj) - else: - try: - return trace_function(obj) - except Exception: - raise UntraceableObjectError( - f"cannot create span from {type(obj)}; instrument {obj} manually." - ) diff --git a/kubernetes/lib/charms/tempo_coordinator_k8s/v0/tracing.py b/kubernetes/lib/charms/tempo_coordinator_k8s/v0/tracing.py deleted file mode 100644 index 27144fa62..000000000 --- a/kubernetes/lib/charms/tempo_coordinator_k8s/v0/tracing.py +++ /dev/null @@ -1,987 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. -"""## Overview. - -This document explains how to integrate with the Tempo charm for the purpose of pushing traces to a -tracing endpoint provided by Tempo. It also explains how alternative implementations of the Tempo charm -may maintain the same interface and be backward compatible with all currently integrated charms. - -## Requirer Library Usage - -Charms seeking to push traces to Tempo, must do so using the `TracingEndpointRequirer` -object from this charm library. For the simplest use cases, using the `TracingEndpointRequirer` -object only requires instantiating it, typically in the constructor of your charm. The -`TracingEndpointRequirer` constructor requires the name of the relation over which a tracing endpoint - is exposed by the Tempo charm, and a list of protocols it intends to send traces with. - This relation must use the `tracing` interface. - The `TracingEndpointRequirer` object may be instantiated as follows - - from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer - - def __init__(self, *args): - super().__init__(*args) - # ... - self.tracing = TracingEndpointRequirer(self, - protocols=['otlp_grpc', 'otlp_http', 'jaeger_http_thrift'] - ) - # ... - -Note that the first argument (`self`) to `TracingEndpointRequirer` is always a reference to the -parent charm. - -Alternatively to providing the list of requested protocols at init time, the charm can do it at -any point in time by calling the -`TracingEndpointRequirer.request_protocols(*protocol:str, relation:Optional[Relation])` method. -Using this method also allows you to use per-relation protocols. - -Units of requirer charms obtain the tempo endpoint to which they will push their traces by calling -`TracingEndpointRequirer.get_endpoint(protocol: str)`, where `protocol` is, for example: -- `otlp_grpc` -- `otlp_http` -- `zipkin` -- `tempo` - -If the `protocol` is not in the list of protocols that the charm requested at endpoint set-up time, -the library will raise an error. - -We recommend that you scale up your tracing provider and relate it to an ingress so that your tracing requests -go through the ingress and get load balanced across all units. Otherwise, if the provider's leader goes down, your tracing goes down. - -## Provider Library Usage - -The `TracingEndpointProvider` object may be used by charms to manage relations with their -trace sources. For this purposes a Tempo-like charm needs to do two things - -1. Instantiate the `TracingEndpointProvider` object by providing it a -reference to the parent (Tempo) charm and optionally the name of the relation that the Tempo charm -uses to interact with its trace sources. This relation must conform to the `tracing` interface -and it is strongly recommended that this relation be named `tracing` which is its -default value. 
- -For example a Tempo charm may instantiate the `TracingEndpointProvider` in its constructor as -follows - - from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointProvider - - def __init__(self, *args): - super().__init__(*args) - # ... - self.tracing = TracingEndpointProvider(self) - # ... - - - -""" # noqa: W505 -import enum -import json -import logging -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Literal, - MutableMapping, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -import pydantic -from ops.charm import ( - CharmBase, - CharmEvents, - RelationBrokenEvent, - RelationEvent, - RelationRole, -) -from ops.framework import EventSource, Object -from ops.model import ModelError, Relation -from pydantic import BaseModel, Field - -# The unique Charmhub library identifier, never change it -LIBID = "d2f02b1f8d1244b5989fd55bc3a28943" - -# Increment this major API version when introducing breaking changes -LIBAPI = 0 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 5 - -PYDEPS = ["pydantic"] - -logger = logging.getLogger(__name__) - -DEFAULT_RELATION_NAME = "tracing" -RELATION_INTERFACE_NAME = "tracing" - -# Supported list rationale https://github.com/canonical/tempo-coordinator-k8s-operator/issues/8 -ReceiverProtocol = Literal[ - "zipkin", - "otlp_grpc", - "otlp_http", - "jaeger_grpc", - "jaeger_thrift_http", -] - -RawReceiver = Tuple[ReceiverProtocol, str] -"""Helper type. A raw receiver is defined as a tuple consisting of the protocol name, and the (external, if available), -(secured, if available) resolvable server url. -""" - -BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"} - - -class TransportProtocolType(str, enum.Enum): - """Receiver Type.""" - - http = "http" - grpc = "grpc" - - -receiver_protocol_to_transport_protocol: Dict[ReceiverProtocol, TransportProtocolType] = { - "zipkin": TransportProtocolType.http, - "otlp_grpc": TransportProtocolType.grpc, - "otlp_http": TransportProtocolType.http, - "jaeger_thrift_http": TransportProtocolType.http, - "jaeger_grpc": TransportProtocolType.grpc, -} -"""A mapping between telemetry protocols and their corresponding transport protocol. 
-""" - - -class TracingError(Exception): - """Base class for custom errors raised by this library.""" - - -class NotReadyError(TracingError): - """Raised by the provider wrapper if a requirer hasn't published the required data (yet).""" - - -class ProtocolNotRequestedError(TracingError): - """Raised if the user attempts to obtain an endpoint for a protocol it did not request.""" - - -class DataValidationError(TracingError): - """Raised when data validation fails on IPU relation data.""" - - -class AmbiguousRelationUsageError(TracingError): - """Raised when one wrongly assumes that there can only be one relation on an endpoint.""" - - -if int(pydantic.version.VERSION.split(".")[0]) < 2: - - class DatabagModel(BaseModel): # type: ignore - """Base databag model.""" - - class Config: - """Pydantic config.""" - - # ignore any extra fields in the databag - extra = "ignore" - """Ignore any extra fields in the databag.""" - allow_population_by_field_name = True - """Allow instantiating this class by field name (instead of forcing alias).""" - - _NEST_UNDER = None - - @classmethod - def load(cls, databag: MutableMapping): - """Load this model from a Juju databag.""" - if cls._NEST_UNDER: - return cls.parse_obj(json.loads(databag[cls._NEST_UNDER])) - - try: - data = { - k: json.loads(v) - for k, v in databag.items() - # Don't attempt to parse model-external values - if k in {f.alias for f in cls.__fields__.values()} - } - except json.JSONDecodeError as e: - msg = f"invalid databag contents: expecting json. {databag}" - logger.error(msg) - raise DataValidationError(msg) from e - - try: - return cls.parse_raw(json.dumps(data)) # type: ignore - except pydantic.ValidationError as e: - msg = f"failed to validate databag: {databag}" - logger.debug(msg, exc_info=True) - raise DataValidationError(msg) from e - - def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): - """Write the contents of this model to Juju databag. - - :param databag: the databag to write the data to. - :param clear: ensure the databag is cleared before writing it. - """ - if clear and databag: - databag.clear() - - if databag is None: - databag = {} - - if self._NEST_UNDER: - databag[self._NEST_UNDER] = self.json(by_alias=True) - return databag - - dct = self.dict() - for key, field in self.__fields__.items(): # type: ignore - value = dct[key] - databag[field.alias or key] = json.dumps(value) - - return databag - -else: - from pydantic import ConfigDict - - class DatabagModel(BaseModel): - """Base databag model.""" - - model_config = ConfigDict( - # ignore any extra fields in the databag - extra="ignore", - # Allow instantiating this class by field name (instead of forcing alias). - populate_by_name=True, - # Custom config key: whether to nest the whole datastructure (as json) - # under a field or spread it out at the toplevel. - _NEST_UNDER=None, # type: ignore - ) - """Pydantic config.""" - - @classmethod - def load(cls, databag: MutableMapping): - """Load this model from a Juju databag.""" - nest_under = cls.model_config.get("_NEST_UNDER") # type: ignore - if nest_under: - return cls.model_validate(json.loads(databag[nest_under])) # type: ignore - - try: - data = { - k: json.loads(v) - for k, v in databag.items() - # Don't attempt to parse model-external values - if k in {(f.alias or n) for n, f in cls.__fields__.items()} - } - except json.JSONDecodeError as e: - msg = f"invalid databag contents: expecting json. 
{databag}" - logger.error(msg) - raise DataValidationError(msg) from e - - try: - return cls.model_validate_json(json.dumps(data)) # type: ignore - except pydantic.ValidationError as e: - msg = f"failed to validate databag: {databag}" - logger.debug(msg, exc_info=True) - raise DataValidationError(msg) from e - - def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): - """Write the contents of this model to Juju databag. - - :param databag: the databag to write the data to. - :param clear: ensure the databag is cleared before writing it. - """ - if clear and databag: - databag.clear() - - if databag is None: - databag = {} - nest_under = self.model_config.get("_NEST_UNDER") - if nest_under: - databag[nest_under] = self.model_dump_json( # type: ignore - by_alias=True, - # skip keys whose values are default - exclude_defaults=True, - ) - return databag - - dct = self.model_dump() # type: ignore - for key, field in self.model_fields.items(): # type: ignore - value = dct[key] - if value == field.default: - continue - databag[field.alias or key] = json.dumps(value) - - return databag - - -# todo use models from charm-relation-interfaces -if int(pydantic.version.VERSION.split(".")[0]) < 2: - - class ProtocolType(BaseModel): # type: ignore - """Protocol Type.""" - - class Config: - """Pydantic config.""" - - use_enum_values = True - """Allow serializing enum values.""" - - name: str = Field( - ..., - description="Receiver protocol name. What protocols are supported (and what they are called) " - "may differ per provider.", - examples=["otlp_grpc", "otlp_http", "tempo_http"], - ) - - type: TransportProtocolType = Field( - ..., - description="The transport protocol used by this receiver.", - examples=["http", "grpc"], - ) - -else: - - class ProtocolType(BaseModel): - """Protocol Type.""" - - model_config = ConfigDict( # type: ignore - # Allow serializing enum values. - use_enum_values=True - ) - """Pydantic config.""" - - name: str = Field( - ..., - description="Receiver protocol name. What protocols are supported (and what they are called) " - "may differ per provider.", - examples=["otlp_grpc", "otlp_http", "tempo_http"], - ) - - type: TransportProtocolType = Field( - ..., - description="The transport protocol used by this receiver.", - examples=["http", "grpc"], - ) - - -class Receiver(BaseModel): - """Specification of an active receiver.""" - - protocol: ProtocolType = Field(..., description="Receiver protocol name and type.") - url: str = Field( - ..., - description="""URL at which the receiver is reachable. If there's an ingress, it would be the external URL. - Otherwise, it would be the service's fqdn or internal IP. - If the protocol type is grpc, the url will not contain a scheme.""", - examples=[ - "http://traefik_address:2331", - "https://traefik_address:2331", - "http://tempo_public_ip:2331", - "https://tempo_public_ip:2331", - "tempo_public_ip:2331", - ], - ) - - -class TracingProviderAppData(DatabagModel): # noqa: D101 - """Application databag model for the tracing provider.""" - - receivers: List[Receiver] = Field( - ..., - description="List of all receivers enabled on the tracing provider.", - ) - - -class TracingRequirerAppData(DatabagModel): # noqa: D101 - """Application databag model for the tracing requirer.""" - - receivers: List[ReceiverProtocol] - """Requested receivers.""" - - -class _AutoSnapshotEvent(RelationEvent): - __args__: Tuple[str, ...] 
= () - __optional_kwargs__: Dict[str, Any] = {} - - @classmethod - def __attrs__(cls): - return cls.__args__ + tuple(cls.__optional_kwargs__.keys()) - - def __init__(self, handle, relation, *args, **kwargs): - super().__init__(handle, relation) - - if not len(self.__args__) == len(args): - raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args))) - - for attr, obj in zip(self.__args__, args): - setattr(self, attr, obj) - for attr, default in self.__optional_kwargs__.items(): - obj = kwargs.get(attr, default) - setattr(self, attr, obj) - - def snapshot(self) -> dict: - dct = super().snapshot() - for attr in self.__attrs__(): - obj = getattr(self, attr) - try: - dct[attr] = obj - except ValueError as e: - raise ValueError( - "cannot automagically serialize {}: " - "override this method and do it " - "manually.".format(obj) - ) from e - - return dct - - def restore(self, snapshot: dict) -> None: - super().restore(snapshot) - for attr, obj in snapshot.items(): - setattr(self, attr, obj) - - -class RelationNotFoundError(Exception): - """Raised if no relation with the given name is found.""" - - def __init__(self, relation_name: str): - self.relation_name = relation_name - self.message = "No relation named '{}' found".format(relation_name) - super().__init__(self.message) - - -class RelationInterfaceMismatchError(Exception): - """Raised if the relation with the given name has an unexpected interface.""" - - def __init__( - self, - relation_name: str, - expected_relation_interface: str, - actual_relation_interface: str, - ): - self.relation_name = relation_name - self.expected_relation_interface = expected_relation_interface - self.actual_relation_interface = actual_relation_interface - self.message = ( - "The '{}' relation has '{}' as interface rather than the expected '{}'".format( - relation_name, actual_relation_interface, expected_relation_interface - ) - ) - - super().__init__(self.message) - - -class RelationRoleMismatchError(Exception): - """Raised if the relation with the given name has a different role than expected.""" - - def __init__( - self, - relation_name: str, - expected_relation_role: RelationRole, - actual_relation_role: RelationRole, - ): - self.relation_name = relation_name - self.expected_relation_interface = expected_relation_role - self.actual_relation_role = actual_relation_role - self.message = "The '{}' relation has role '{}' rather than the expected '{}'".format( - relation_name, repr(actual_relation_role), repr(expected_relation_role) - ) - - super().__init__(self.message) - - -def _validate_relation_by_interface_and_direction( - charm: CharmBase, - relation_name: str, - expected_relation_interface: str, - expected_relation_role: RelationRole, -): - """Validate a relation. - - Verifies that the `relation_name` provided: (1) exists in metadata.yaml, - (2) declares as interface the interface name passed as `relation_interface` - and (3) has the right "direction", i.e., it is a relation that `charm` - provides or requires. - - Args: - charm: a `CharmBase` object to scan for the matching relation. - relation_name: the name of the relation to be verified. - expected_relation_interface: the interface name to be matched by the - relation named `relation_name`. - expected_relation_role: whether the `relation_name` must be either - provided or required by `charm`. - - Raises: - RelationNotFoundError: If there is no relation in the charm's metadata.yaml - with the same name as provided via `relation_name` argument. 
- RelationInterfaceMismatchError: The relation with the same name as provided - via `relation_name` argument does not have the same relation interface - as specified via the `expected_relation_interface` argument. - RelationRoleMismatchError: If the relation with the same name as provided - via `relation_name` argument does not have the same role as specified - via the `expected_relation_role` argument. - """ - if relation_name not in charm.meta.relations: - raise RelationNotFoundError(relation_name) - - relation = charm.meta.relations[relation_name] - - # fixme: why do we need to cast here? - actual_relation_interface = cast(str, relation.interface_name) - - if actual_relation_interface != expected_relation_interface: - raise RelationInterfaceMismatchError( - relation_name, expected_relation_interface, actual_relation_interface - ) - - if expected_relation_role is RelationRole.provides: - if relation_name not in charm.meta.provides: - raise RelationRoleMismatchError( - relation_name, RelationRole.provides, RelationRole.requires - ) - elif expected_relation_role is RelationRole.requires: - if relation_name not in charm.meta.requires: - raise RelationRoleMismatchError( - relation_name, RelationRole.requires, RelationRole.provides - ) - else: - raise TypeError("Unexpected RelationDirection: {}".format(expected_relation_role)) - - -class RequestEvent(RelationEvent): - """Event emitted when a remote requests a tracing endpoint.""" - - @property - def requested_receivers(self) -> List[ReceiverProtocol]: - """List of receiver protocols that have been requested.""" - relation = self.relation - app = relation.app - if not app: - raise NotReadyError("relation.app is None") - - return TracingRequirerAppData.load(relation.data[app]).receivers - - -class BrokenEvent(RelationBrokenEvent): - """Event emitted when a relation on tracing is broken.""" - - -class TracingEndpointProviderEvents(CharmEvents): - """TracingEndpointProvider events.""" - - request = EventSource(RequestEvent) - broken = EventSource(BrokenEvent) - - -class TracingEndpointProvider(Object): - """Class representing a trace receiver service.""" - - on = TracingEndpointProviderEvents() # type: ignore - - def __init__( - self, - charm: CharmBase, - external_url: Optional[str] = None, - relation_name: str = DEFAULT_RELATION_NAME, - ): - """Initialize. - - Args: - charm: a `CharmBase` instance that manages this instance of the Tempo service. - external_url: external address of the node hosting the tempo server, - if an ingress is present. - relation_name: an optional string name of the relation between `charm` - and the Tempo charmed service. The default is "tracing". - - Raises: - RelationNotFoundError: If there is no relation in the charm's metadata.yaml - with the same name as provided via `relation_name` argument. - RelationInterfaceMismatchError: The relation with the same name as provided - via `relation_name` argument does not have the `tracing` relation - interface. - RelationRoleMismatchError: If the relation with the same name as provided - via `relation_name` argument does not have the `RelationRole.requires` - role. 
- """ - _validate_relation_by_interface_and_direction( - charm, relation_name, RELATION_INTERFACE_NAME, RelationRole.provides - ) - - super().__init__(charm, relation_name + "tracing-provider") - self._charm = charm - self._external_url = external_url - self._relation_name = relation_name - self.framework.observe( - self._charm.on[relation_name].relation_joined, self._on_relation_event - ) - self.framework.observe( - self._charm.on[relation_name].relation_created, self._on_relation_event - ) - self.framework.observe( - self._charm.on[relation_name].relation_changed, self._on_relation_event - ) - self.framework.observe( - self._charm.on[relation_name].relation_broken, self._on_relation_broken_event - ) - - def _on_relation_broken_event(self, e: RelationBrokenEvent): - """Handle relation broken events.""" - self.on.broken.emit(e.relation) - - def _on_relation_event(self, e: RelationEvent): - """Handle relation created/joined/changed events.""" - if self.is_requirer_ready(e.relation): - self.on.request.emit(e.relation) - - def is_requirer_ready(self, relation: Relation): - """Attempt to determine if requirer has already populated app data.""" - try: - self._get_requested_protocols(relation) - except NotReadyError: - return False - return True - - @staticmethod - def _get_requested_protocols(relation: Relation): - app = relation.app - if not app: - raise NotReadyError("relation.app is None") - - try: - databag = TracingRequirerAppData.load(relation.data[app]) - except (json.JSONDecodeError, pydantic.ValidationError, DataValidationError): - logger.info(f"relation {relation} is not ready to talk tracing") - raise NotReadyError() - return databag.receivers - - def requested_protocols(self): - """All receiver protocols that have been requested by our related apps.""" - requested_protocols = set() - for relation in self.relations: - try: - protocols = self._get_requested_protocols(relation) - except NotReadyError: - continue - requested_protocols.update(protocols) - return requested_protocols - - @property - def relations(self) -> List[Relation]: - """All relations active on this endpoint.""" - return self._charm.model.relations[self._relation_name] - - def publish_receivers(self, receivers: Sequence[RawReceiver]): - """Let all requirers know that these receivers are active and listening.""" - if not self._charm.unit.is_leader(): - raise RuntimeError("only leader can do this") - - for relation in self.relations: - try: - TracingProviderAppData( - receivers=[ - Receiver( - url=url, - protocol=ProtocolType( - name=protocol, - type=receiver_protocol_to_transport_protocol[protocol], - ), - ) - for protocol, url in receivers - ], - ).dump(relation.data[self._charm.app]) - - except ModelError as e: - # args are bytes - msg = e.args[0] - if isinstance(msg, bytes): - if msg.startswith( - b"ERROR cannot read relation application settings: permission denied" - ): - logger.error( - f"encountered error {e} while attempting to update_relation_data." - f"The relation must be gone." 
- ) - continue - raise - - -class EndpointRemovedEvent(RelationBrokenEvent): - """Event representing a change in one of the receiver endpoints.""" - - -class EndpointChangedEvent(_AutoSnapshotEvent): - """Event representing a change in one of the receiver endpoints.""" - - __args__ = ("_receivers",) - - if TYPE_CHECKING: - _receivers = [] # type: List[dict] - - @property - def receivers(self) -> List[Receiver]: - """Cast receivers back from dict.""" - return [Receiver(**i) for i in self._receivers] - - -class TracingEndpointRequirerEvents(CharmEvents): - """TracingEndpointRequirer events.""" - - endpoint_changed = EventSource(EndpointChangedEvent) - endpoint_removed = EventSource(EndpointRemovedEvent) - - -class TracingEndpointRequirer(Object): - """A tracing endpoint for Tempo.""" - - on = TracingEndpointRequirerEvents() # type: ignore - - def __init__( - self, - charm: CharmBase, - relation_name: str = DEFAULT_RELATION_NAME, - protocols: Optional[List[ReceiverProtocol]] = None, - ): - """Construct a tracing requirer for a Tempo charm. - - If your application supports pushing traces to a distributed tracing backend, the - `TracingEndpointRequirer` object enables your charm to easily access endpoint information - exchanged over a `tracing` relation interface. - - Args: - charm: a `CharmBase` object that manages this - `TracingEndpointRequirer` object. Typically, this is `self` in the instantiating - class. - relation_name: an optional string name of the relation between `charm` - and the Tempo charmed service. The default is "tracing". It is strongly - advised not to change the default, so that people deploying your charm will have a - consistent experience with all other charms that provide tracing endpoints. - protocols: optional list of protocols that the charm intends to send traces with. - The provider will enable receivers for these and only these protocols, - so be sure to enable all protocols the charm or its workload are going to need. - - Raises: - RelationNotFoundError: If there is no relation in the charm's metadata.yaml - with the same name as provided via `relation_name` argument. - RelationInterfaceMismatchError: The relation with the same name as provided - via `relation_name` argument does not have the `tracing` relation - interface. - RelationRoleMismatchError: If the relation with the same name as provided - via `relation_name` argument does not have the `RelationRole.provides` - role. - """ - _validate_relation_by_interface_and_direction( - charm, relation_name, RELATION_INTERFACE_NAME, RelationRole.requires - ) - - super().__init__(charm, relation_name) - - self._is_single_endpoint = charm.meta.relations[relation_name].limit == 1 - - self._charm = charm - self._relation_name = relation_name - - events = self._charm.on[self._relation_name] - self.framework.observe(events.relation_changed, self._on_tracing_relation_changed) - self.framework.observe(events.relation_broken, self._on_tracing_relation_broken) - - if protocols: - self.request_protocols(protocols) - - def request_protocols( - self, protocols: Sequence[ReceiverProtocol], relation: Optional[Relation] = None - ): - """Publish the list of protocols which the provider should activate.""" - # todo: should we check if _is_single_endpoint and len(self.relations) > 1 and raise, here? - relations = [relation] if relation else self.relations - - if not protocols: - # empty sequence - raise ValueError( - "You need to pass a nonempty sequence of protocols to `request_protocols`." 
- ) - - try: - if self._charm.unit.is_leader(): - for relation in relations: - TracingRequirerAppData( - receivers=list(protocols), - ).dump(relation.data[self._charm.app]) - - except ModelError as e: - # args are bytes - msg = e.args[0] - if isinstance(msg, bytes): - if msg.startswith( - b"ERROR cannot read relation application settings: permission denied" - ): - logger.error( - f"encountered error {e} while attempting to request_protocols." - f"The relation must be gone." - ) - return - raise - - @property - def relations(self) -> List[Relation]: - """The tracing relations associated with this endpoint.""" - return self._charm.model.relations[self._relation_name] - - @property - def _relation(self) -> Optional[Relation]: - """If this wraps a single endpoint, the relation bound to it, if any.""" - if not self._is_single_endpoint: - objname = type(self).__name__ - raise AmbiguousRelationUsageError( - f"This {objname} wraps a {self._relation_name} endpoint that has " - "limit != 1. We can't determine what relation, of the possibly many, you are " - f"talking about. Please pass a relation instance while calling {objname}, " - "or set limit=1 in the charm metadata." - ) - relations = self.relations - return relations[0] if relations else None - - def is_ready(self, relation: Optional[Relation] = None): - """Is this endpoint ready?""" - relation = relation or self._relation - if not relation: - logger.debug(f"no relation on {self._relation_name !r}: tracing not ready") - return False - if relation.data is None: - logger.error(f"relation data is None for {relation}") - return False - if not relation.app: - logger.error(f"{relation} event received but there is no relation.app") - return False - try: - databag = dict(relation.data[relation.app]) - TracingProviderAppData.load(databag) - - except (json.JSONDecodeError, pydantic.ValidationError, DataValidationError): - logger.info(f"failed validating relation data for {relation}") - return False - return True - - def _on_tracing_relation_changed(self, event): - """Notify the providers that there is new endpoint information available.""" - relation = event.relation - if not self.is_ready(relation): - self.on.endpoint_removed.emit(relation) # type: ignore - return - - data = TracingProviderAppData.load(relation.data[relation.app]) - self.on.endpoint_changed.emit(relation, [i.dict() for i in data.receivers]) # type: ignore - - def _on_tracing_relation_broken(self, event: RelationBrokenEvent): - """Notify the providers that the endpoint is broken.""" - relation = event.relation - self.on.endpoint_removed.emit(relation) # type: ignore - - def get_all_endpoints( - self, relation: Optional[Relation] = None - ) -> Optional[TracingProviderAppData]: - """Unmarshalled relation data.""" - relation = relation or self._relation - if not self.is_ready(relation): - return - return TracingProviderAppData.load(relation.data[relation.app]) # type: ignore - - def _get_endpoint( - self, relation: Optional[Relation], protocol: ReceiverProtocol - ) -> Optional[str]: - app_data = self.get_all_endpoints(relation) - if not app_data: - return None - receivers: List[Receiver] = list( - filter(lambda i: i.protocol.name == protocol, app_data.receivers) - ) - if not receivers: - # it can happen if the charm requests tracing protocols, but the relay (such as grafana-agent) isn't yet - # connected to the tracing backend. 
In this case, it's not an error the charm author can do anything about - logger.warning(f"no receiver found with protocol={protocol!r}.") - return - if len(receivers) > 1: - # if we have more than 1 receiver that matches, it shouldn't matter which receiver we'll be using. - logger.warning( - f"too many receivers with protocol={protocol!r}; using first one. Found: {receivers}" - ) - - receiver = receivers[0] - return receiver.url - - def get_endpoint( - self, protocol: ReceiverProtocol, relation: Optional[Relation] = None - ) -> Optional[str]: - """Receiver endpoint for the given protocol. - - It could happen that this function gets called before the provider publishes the endpoints. - In such a scenario, if a non-leader unit calls this function, a permission denied exception will be raised due to - restricted access. To prevent this, this function needs to be guarded by the `is_ready` check. - - Raises: - ProtocolNotRequestedError: - If the charm unit is the leader unit and attempts to obtain an endpoint for a protocol it did not request. - """ - endpoint = self._get_endpoint(relation or self._relation, protocol=protocol) - if not endpoint: - requested_protocols = set() - relations = [relation] if relation else self.relations - for relation in relations: - try: - databag = TracingRequirerAppData.load(relation.data[self._charm.app]) - except DataValidationError: - continue - - requested_protocols.update(databag.receivers) - - if protocol not in requested_protocols: - raise ProtocolNotRequestedError(protocol, relation) - - return None - return endpoint - - -def charm_tracing_config( - endpoint_requirer: TracingEndpointRequirer, cert_path: Optional[Union[Path, str]] -) -> Tuple[Optional[str], Optional[str]]: - """Return the charm_tracing config you likely want. - - If no endpoint is provided: - disable charm tracing. - If https endpoint is provided but cert_path is not found on disk: - disable charm tracing. - If https endpoint is provided and cert_path is None: - ERROR - Else: - proceed with charm tracing (with or without tls, as appropriate) - - Usage: - >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config - >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") - >>> class MyCharm(...): - >>> _cert_path = "/path/to/cert/on/charm/container.crt" - >>> def __init__(self, ...): - >>> self.tracing = TracingEndpointRequirer(...) - >>> self.my_endpoint, self.cert_path = charm_tracing_config( - ... self.tracing, self._cert_path) - """ - if not endpoint_requirer.is_ready(): - return None, None - - endpoint = endpoint_requirer.get_endpoint("otlp_http") - if not endpoint: - return None, None - - is_https = endpoint.startswith("https://") - - if is_https: - if cert_path is None or not Path(cert_path).exists(): - # disable charm tracing until we obtain a cert to prevent tls errors - logger.error( - "Tracing endpoint is https, but no server_cert has been passed." - "Please point @trace_charm to a `server_cert` attr. " - "This might also mean that the tracing provider is related to a " - "certificates provider, but this application is not (yet). " - "In that case, you might just have to wait a bit for the certificates " - "integration to settle. 
" - ) - return None, None - return endpoint, str(cert_path) - else: - return endpoint, None diff --git a/kubernetes/poetry.lock b/kubernetes/poetry.lock index 63f53774c..6f4ac8570 100644 --- a/kubernetes/poetry.lock +++ b/kubernetes/poetry.lock @@ -91,18 +91,6 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6) ; platform_python_implementation == \"CPython\" and python_version >= \"3.8\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.8\""] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -groups = ["charm-libs"] -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "backports-strenum" version = "1.3.1" @@ -293,7 +281,7 @@ version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, @@ -556,24 +544,6 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "deprecated" -version = "1.2.14" -description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["charm-libs"] -files = [ - {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, - {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, -] - -[package.dependencies] -wrapt = ">=1.10,<2" - -[package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] - [[package]] name = "exceptiongroup" version = "1.2.1" @@ -614,24 +584,6 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] -[[package]] -name = "googleapis-common-protos" -version = "1.63.1" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "googleapis-common-protos-1.63.1.tar.gz", hash = "sha256:c6442f7a0a6b2a80369457d79e6672bb7dcbaab88e0848302497e3ec80780a6a"}, - {file = "googleapis_common_protos-1.63.1-py2.py3-none-any.whl", hash = "sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877"}, -] - -[package.dependencies] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] - [[package]] name = "h11" version = "0.16.0" @@ -727,7 +679,7 @@ version = "6.11.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, @@ -1106,123 +1058,92 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "opentelemetry-api" -version = "1.21.0" +version = "1.39.1" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, - {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"}, -] - -[package.dependencies] -deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<7.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-common" -version = "1.21.0" -description = "OpenTelemetry Protobuf encoding" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0.tar.gz", hash = "sha256:61db274d8a68d636fb2ec2a0f281922949361cdd8236e25ff5539edf942b3226"}, -] - -[package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -opentelemetry-proto = "1.21.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-http" -version = "1.21.0" -description = "OpenTelemetry Collector Protobuf over HTTP Exporter" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - 
{file = "opentelemetry_exporter_otlp_proto_http-1.21.0-py3-none-any.whl", hash = "sha256:56837773de6fb2714c01fc4895caebe876f6397bbc4d16afddf89e1299a55ee2"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.21.0.tar.gz", hash = "sha256:19d60afa4ae8597f7ef61ad75c8b6c6b7ef8cb73a33fb4aed4dbc86d5c8d3301"}, -] - -[package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -deprecated = ">=1.2.6" -googleapis-common-protos = ">=1.52,<2.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.21.0" -opentelemetry-proto = "1.21.0" -opentelemetry-sdk = ">=1.21.0,<1.22.0" -requests = ">=2.7,<3.0" - -[package.extras] -test = ["responses (==0.22.0)"] - -[[package]] -name = "opentelemetry-proto" -version = "1.21.0" -description = "OpenTelemetry Python Proto" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main", "charm-libs", "integration"] files = [ - {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"}, - {file = "opentelemetry_proto-1.21.0.tar.gz", hash = "sha256:7d5172c29ed1b525b5ecf4ebe758c7138a9224441b3cfe683d0a237c33b1941f"}, + {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"}, + {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"}, ] [package.dependencies] -protobuf = ">=3.19,<5.0" +importlib-metadata = ">=6.0,<8.8.0" +typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-sdk" -version = "1.21.0" +version = "1.39.1" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, - {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"}, + {file = "opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c"}, + {file = "opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6"}, ] [package.dependencies] -opentelemetry-api = "1.21.0" -opentelemetry-semantic-conventions = "0.42b0" -typing-extensions = ">=3.7.4" +opentelemetry-api = "1.39.1" +opentelemetry-semantic-conventions = "0.60b1" +typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.42b0" +version = "0.60b1" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, - {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"}, + {file = "opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb"}, + {file = "opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953"}, ] +[package.dependencies] 
+opentelemetry-api = "1.39.1" +typing-extensions = ">=4.5.0" + [[package]] name = "ops" -version = "2.15.0" +version = "2.23.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" groups = ["main", "charm-libs", "integration"] files = [ - {file = "ops-2.15.0-py3-none-any.whl", hash = "sha256:8e47ab8a814301776b0ff42b32544ebdece7f1639168d2c86dc7a25930d2e493"}, - {file = "ops-2.15.0.tar.gz", hash = "sha256:f3bad7417e98e8f390523fad097702eed16e99b38a25e9fe856aad226474b057"}, + {file = "ops-2.23.1-py3-none-any.whl", hash = "sha256:fdf58163beafd25180c12a4c7efaf1e76e5f8710508a97840c07055bb78b0c77"}, + {file = "ops-2.23.1.tar.gz", hash = "sha256:aecacd67ef7ca913f63f397e0330bfa93d70529a3ef71ed2d99e2bc232564ae3"}, ] [package.dependencies] +importlib-metadata = "*" +opentelemetry-api = ">=1.0,<2.0" +ops-tracing = {version = "2.23.1", optional = true, markers = "extra == \"tracing\""} PyYAML = "==6.*" websocket-client = "==1.*" [package.extras] -docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (==6.2.1)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] +testing = ["ops-scenario (==7.23.1)"] +tracing = ["ops-tracing (==2.23.1)"] + +[[package]] +name = "ops-tracing" +version = "2.23.1" +description = "The tracing facility for the Ops library." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "ops_tracing-2.23.1-py3-none-any.whl", hash = "sha256:2943b069ecc8b6b5eb700f1ca4369ca35e11a4bdd58527069e1372c787aa7bc8"}, + {file = "ops_tracing-2.23.1.tar.gz", hash = "sha256:a5dece112f7ae4b1fb947ff090a2ffba56cb7c56af26b6f797b8b00fe1e50585"}, +] + +[package.dependencies] +opentelemetry-sdk = ">=1.30,<2.0" +ops = "2.23.1" +pydantic = "*" [[package]] name = "packaging" @@ -1307,7 +1228,7 @@ version = "3.20.3" description = "Protocol Buffers" optional = false python-versions = ">=3.7" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, @@ -1379,7 +1300,7 @@ version = "1.10.15" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, @@ -1637,7 +1558,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1937,7 +1858,6 @@ files = [ {file = "typing_extensions-4.12.1-py3-none-any.whl", hash = "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a"}, {file = "typing_extensions-4.12.1.tar.gz", hash = "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"}, ] -markers = {main = "python_version == \"3.10\""} [[package]] name = "typing-inspect" @@ -1961,7 +1881,7 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "integration"] files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -1975,20 +1895,20 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "websocket-client" -version = "1.8.0" +version = "1.9.0" description = "WebSocket client for Python with low level API options" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "charm-libs", "integration"] files = [ - {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, - {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, + {file = "websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef"}, + {file = "websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98"}, ] [package.extras] -docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx_rtd_theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] -test = ["websockets"] +test = ["pytest", "websockets"] [[package]] name = "websockets" @@ -2069,93 +1989,13 @@ files = [ {file = "websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8"}, ] -[[package]] -name = "wrapt" -version = "1.16.0" -description = "Module for decorators, wrappers and monkey patching." 
-optional = false -python-versions = ">=3.6" -groups = ["charm-libs"] -files = [ - {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, - {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, - {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, - {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, - {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, - {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, - {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, - {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, - {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, - {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, - {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, - {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, - {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, - {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, - {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, - {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, - {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, - {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, - {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, - {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, -] - [[package]] name = "zipp" version = "3.19.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, @@ -2168,4 +2008,4 @@ test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.funct [metadata] lock-version = "2.1" python-versions = "~=3.10" -content-hash = "967e4e13222da4f822b94d30e187d85b91a2b4ea105d1c902cb9967612b27577" +content-hash = "e559e07b9ecdd5dd6da59495b16846784c4255e6b25b7ec69af7d2068d3a2fad" diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index b98f4b35b..1dcdd36c7 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -7,7 +7,7 @@ dependencies = [ "boto3~=1.28", "jinja2~=3.1", "lightkube~=0.15.0", - "ops~=2.15", + "ops[tracing]~=2.21", "pyyaml~=6.0", "tenacity~=8.2", ] @@ -31,8 +31,6 @@ charm-libs = [ "jsonschema", # loki_k8s/v0/loki_push_api.py and prometheus_k8s/v0/prometheus_scrape.py "cosl>=0.0.50", - # tempo_coordinator_k8s/v0/charm_tracing.py - "opentelemetry-exporter-otlp-proto-http==1.21.0", ] format = [ "ruff~=0.12.7", diff --git a/kubernetes/src/charm.py b/kubernetes/src/charm.py index 1e753d9ba..e0bb755ed 100755 --- a/kubernetes/src/charm.py +++ b/kubernetes/src/charm.py @@ -52,8 +52,6 @@ from charms.mysql.v0.tls import MySQLTLS from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from charms.rolling_ops.v0.rollingops import RollingOpsManager -from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm -from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer from ops import EventBase, ModelError, RelationBrokenEvent, RelationCreatedEvent from ops.charm import RelationChangedEvent, RelationDepartedEvent, UpdateStatusEvent from ops.model import ( @@ -65,6 +63,7 @@ WaitingStatus, ) from ops.pebble import ChangeError, Layer +from ops_tracing import Tracing from tenacity import RetryError, Retrying, stop_after_attempt from config import CharmConfig, MySQLConfig @@ -94,8 +93,6 @@ ROOT_PASSWORD_KEY, SERVER_CONFIG_PASSWORD_KEY, SERVER_CONFIG_USERNAME, - TRACING_PROTOCOL, - TRACING_RELATION_NAME, ) from k8s_helpers import KubernetesHelpers from log_rotate_manager import LogRotateManager @@ -110,29 +107,6 @@ logger = logging.getLogger(__name__) -@trace_charm( - tracing_endpoint="tracing_endpoint", - extra_types=( - GrafanaDashboardProvider, - KubernetesHelpers, - LogProxyConsumer, - LogRotateManager, - MetricsEndpointProvider, - MySQL, - MySQLAsyncReplicationConsumer, - MySQLAsyncReplicationOffer, - MySQLBackups, - MySQLConfig, - MySQLK8sUpgrade, - MySQLProvider, - MySQLRelation, - MySQLRootRelation, - MySQLTLS, - RollingOpsManager, - RotateMySQLLogs, - S3Requirer, - ), -) class MySQLOperatorCharm(MySQLCharmBase, TypedCharmBase[CharmConfig]): """Operator framework charm for MySQL.""" @@ -202,15 +176,7 @@ def __init__(self, *args): self.replication_offer = 
MySQLAsyncReplicationOffer(self) self.replication_consumer = MySQLAsyncReplicationConsumer(self) - self.tracing = TracingEndpointRequirer( - self, protocols=[TRACING_PROTOCOL], relation_name=TRACING_RELATION_NAME - ) - - @property - def tracing_endpoint(self) -> Optional[str]: - """Otlp http endpoint for charm instrumentation.""" - if self.tracing.is_ready(): - return self.tracing.get_endpoint(TRACING_PROTOCOL) + self.tracing = Tracing(self, tracing_relation_name="tracing") @property def _mysql(self) -> MySQL: diff --git a/kubernetes/src/constants.py b/kubernetes/src/constants.py index 5ed29a2df..f1a8a633a 100644 --- a/kubernetes/src/constants.py +++ b/kubernetes/src/constants.py @@ -67,5 +67,3 @@ "certificate": "cert", "certificate-authority": "ca", } -TRACING_RELATION_NAME = "tracing" -TRACING_PROTOCOL = "otlp_http" diff --git a/kubernetes/tests/unit/conftest.py b/kubernetes/tests/unit/conftest.py index ea09858ff..4eee8bc71 100644 --- a/kubernetes/tests/unit/conftest.py +++ b/kubernetes/tests/unit/conftest.py @@ -2,7 +2,6 @@ # See LICENSE file for licensing details. import pytest -from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing_disabled @pytest.fixture(autouse=True) @@ -13,9 +12,3 @@ def with_juju_secrets(monkeypatch): @pytest.fixture def without_juju_secrets(monkeypatch): monkeypatch.setattr("ops.JujuVersion.has_secrets", False) - - -@pytest.fixture(autouse=True) -def disable_charm_tracing(): - with charm_tracing_disabled(): - yield diff --git a/machines/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/machines/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py deleted file mode 100644 index a9b6deeb6..000000000 --- a/machines/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ /dev/null @@ -1,1090 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. - -"""This charm library contains utilities to instrument your Charm with opentelemetry tracing data collection. - -(yes! charm code, not workload code!) - -This means that, if your charm is related to, for example, COS' Tempo charm, you will be able to inspect -in real time from the Grafana dashboard the execution flow of your charm. - -# Quickstart -Fetch the following charm libs: - - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing - -Then edit your charm code to include: - -```python -# import the necessary charm libs -from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config -from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing - -# decorate your charm class with charm_tracing: -@charm_tracing( - # forward-declare the instance attributes that the instrumentor will look up to obtain the - # tempo endpoint and server certificate - tracing_endpoint="tracing_endpoint", - server_cert="server_cert" -) -class MyCharm(CharmBase): - _path_to_cert = "/path/to/cert.crt" - # path to cert file **in the charm container**. Its presence will be used to determine whether - # the charm is ready to use tls for encrypting charm traces. If your charm does not support tls, - # you can ignore this and pass None to charm_tracing_config. - # If you do support TLS, you'll need to make sure that the server cert is copied to this location - # and kept up to date so the instrumentor can use it. - - def __init__(self, ...): - ... - self.tracing = TracingEndpointRequirer(self, ...) 
- self.tracing_endpoint, self.server_cert = charm_tracing_config(self.tracing, self._path_to_cert) -``` - -# Detailed usage -To use this library, you need to do two things: -1) decorate your charm class with - -`@trace_charm(tracing_endpoint="my_tracing_endpoint")` - -2) add to your charm a "my_tracing_endpoint" (you can name this attribute whatever you like) -**property**, **method** or **instance attribute** that returns an otlp http/https endpoint url. -If you are using the ``charms.tempo_coordinator_k8s.v0.tracing.TracingEndpointRequirer`` as -``self.tracing = TracingEndpointRequirer(self)``, the implementation could be: - -``` - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.get_endpoint("otlp_http") - else: - return None -``` - -At this point your charm will be automatically instrumented so that: -- charm execution starts a trace, containing - - every event as a span (including custom events) - - every charm method call (except dunders) as a span - -We recommend that you scale up your tracing provider and relate it to an ingress so that your tracing requests -go through the ingress and get load balanced across all units. Otherwise, if the provider's leader goes down, your tracing goes down. - - -## TLS support -If your charm integrates with a TLS provider which is also trusted by the tracing provider (the Tempo charm), -you can configure ``charm_tracing`` to use TLS by passing a ``server_cert`` parameter to the decorator. - -If your charm is not trusting the same CA as the Tempo endpoint it is sending traces to, -you'll need to implement a cert-transfer relation to obtain the CA certificate from the same -CA that Tempo is using. - -For example: -``` -from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert" -) -class MyCharm(CharmBase): - self._server_cert = "/path/to/server.crt" - ... - - def on_tls_changed(self, e) -> Optional[str]: - # update the server cert on the charm container for charm tracing - Path(self._server_cert).write_text(self.get_server_cert()) - - def on_tls_broken(self, e) -> Optional[str]: - # remove the server cert so charm_tracing won't try to use tls anymore - Path(self._server_cert).unlink() -``` - - -## More fine-grained manual instrumentation -if you wish to add more spans to the trace, you can do so by getting a hold of the tracer like so: -``` -import opentelemetry -... -def get_tracer(self) -> opentelemetry.trace.Tracer: - return opentelemetry.trace.get_tracer(type(self).__name__) -``` - -By default, the tracer is named after the charm type. If you wish to override that, you can pass -a different ``service_name`` argument to ``trace_charm``. - -See the official opentelemetry Python SDK documentation for usage: -https://opentelemetry-python.readthedocs.io/en/latest/ - - -## Caching traces -The `trace_charm` machinery will buffer any traces collected during charm execution and store them -to a file on the charm container until a tracing backend becomes available. At that point, it will -flush them to the tracing receiver. 
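The "More fine-grained manual instrumentation" section above stops at obtaining a tracer and defers span creation to the upstream SDK docs. For review purposes, a minimal sketch of opening a custom span with that tracer (plain opentelemetry-python API; the method name `_reconcile` and the span name are illustrative assumptions, not part of the deleted library):

```python
import opentelemetry.trace


def _reconcile(self):
    # Same naming convention as the get_tracer() helper shown in the
    # docstring above: the tracer is named after the charm type.
    tracer = opentelemetry.trace.get_tracer(type(self).__name__)
    # A span opened while a charm event is being processed nests under
    # the span that @trace_charm opened for the current dispatch, so it
    # shows up as a child span in the trace.
    with tracer.start_as_current_span("reconcile-workload-config"):
        ...  # the work to be traced goes here
```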
- -By default, the buffer is configured to start dropping old traces if any of these conditions apply: - -- the storage size exceeds 10 MiB -- the number of buffered events exceeds 100 - -You can configure this by, for example: - -```python -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert", - # only cache up to 42 events - buffer_max_events=42, - # only cache up to 42 MiB - buffer_max_size_mib=42, # minimum 10! -) -class MyCharm(CharmBase): - ... -``` - -Note that setting `buffer_max_events` to 0 will effectively disable the buffer. - -The path of the buffer file is by default in the charm's execution root, which for k8s charms means -that in case of pod churn, the cache will be lost. The recommended solution is to use an existing storage -(or add a new one) such as: - -```yaml -storage: - data: - type: filesystem - location: /charm-traces -``` - -and then configure the `@trace_charm` decorator to use it as path for storing the buffer: -```python -@trace_charm( - tracing_endpoint="my_tracing_endpoint", - server_cert="_server_cert", - # store traces to a PVC so they're not lost on pod restart. - buffer_path="/charm-traces/buffer.file", -) -class MyCharm(CharmBase): - ... -``` - -## Upgrading from `tempo_k8s.v0` - -If you are upgrading from `tempo_k8s.v0.charm_tracing` (note that since then, the charm library moved to -`tempo_coordinator_k8s.v0.charm_tracing`), you need to take the following steps (assuming you already -have the newest version of the library in your charm): -1) If you need the dependency for your tests, add the following dependency to your charm project -(or, if your project had a dependency on `opentelemetry-exporter-otlp-proto-grpc` only because -of `charm_tracing` v0, you can replace it with): - -`opentelemetry-exporter-otlp-proto-http>=1.21.0`. - -2) Update the charm method referenced to from ``@trace`` and ``@trace_charm``, -to return from ``TracingEndpointRequirer.get_endpoint("otlp_http")`` instead of ``grpc_http``. -For example: - -``` - from charms.tempo_k8s.v0.charm_tracing import trace_charm - - @trace_charm( - tracing_endpoint="my_tracing_endpoint", - ) - class MyCharm(CharmBase): - - ... - - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.otlp_grpc_endpoint() # OLD API, DEPRECATED. - else: - return None -``` - -needs to be replaced with: - -``` - from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - - @trace_charm( - tracing_endpoint="my_tracing_endpoint", - ) - class MyCharm(CharmBase): - - ... - - @property - def my_tracing_endpoint(self) -> Optional[str]: - '''Tempo endpoint for charm tracing''' - if self.tracing.is_ready(): - return self.tracing.get_endpoint("otlp_http") # NEW API, use this. - else: - return None -``` - -3) If you were passing a certificate (str) using `server_cert`, you need to change it to -provide an *absolute* path to the certificate file instead. -""" -import typing - -from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( - encode_spans, -) -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter - - -def _remove_stale_otel_sdk_packages(): - """Hack to remove stale opentelemetry sdk packages from the charm's python venv. - - See https://github.com/canonical/grafana-agent-operator/issues/146 and - https://bugs.launchpad.net/juju/+bug/2058335 for more context. 
This patch can be removed after
-    this juju issue is resolved and sufficient time has passed to expect that most users of this
-    library have migrated to the patched version of juju. When this patch is removed, un-ignore
-    rule E402 for this file (see the [tool.ruff.lint.per-file-ignores] setting in pyproject.toml).
-
-    This only has an effect if executed on an upgrade-charm event.
-    """
-    # all imports are local to keep this function standalone, side-effect-free, and easy to revert later
-    import os
-
-    if os.getenv("JUJU_DISPATCH_PATH") != "hooks/upgrade-charm":
-        return
-
-    import logging
-    import shutil
-    from collections import defaultdict
-
-    from importlib_metadata import distributions
-
-    otel_logger = logging.getLogger("charm_tracing_otel_patcher")
-    otel_logger.debug("Applying _remove_stale_otel_sdk_packages patch on charm upgrade")
-    # group by name all distributions starting with "opentelemetry_"
-    otel_distributions = defaultdict(list)
-    for distribution in distributions():
-        name = distribution._normalized_name  # type: ignore
-        if name.startswith("opentelemetry_"):
-            otel_distributions[name].append(distribution)
-
-    otel_logger.debug(f"Found {len(otel_distributions)} opentelemetry distributions")
-
-    # If we have multiple distributions with the same name, remove any that have 0 associated files
-    for name, distributions_ in otel_distributions.items():
-        if len(distributions_) <= 1:
-            continue
-
-        otel_logger.debug(f"Package {name} has multiple ({len(distributions_)}) distributions.")
-        for distribution in distributions_:
-            if not distribution.files:  # Not None or empty list
-                path = distribution._path  # type: ignore
-                otel_logger.info(f"Removing empty distribution of {name} at {path}.")
-                shutil.rmtree(path)
-
-    otel_logger.debug("Successfully applied _remove_stale_otel_sdk_packages patch.")
-
-
-# apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm.
-# it could cause trouble if someone ever decides to implement their own tracer in parallel
-# to ours before the charm has inited. We assume they won't.
-_remove_stale_otel_sdk_packages() - -import functools -import inspect -import logging -import os -from contextlib import contextmanager -from contextvars import Context, ContextVar, copy_context -from pathlib import Path -from typing import ( - Any, - Callable, - Generator, - List, - Optional, - Sequence, - Type, - TypeVar, - Union, - cast, -) - -import opentelemetry -import ops -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider -from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor, - SpanExporter, - SpanExportResult, -) -from opentelemetry.trace import INVALID_SPAN, Tracer -from opentelemetry.trace import get_current_span as otlp_get_current_span -from opentelemetry.trace import ( - get_tracer, - get_tracer_provider, - set_span_in_context, - set_tracer_provider, -) -from ops.charm import CharmBase -from ops.framework import Framework - -# The unique Charmhub library identifier, never change it -LIBID = "01780f1e588c42c3976d26780fdf9b89" - -# Increment this major API version when introducing breaking changes -LIBAPI = 0 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version - -LIBPATCH = 5 - -PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] - -logger = logging.getLogger("tracing") -dev_logger = logging.getLogger("tracing-dev") - -# set this to 0 if you are debugging/developing this library source -dev_logger.setLevel(logging.ERROR) - -_CharmType = Type[CharmBase] # the type CharmBase and any subclass thereof -_C = TypeVar("_C", bound=_CharmType) -_T = TypeVar("_T", bound=type) -_F = TypeVar("_F", bound=Type[Callable]) -tracer: ContextVar[Tracer] = ContextVar("tracer") -_GetterType = Union[Callable[[_CharmType], Optional[str]], property] - -CHARM_TRACING_ENABLED = "CHARM_TRACING_ENABLED" -BUFFER_DEFAULT_CACHE_FILE_NAME = ".charm_tracing_buffer.raw" -# we store the buffer as raw otlp-native protobuf (bytes) since it's hard to serialize/deserialize it in -# any portable format. Json dumping is supported, but loading isn't. -# cfr: https://github.com/open-telemetry/opentelemetry-python/issues/1003 - -BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB = 10 -_BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN = 10 -BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH = 100 -_MiB_TO_B = 2**20 # megabyte to byte conversion rate -_OTLP_SPAN_EXPORTER_TIMEOUT = 1 -"""Timeout in seconds that the OTLP span exporter has to push traces to the backend.""" - - -class _Buffer: - """Handles buffering for spans emitted while no tracing backend is configured or available. - - Use the max_event_history_length_buffering param of @trace_charm to tune - the amount of memory that this will hog on your units. - - The buffer is formatted as a bespoke byte dump (protobuf limitation). - We cannot store them as json because that is not well-supported by the sdk - (see https://github.com/open-telemetry/opentelemetry-python/issues/3364). 
-    """
-
-    _SPANSEP = b"__CHARM_TRACING_BUFFER_SPAN_SEP__"
-
-    def __init__(self, db_file: Path, max_event_history_length: int, max_buffer_size_mib: int):
-        self._db_file = db_file
-        self._max_event_history_length = max_event_history_length
-        self._max_buffer_size_mib = max(max_buffer_size_mib, _BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN)
-
-        # set by caller
-        self.exporter: Optional[OTLPSpanExporter] = None
-
-    def save(self, spans: typing.Sequence[ReadableSpan]):
-        """Save the spans collected by this exporter to the cache file.
-
-        This method should be as fail-safe as possible.
-        """
-        if self._max_event_history_length < 1:
-            dev_logger.debug("buffer disabled: max history length < 1")
-            return
-
-        current_history_length = len(self.load())
-        new_history_length = current_history_length + len(spans)
-        if (diff := self._max_event_history_length - new_history_length) < 0:
-            self.drop(diff)
-        self._save(spans)
-
-    def _serialize(self, spans: Sequence[ReadableSpan]) -> bytes:
-        # encode because otherwise we can't json-dump them
-        return encode_spans(spans).SerializeToString()
-
-    def _save(self, spans: Sequence[ReadableSpan], replace: bool = False):
-        dev_logger.debug(f"saving {len(spans)} new spans to buffer")
-        old = [] if replace else self.load()
-        new = self._serialize(spans)
-
-        try:
-            # if the buffer exceeds the size limit, we start dropping old spans until it no longer does
-
-            while len((new + self._SPANSEP.join(old))) > (self._max_buffer_size_mib * _MiB_TO_B):
-                if not old:
-                    # if we've already dropped all spans and still we can't get under the
-                    # size limit, we can't save this span
-                    logger.error(
-                        f"span exceeds total buffer size limit ({self._max_buffer_size_mib}MiB); "
-                        f"buffering FAILED"
-                    )
-                    return
-
-                old = old[1:]
-                logger.warning(
-                    f"buffer size exceeds {self._max_buffer_size_mib}MiB; dropping older spans... "
-                    f"Please increase the buffer size, disable buffering, or ensure the spans can be flushed."
-                )
-
-            self._db_file.write_bytes(new + self._SPANSEP.join(old))
-        except Exception:
-            logger.exception("error buffering spans")
-
-    def load(self) -> List[bytes]:
-        """Load currently buffered spans from the cache file.
-
-        This method should be as fail-safe as possible.
-        """
-        if not self._db_file.exists():
-            dev_logger.debug("buffer file not found. buffer empty.")
-            return []
-        try:
-            spans = self._db_file.read_bytes().split(self._SPANSEP)
-        except Exception:
-            logger.exception(f"error parsing {self._db_file}")
-            return []
-        return spans
-
-    def drop(self, n_spans: Optional[int] = None):
-        """Drop some currently buffered spans from the cache file."""
-        current = self.load()
-        if n_spans:
-            dev_logger.debug(f"dropping {n_spans} spans from buffer")
-            new = current[n_spans:]
-        else:
-            dev_logger.debug("emptying buffer")
-            new = []
-
-        self._db_file.write_bytes(self._SPANSEP.join(new))
-
-    def flush(self) -> Optional[bool]:
-        """Export all buffered spans to the given exporter, then clear the buffer.
-
-        Returns whether the flush was successful, and None if there was nothing to flush.
- """ - if not self.exporter: - dev_logger.debug("no exporter set; skipping buffer flush") - return False - - buffered_spans = self.load() - if not buffered_spans: - dev_logger.debug("nothing to flush; buffer empty") - return None - - errors = False - for span in buffered_spans: - try: - out = self.exporter._export(span) # type: ignore - if not (200 <= out.status_code < 300): - # take any 2xx status code as a success - errors = True - except ConnectionError: - dev_logger.debug( - "failed exporting buffered span; backend might be down or still starting" - ) - errors = True - except Exception: - logger.exception("unexpected error while flushing span batch from buffer") - errors = True - - if not errors: - self.drop() - else: - logger.error("failed flushing spans; buffer preserved") - return not errors - - @property - def is_empty(self): - """Utility to check whether the buffer has any stored spans. - - This is more efficient than attempting a load() given how large the buffer might be. - """ - return (not self._db_file.exists()) or (self._db_file.stat().st_size == 0) - - -class _OTLPSpanExporter(OTLPSpanExporter): - """Subclass of OTLPSpanExporter to configure the max retry timeout, so that it fails a bit faster.""" - - # The issue we're trying to solve is that the model takes AGES to settle if e.g. tls is misconfigured, - # as every hook of a charm_tracing-instrumented charm takes about a minute to exit, as the charm can't - # flush the traces and keeps retrying for 'too long' - - _MAX_RETRY_TIMEOUT = 4 - # we give the exporter 4 seconds in total to succeed pushing the traces to tempo - # if it fails, we'll be caching the data in the buffer and flush it the next time, so there's no data loss risk. - # this means 2/3 retries (hard to guess from the implementation) and up to ~7 seconds total wait - - -class _BufferedExporter(InMemorySpanExporter): - def __init__(self, buffer: _Buffer) -> None: - super().__init__() - self._buffer = buffer - - def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: - self._buffer.save(spans) - return super().export(spans) - - def force_flush(self, timeout_millis: int = 0) -> bool: - # parent implementation is fake, so the timeout_millis arg is not doing anything. - result = super().force_flush(timeout_millis) - self._buffer.save(self.get_finished_spans()) - return result - - -def is_enabled() -> bool: - """Whether charm tracing is enabled.""" - return os.getenv(CHARM_TRACING_ENABLED, "1") == "1" - - -@contextmanager -def charm_tracing_disabled(): - """Contextmanager to temporarily disable charm tracing. - - For usage in tests. - """ - previous = os.getenv(CHARM_TRACING_ENABLED, "1") - os.environ[CHARM_TRACING_ENABLED] = "0" - yield - os.environ[CHARM_TRACING_ENABLED] = previous - - -def get_current_span() -> Union[Span, None]: - """Return the currently active Span, if there is one, else None. - - If you'd rather keep your logic unconditional, you can use opentelemetry.trace.get_current_span, - which will return an object that behaves like a span but records no data. 
- """ - span = otlp_get_current_span() - if span is INVALID_SPAN: - return None - return cast(Span, span) - - -def _get_tracer_from_context(ctx: Context) -> Optional[ContextVar]: - tracers = [v for v in ctx if v is not None and v.name == "tracer"] - if tracers: - return tracers[0] - return None - - -def _get_tracer() -> Optional[Tracer]: - """Find tracer in context variable and as a fallback locate it in the full context.""" - try: - return tracer.get() - except LookupError: - # fallback: this course-corrects for a user error where charm_tracing symbols are imported - # from different paths (typically charms.tempo_coordinator_k8s... and lib.charms.tempo_coordinator_k8s...) - try: - ctx: Context = copy_context() - if context_tracer := _get_tracer_from_context(ctx): - logger.warning( - "Tracer not found in `tracer` context var. " - "Verify that you're importing all `charm_tracing` symbols from the same module path. \n" - "For example, DO" - ": `from charms.lib...charm_tracing import foo, bar`. \n" - "DONT: \n" - " \t - `from charms.lib...charm_tracing import foo` \n" - " \t - `from lib...charm_tracing import bar` \n" - "For more info: https://python-notes.curiousefficiency.org/en/latest/python" - "_concepts/import_traps.html#the-double-import-trap" - ) - return context_tracer.get() - else: - return None - except LookupError: - return None - - -@contextmanager -def _span(name: str) -> Generator[Optional[Span], Any, Any]: - """Context to create a span if there is a tracer, otherwise do nothing.""" - if tracer := _get_tracer(): - with tracer.start_as_current_span(name) as span: - yield cast(Span, span) - else: - yield None - - -class TracingError(RuntimeError): - """Base class for errors raised by this module.""" - - -class UntraceableObjectError(TracingError): - """Raised when an object you're attempting to instrument cannot be autoinstrumented.""" - - -def _get_tracing_endpoint( - tracing_endpoint_attr: str, - charm_instance: object, - charm_type: type, -): - _tracing_endpoint = getattr(charm_instance, tracing_endpoint_attr) - if callable(_tracing_endpoint): - tracing_endpoint = _tracing_endpoint() - else: - tracing_endpoint = _tracing_endpoint - - if tracing_endpoint is None: - return - - elif not isinstance(tracing_endpoint, str): - raise TypeError( - f"{charm_type.__name__}.{tracing_endpoint_attr} should resolve to a tempo endpoint (string); " - f"got {tracing_endpoint} instead." - ) - - dev_logger.debug(f"Setting up span exporter to endpoint: {tracing_endpoint}/v1/traces") - return f"{tracing_endpoint}/v1/traces" - - -def _get_server_cert( - server_cert_attr: str, - charm_instance: ops.CharmBase, - charm_type: Type[ops.CharmBase], -): - _server_cert = getattr(charm_instance, server_cert_attr) - if callable(_server_cert): - server_cert = _server_cert() - else: - server_cert = _server_cert - - if server_cert is None: - logger.warning( - f"{charm_type}.{server_cert_attr} is None; sending traces over INSECURE connection." - ) - return - elif not Path(server_cert).is_absolute(): - raise ValueError( - f"{charm_type}.{server_cert_attr} should resolve to a valid tls cert absolute path (string | Path)); " - f"got {server_cert} instead." 
-        )
-    return server_cert
-
-
-def _setup_root_span_initializer(
-    charm_type: _CharmType,
-    tracing_endpoint_attr: str,
-    server_cert_attr: Optional[str],
-    service_name: Optional[str],
-    buffer_path: Optional[Path],
-    buffer_max_events: int,
-    buffer_max_size_mib: int,
-):
-    """Patch the charm's initializer."""
-    original_init = charm_type.__init__
-
-    @functools.wraps(original_init)
-    def wrap_init(self: CharmBase, framework: Framework, *args, **kwargs):
-        # we're using 'self' here because this is charm init code; it makes sense to read what's below
-        # from the perspective of the charm. Self.unit.name...
-
-        original_init(self, framework, *args, **kwargs)
-        # we call this from inside the init context instead of, say, _autoinstrument, because we want it to
-        # be checked on a per-charm-instantiation basis, not on a per-type-declaration one.
-        if not is_enabled():
-            # this will only happen during unittesting, hopefully, so it's fine to log a
-            # bit more verbosely
-            logger.info("Tracing DISABLED: skipping root span initialization")
-            return
-
-        original_event_context = framework._event_context
-        # default service name isn't just app name because it could conflict with the workload service name
-        _service_name = service_name or f"{self.app.name}-charm"
-
-        unit_name = self.unit.name
-        resource = Resource.create(
-            attributes={
-                "service.name": _service_name,
-                "compose_service": _service_name,
-                "charm_type": type(self).__name__,
-                # juju topology
-                "juju_unit": unit_name,
-                "juju_application": self.app.name,
-                "juju_model": self.model.name,
-                "juju_model_uuid": self.model.uuid,
-            }
-        )
-        provider = TracerProvider(resource=resource)
-
-        # if anything goes wrong with retrieving the endpoint, we let the exception bubble up.
-        tracing_endpoint = _get_tracing_endpoint(tracing_endpoint_attr, self, charm_type)
-
-        buffer_only = False
-        # whether we're only exporting to buffer, or also to the otlp exporter.
-
-        if not tracing_endpoint:
-            # tracing is off if tracing_endpoint is None
-            # however we can buffer things until tracing comes online
-            buffer_only = True
-
-        server_cert: Optional[Union[str, Path]] = (
-            _get_server_cert(server_cert_attr, self, charm_type) if server_cert_attr else None
-        )
-
-        if (tracing_endpoint and tracing_endpoint.startswith("https://")) and not server_cert:
-            logger.error(
-                "Tracing endpoint is https, but no server_cert has been passed. "
-                "Please point @trace_charm to a `server_cert` attr. "
-                "This might also mean that the tracing provider is related to a "
-                "certificates provider, but this application is not (yet). "
-                "In that case, you might just have to wait a bit for the certificates "
-                "integration to settle. This span will be buffered."
- ) - buffer_only = True - - buffer = _Buffer( - db_file=buffer_path or Path() / BUFFER_DEFAULT_CACHE_FILE_NAME, - max_event_history_length=buffer_max_events, - max_buffer_size_mib=buffer_max_size_mib, - ) - previous_spans_buffered = not buffer.is_empty - - exporters: List[SpanExporter] = [] - if buffer_only: - # we have to buffer because we're missing necessary backend configuration - dev_logger.debug("buffering mode: ON") - exporters.append(_BufferedExporter(buffer)) - - else: - dev_logger.debug("buffering mode: FALLBACK") - # in principle, we have the right configuration to be pushing traces, - # but if we fail for whatever reason, we will put everything in the buffer - # and retry the next time - otlp_exporter = _OTLPSpanExporter( - endpoint=tracing_endpoint, - certificate_file=str(Path(server_cert).absolute()) if server_cert else None, - timeout=_OTLP_SPAN_EXPORTER_TIMEOUT, # give individual requests 1 second to succeed - ) - exporters.append(otlp_exporter) - exporters.append(_BufferedExporter(buffer)) - buffer.exporter = otlp_exporter - - for exporter in exporters: - processor = BatchSpanProcessor(exporter) - provider.add_span_processor(processor) - - set_tracer_provider(provider) - _tracer = get_tracer(_service_name) # type: ignore - _tracer_token = tracer.set(_tracer) - - dispatch_path = os.getenv("JUJU_DISPATCH_PATH", "") # something like hooks/install - event_name = dispatch_path.split("/")[1] if "/" in dispatch_path else dispatch_path - root_span_name = f"{unit_name}: {event_name} event" - span = _tracer.start_span(root_span_name, attributes={"juju.dispatch_path": dispatch_path}) - - # all these shenanigans are to work around the fact that the opentelemetry tracing API is built - # on the assumption that spans will be used as contextmanagers. - # Since we don't (as we need to close the span on framework.commit), - # we need to manually set the root span as current. - ctx = set_span_in_context(span) - - # log a trace id, so we can pick it up from the logs (and jhack) to look it up in tempo. - root_trace_id = hex(span.get_span_context().trace_id)[2:] # strip 0x prefix - logger.debug(f"Starting root trace with id={root_trace_id!r}.") - - span_token = opentelemetry.context.attach(ctx) # type: ignore - - @contextmanager - def wrap_event_context(event_name: str): - dev_logger.debug(f"entering event context: {event_name}") - # when the framework enters an event context, we create a span. - with _span("event: " + event_name) as event_context_span: - if event_context_span: - # todo: figure out how to inject event attrs in here - event_context_span.add_event(event_name) - yield original_event_context(event_name) - - framework._event_context = wrap_event_context # type: ignore - - original_close = framework.close - - @functools.wraps(original_close) - def wrap_close(): - dev_logger.debug("tearing down tracer and flushing traces") - span.end() - opentelemetry.context.detach(span_token) # type: ignore - tracer.reset(_tracer_token) - tp = cast(TracerProvider, get_tracer_provider()) - flush_successful = tp.force_flush(timeout_millis=1000) # don't block for too long - - if buffer_only: - # if we're in buffer_only mode, it means we couldn't even set up the exporter for - # tempo as we're missing some data. 
- # so attempting to flush the buffer doesn't make sense - dev_logger.debug("tracing backend unavailable: all spans pushed to buffer") - - else: - dev_logger.debug("tracing backend found: attempting to flush buffer...") - - # if we do have an exporter for tempo, and we could send traces to it, - # we can attempt to flush the buffer as well. - if not flush_successful: - logger.error("flushing FAILED: unable to push traces to backend.") - else: - dev_logger.debug("flush succeeded.") - - # the backend has accepted the spans generated during this event, - if not previous_spans_buffered: - # if the buffer was empty to begin with, any spans we collected now can be discarded - buffer.drop() - dev_logger.debug("buffer dropped: this trace has been sent already") - else: - # if the buffer was nonempty, we can attempt to flush it - dev_logger.debug("attempting buffer flush...") - buffer_flush_successful = buffer.flush() - if buffer_flush_successful: - dev_logger.debug("buffer flush OK") - elif buffer_flush_successful is None: - # TODO is this even possible? - dev_logger.debug("buffer flush OK; empty: nothing to flush") - else: - # this situation is pretty weird, I'm not even sure it can happen, - # because it would mean that we did manage - # to push traces directly to the tempo exporter (flush_successful), - # but the buffer flush failed to push to the same exporter! - logger.error("buffer flush FAILED") - - tp.shutdown() - original_close() - - framework.close = wrap_close - return - - charm_type.__init__ = wrap_init # type: ignore - - -def trace_charm( - tracing_endpoint: str, - server_cert: Optional[str] = None, - service_name: Optional[str] = None, - extra_types: Sequence[type] = (), - buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH, - buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB, - buffer_path: Optional[Union[str, Path]] = None, -) -> Callable[[_T], _T]: - """Autoinstrument the decorated charm with tracing telemetry. - - Use this function to get out-of-the-box traces for all events emitted on this charm and all - method calls on instances of this class. - - Usage: - >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - >>> from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer - >>> from ops import CharmBase - >>> - >>> @trace_charm( - >>> tracing_endpoint="tempo_otlp_http_endpoint", - >>> ) - >>> class MyCharm(CharmBase): - >>> - >>> def __init__(self, framework: Framework): - >>> ... - >>> self.tracing = TracingEndpointRequirer(self) - >>> - >>> @property - >>> def tempo_otlp_http_endpoint(self) -> Optional[str]: - >>> if self.tracing.is_ready(): - >>> return self.tracing.otlp_http_endpoint() - >>> else: - >>> return None - >>> - - :param tracing_endpoint: name of a method, property or attribute on the charm type that returns an - optional (fully resolvable) tempo url to which the charm traces will be pushed. - If None, tracing will be effectively disabled. - :param server_cert: name of a method, property or attribute on the charm type that returns an - optional absolute path to a CA certificate file to be used when sending traces to a remote server. - If it returns None, an _insecure_ connection will be used. To avoid errors in transient - situations where the endpoint is already https but there is no certificate on disk yet, it - is recommended to disable tracing (by returning None from the tracing_endpoint) altogether - until the cert has been written to disk. 
- :param service_name: service name tag to attach to all traces generated by this charm. - Defaults to the juju application name this charm is deployed under. - :param extra_types: pass any number of types that you also wish to autoinstrument. - For example, charm libs, relation endpoint wrappers, workload abstractions, ... - :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering. - :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped. - Minimum 10MiB. - :param buffer_path: path to buffer file to use for saving buffered spans. - """ - - def _decorator(charm_type: _T) -> _T: - """Autoinstrument the wrapped charmbase type.""" - _autoinstrument( - charm_type, - tracing_endpoint_attr=tracing_endpoint, - server_cert_attr=server_cert, - service_name=service_name, - extra_types=extra_types, - buffer_path=Path(buffer_path) if buffer_path else None, - buffer_max_size_mib=buffer_max_size_mib, - buffer_max_events=buffer_max_events, - ) - return charm_type - - return _decorator - - -def _autoinstrument( - charm_type: _T, - tracing_endpoint_attr: str, - server_cert_attr: Optional[str] = None, - service_name: Optional[str] = None, - extra_types: Sequence[type] = (), - buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH, - buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB, - buffer_path: Optional[Path] = None, -) -> _T: - """Set up tracing on this charm class. - - Use this function to get out-of-the-box traces for all events emitted on this charm and all - method calls on instances of this class. - - Usage: - - >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import _autoinstrument - >>> from ops.main import main - >>> _autoinstrument( - >>> MyCharm, - >>> tracing_endpoint_attr="tempo_otlp_http_endpoint", - >>> service_name="MyCharm", - >>> extra_types=(Foo, Bar) - >>> ) - >>> main(MyCharm) - - :param charm_type: the CharmBase subclass to autoinstrument. - :param tracing_endpoint_attr: name of a method, property or attribute on the charm type that returns an - optional (fully resolvable) tempo url to which the charm traces will be pushed. - If None, tracing will be effectively disabled. - :param server_cert_attr: name of a method, property or attribute on the charm type that returns an - optional absolute path to a CA certificate file to be used when sending traces to a remote server. - If it returns None, an _insecure_ connection will be used. To avoid errors in transient - situations where the endpoint is already https but there is no certificate on disk yet, it - is recommended to disable tracing (by returning None from the tracing_endpoint) altogether - until the cert has been written to disk. - :param service_name: service name tag to attach to all traces generated by this charm. - Defaults to the juju application name this charm is deployed under. - :param extra_types: pass any number of types that you also wish to autoinstrument. - For example, charm libs, relation endpoint wrappers, workload abstractions, ... - :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering. - :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped. - Minimum 10MiB. - :param buffer_path: path to buffer file to use for saving buffered spans. 
-    """
-    dev_logger.debug(f"instrumenting {charm_type}")
-    _setup_root_span_initializer(
-        charm_type,
-        tracing_endpoint_attr,
-        server_cert_attr=server_cert_attr,
-        service_name=service_name,
-        buffer_path=buffer_path,
-        buffer_max_events=buffer_max_events,
-        buffer_max_size_mib=buffer_max_size_mib,
-    )
-    trace_type(charm_type)
-    for type_ in extra_types:
-        trace_type(type_)
-
-    return charm_type
-
-
-def trace_type(cls: _T) -> _T:
-    """Set up tracing on this class.
-
-    Use this decorator to get out-of-the-box traces for all method calls on instances of this class.
-    It assumes that this class is only instantiated after a charm type decorated with `@trace_charm`
-    has been instantiated.
-    """
-    dev_logger.debug(f"instrumenting {cls}")
-    for name, method in inspect.getmembers(cls, predicate=inspect.isfunction):
-        dev_logger.debug(f"discovered {method}")
-
-        if method.__name__.startswith("__"):
-            dev_logger.debug(f"skipping {method} (dunder)")
-            continue
-
-        # the span title in the general case should be:
-        #   method call: MyCharmWrappedMethods.b
-        # if the method has a name (functools.wrapped or regular method), let
-        # _trace_callable use its default algorithm to determine what name to give the span.
-        trace_method_name = None
-        try:
-            qualname_c0 = method.__qualname__.split(".")[0]
-            if not hasattr(cls, method.__name__):
-                # if the callable doesn't have a __name__ (probably a decorated method),
-                # it probably has a bad qualname too (such as my_decorator.<locals>.wrapper) which is not
-                # great for finding out what the trace is about. So we use the method name instead and
-                # add a reference to the decorator name. Result:
-                #   method call: @my_decorator(MyCharmWrappedMethods.b)
-                trace_method_name = f"@{qualname_c0}({cls.__name__}.{name})"
-        except Exception:  # noqa: failsafe
-            pass
-
-        new_method = trace_method(method, name=trace_method_name)
-
-        if isinstance(inspect.getattr_static(cls, name), staticmethod):
-            new_method = staticmethod(new_method)
-        setattr(cls, name, new_method)
-
-    return cls
-
-
-def trace_method(method: _F, name: Optional[str] = None) -> _F:
-    """Trace this method.
-
-    A span will be opened when this method is called and closed when it returns.
-    """
-    return _trace_callable(method, "method", name=name)
-
-
-def trace_function(function: _F, name: Optional[str] = None) -> _F:
-    """Trace this function.
-
-    A span will be opened when this function is called and closed when it returns.
-    """
-    return _trace_callable(function, "function", name=name)
-
-
-def _trace_callable(callable: _F, qualifier: str, name: Optional[str] = None) -> _F:
-    dev_logger.debug(f"instrumenting {callable}")
-
-    # sig = inspect.signature(callable)
-    @functools.wraps(callable)
-    def wrapped_function(*args, **kwargs):  # type: ignore
-        name_ = name or getattr(
-            callable, "__qualname__", getattr(callable, "__name__", str(callable))
-        )
-        with _span(f"{qualifier} call: {name_}"):  # type: ignore
-            return callable(*args, **kwargs)  # type: ignore
-
-    # wrapped_function.__signature__ = sig
-    return wrapped_function  # type: ignore
-
-
-def trace(obj: Union[Type, Callable]):
-    """Trace this object and send the resulting spans to Tempo.
-
-    It will dispatch to ``trace_type`` if the decorated object is a class, otherwise
-    ``trace_function``.
- """ - if isinstance(obj, type): - if issubclass(obj, CharmBase): - raise ValueError( - "cannot use @trace on CharmBase subclasses: use @trace_charm instead " - "(we need some arguments!)" - ) - return trace_type(obj) - else: - try: - return trace_function(obj) - except Exception: - raise UntraceableObjectError( - f"cannot create span from {type(obj)}; instrument {obj} manually." - ) diff --git a/machines/poetry.lock b/machines/poetry.lock index 73b04c410..de1188eed 100644 --- a/machines/poetry.lock +++ b/machines/poetry.lock @@ -90,18 +90,6 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib- tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.1.1) ; platform_python_implementation == \"CPython\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version < \"3.11\"", "pytest-xdist[psutil]"] -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -groups = ["charm-libs"] -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "backports-strenum" version = "1.3.1" @@ -301,7 +289,7 @@ version = "3.2.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, @@ -565,24 +553,6 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi (>=2024)", "cryptography-vectors (==44.0.2)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "deprecated" -version = "1.2.14" -description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["charm-libs"] -files = [ - {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, - {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, -] - -[package.dependencies] -wrapt = ">=1.10,<2" - -[package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] - [[package]] name = "exceptiongroup" version = "1.1.2" @@ -623,24 +593,6 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] -[[package]] -name = "googleapis-common-protos" -version = "1.63.0" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "googleapis-common-protos-1.63.0.tar.gz", hash = "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e"}, - {file = "googleapis_common_protos-1.63.0-py2.py3-none-any.whl", hash = "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"}, -] - -[package.dependencies] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] - [[package]] name = "h11" version = "0.16.0" @@ -734,7 +686,7 @@ version = "6.0.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "importlib_metadata-6.0.1-py3-none-any.whl", hash = "sha256:1543daade821c89b1c4a55986c326f36e54f2e6ca3bad96be4563d0acb74dcd4"}, {file = "importlib_metadata-6.0.1.tar.gz", hash = "sha256:950127d57e35a806d520817d3e92eec3f19fdae9f0cd99da77a407c5aabefba3"}, @@ -1112,123 +1064,92 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "opentelemetry-api" -version = "1.21.0" +version = "1.39.1" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, - {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"}, -] - -[package.dependencies] -deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<7.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-common" -version = "1.21.0" -description = "OpenTelemetry Protobuf encoding" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0.tar.gz", hash = "sha256:61db274d8a68d636fb2ec2a0f281922949361cdd8236e25ff5539edf942b3226"}, -] - -[package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -opentelemetry-proto = "1.21.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-http" -version = "1.21.0" -description = "OpenTelemetry Collector Protobuf over HTTP Exporter" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] -files = [ - {file = 
"opentelemetry_exporter_otlp_proto_http-1.21.0-py3-none-any.whl", hash = "sha256:56837773de6fb2714c01fc4895caebe876f6397bbc4d16afddf89e1299a55ee2"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.21.0.tar.gz", hash = "sha256:19d60afa4ae8597f7ef61ad75c8b6c6b7ef8cb73a33fb4aed4dbc86d5c8d3301"}, -] - -[package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -deprecated = ">=1.2.6" -googleapis-common-protos = ">=1.52,<2.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.21.0" -opentelemetry-proto = "1.21.0" -opentelemetry-sdk = ">=1.21.0,<1.22.0" -requests = ">=2.7,<3.0" - -[package.extras] -test = ["responses (==0.22.0)"] - -[[package]] -name = "opentelemetry-proto" -version = "1.21.0" -description = "OpenTelemetry Python Proto" -optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main", "charm-libs"] files = [ - {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"}, - {file = "opentelemetry_proto-1.21.0.tar.gz", hash = "sha256:7d5172c29ed1b525b5ecf4ebe758c7138a9224441b3cfe683d0a237c33b1941f"}, + {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"}, + {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"}, ] [package.dependencies] -protobuf = ">=3.19,<5.0" +importlib-metadata = ">=6.0,<8.8.0" +typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-sdk" -version = "1.21.0" +version = "1.39.1" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, - {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"}, + {file = "opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c"}, + {file = "opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6"}, ] [package.dependencies] -opentelemetry-api = "1.21.0" -opentelemetry-semantic-conventions = "0.42b0" -typing-extensions = ">=3.7.4" +opentelemetry-api = "1.39.1" +opentelemetry-semantic-conventions = "0.60b1" +typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.42b0" +version = "0.60b1" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.7" -groups = ["charm-libs"] +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, - {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"}, + {file = "opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb"}, + {file = "opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953"}, ] +[package.dependencies] +opentelemetry-api = 
"1.39.1" +typing-extensions = ">=4.5.0" + [[package]] name = "ops" -version = "2.15.0" +version = "2.23.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" groups = ["main", "charm-libs"] files = [ - {file = "ops-2.15.0-py3-none-any.whl", hash = "sha256:8e47ab8a814301776b0ff42b32544ebdece7f1639168d2c86dc7a25930d2e493"}, - {file = "ops-2.15.0.tar.gz", hash = "sha256:f3bad7417e98e8f390523fad097702eed16e99b38a25e9fe856aad226474b057"}, + {file = "ops-2.23.1-py3-none-any.whl", hash = "sha256:fdf58163beafd25180c12a4c7efaf1e76e5f8710508a97840c07055bb78b0c77"}, + {file = "ops-2.23.1.tar.gz", hash = "sha256:aecacd67ef7ca913f63f397e0330bfa93d70529a3ef71ed2d99e2bc232564ae3"}, ] [package.dependencies] +importlib-metadata = "*" +opentelemetry-api = ">=1.0,<2.0" +ops-tracing = {version = "2.23.1", optional = true, markers = "extra == \"tracing\""} PyYAML = "==6.*" websocket-client = "==1.*" [package.extras] -docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (==6.2.1)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] +testing = ["ops-scenario (==7.23.1)"] +tracing = ["ops-tracing (==2.23.1)"] + +[[package]] +name = "ops-tracing" +version = "2.23.1" +description = "The tracing facility for the Ops library." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "ops_tracing-2.23.1-py3-none-any.whl", hash = "sha256:2943b069ecc8b6b5eb700f1ca4369ca35e11a4bdd58527069e1372c787aa7bc8"}, + {file = "ops_tracing-2.23.1.tar.gz", hash = "sha256:a5dece112f7ae4b1fb947ff090a2ffba56cb7c56af26b6f797b8b00fe1e50585"}, +] + +[package.dependencies] +opentelemetry-sdk = ">=1.30,<2.0" +ops = "2.23.1" +pydantic = "*" [[package]] name = "packaging" @@ -1315,7 +1236,7 @@ version = "3.20.3" description = "Protocol Buffers" optional = false python-versions = ">=3.7" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, @@ -1387,7 +1308,7 @@ version = "1.10.15" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, @@ -1669,7 +1590,7 @@ version = "2.31.0" description = "Python HTTP for Humans." optional = false python-versions = ">=3.7" -groups = ["charm-libs", "integration"] +groups = ["integration"] files = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, @@ -1984,7 +1905,7 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "integration"] files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -1998,20 +1919,20 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "websocket-client" -version = "1.6.1" +version = "1.9.0" description = "WebSocket client for Python with low level API options" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["main", "charm-libs", "integration"] files = [ - {file = "websocket-client-1.6.1.tar.gz", hash = "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd"}, - {file = "websocket_client-1.6.1-py3-none-any.whl", hash = "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d"}, + {file = "websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef"}, + {file = "websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98"}, ] [package.extras] -docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx_rtd_theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] -test = ["websockets"] +test = ["pytest", "websockets"] [[package]] name = "websockets" @@ -2092,93 +2013,13 @@ files = [ {file = "websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8"}, ] -[[package]] -name = "wrapt" -version = "1.16.0" -description = "Module for decorators, wrappers and monkey patching." 
-optional = false -python-versions = ">=3.6" -groups = ["charm-libs"] -files = [ - {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, - {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, - {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, - {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, - {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, - {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, - {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, - {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, - {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, - {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, - {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, - {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, - {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, - {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, - {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, - {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, - {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, - {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, - {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, - {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, -] - [[package]] name = "zipp" version = "3.18.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"}, {file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"}, @@ -2191,4 +2032,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more [metadata] lock-version = "2.1" python-versions = "~=3.10" -content-hash = "ea832f8a530c89a53b58d1f858bd5eb4c501135d8586b90df8a327444287869c" +content-hash = "a4b785c176248ea7e5771171cf8842c3e4a88a00710aa32255c0c06d10e67b44" diff --git a/machines/pyproject.toml b/machines/pyproject.toml index d32a067e9..96e68858c 100644 --- a/machines/pyproject.toml +++ b/machines/pyproject.toml @@ -6,7 +6,7 @@ name = "mysql-operator" # Charm is not meant to be packaged dependencies = [ "boto3~=1.28", "jinja2~=3.1", - "ops~=2.8", + "ops[tracing]~=2.21", "python_hosts~=1.0", "pyyaml~=6.0", "tenacity~=8.2", @@ -31,8 +31,6 @@ charm-libs = [ # tls_certificates_interface/v2/tls_certificates.py "cryptography>=42.0.5", "jsonschema", - # tempo_coordinator_k8s/v0/charm_tracing.py - "opentelemetry-exporter-otlp-proto-http==1.21.0", ] format = [ "ruff~=0.12", diff --git a/machines/src/charm.py b/machines/src/charm.py index b4dcdf930..5fe63d20d 100755 --- a/machines/src/charm.py +++ b/machines/src/charm.py @@ -19,7 +19,7 @@ import ops from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.data_platform_libs.v0.s3 import S3Requirer -from charms.grafana_agent.v0.cos_agent import COSAgentProvider, charm_tracing_config +from charms.grafana_agent.v0.cos_agent import COSAgentProvider from charms.mysql.v0.async_replication import ( RELATION_CONSUMER, RELATION_OFFER, @@ -50,7 +50,6 @@ ) from charms.mysql.v0.tls import MySQLTLS from charms.rolling_ops.v0.rollingops import RollingOpsManager -from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm from ops import ( ActiveStatus, BlockedStatus, @@ -65,6 +64,7 @@ Unit, WaitingStatus, ) +from ops_tracing import Tracing from tenacity import ( RetryError, Retrying, @@ -134,27 +134,6 @@ class MySQLCustomCharmEvents(FlushMySQLLogsCharmEvents, IPAddressChangeCharmEven """Custom event sources for the charm.""" -@trace_charm( - tracing_endpoint="tracing_endpoint", - extra_types=( - COSAgentProvider, - DBRouterRelation, - MySQL, - MySQLAsyncReplicationConsumer, - MySQLAsyncReplicationOffer, - MySQLBackups, - MySQLConfig, - MySQLLogs, - MySQLMachineHostnameResolution, - MySQLProvider, - MySQLRelation, - MySQLTLS, - MySQLVMUpgrade, - RollingOpsManager, - S3Requirer, - SharedDBRelation, - ), -) class MySQLOperatorCharm(MySQLCharmBase, TypedCharmBase[CharmConfig]): """Operator framework charm for MySQL.""" @@ -217,7 +196,7 @@ def __init__(self, *args): self.replication_offer = MySQLAsyncReplicationOffer(self) self.replication_consumer = MySQLAsyncReplicationConsumer(self) - self.tracing_endpoint_config, _ = charm_tracing_config(self._grafana_agent, None) + self.tracing = 
Tracing(self, tracing_relation_name="tracing") # ======================= # Charm Lifecycle Hooks @@ -638,11 +617,6 @@ def _on_cos_agent_relation_broken(self, _: RelationBrokenEvent) -> None: # Helpers # ======================= - @property - def tracing_endpoint(self) -> str | None: - """Otlp http endpoint for charm instrumentation.""" - return self.tracing_endpoint_config - @property def _mysql(self): """Returns an instance of the MySQL object.""" diff --git a/machines/tests/unit/conftest.py b/machines/tests/unit/conftest.py index ea09858ff..4eee8bc71 100644 --- a/machines/tests/unit/conftest.py +++ b/machines/tests/unit/conftest.py @@ -2,7 +2,6 @@ # See LICENSE file for licensing details. import pytest -from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing_disabled @pytest.fixture(autouse=True) @@ -13,9 +12,3 @@ def with_juju_secrets(monkeypatch): @pytest.fixture def without_juju_secrets(monkeypatch): monkeypatch.setattr("ops.JujuVersion.has_secrets", False) - - -@pytest.fixture(autouse=True) -def disable_charm_tracing(): - with charm_tracing_disabled(): - yield From a0543d2edebdb889835be6e946a86ecd67cfff0e Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Tue, 10 Feb 2026 16:21:21 -0300 Subject: [PATCH 08/40] Removing unsupported use case test (#77) --- .../test_upgrade_skip_pre_upgrade_check.py | 91 ------------------- .../task.yaml | 7 -- 2 files changed, 98 deletions(-) delete mode 100644 machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py delete mode 100644 machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml diff --git a/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py b/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py deleted file mode 100644 index 6c5ef74f1..000000000 --- a/machines/tests/integration/integration/high_availability/test_upgrade_skip_pre_upgrade_check.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. 
- -import logging - -import jubilant_backports -from jubilant_backports import Juju - -from ...helpers_ha import ( - check_mysql_units_writes_increment, - get_app_units, - wait_for_apps_status, - wait_for_unit_status, -) - -MYSQL_APP_NAME = "mysql" -MYSQL_TEST_APP_NAME = "mysql-test-app" - -MINUTE_SECS = 60 - - -def test_deploy_stable(juju: Juju) -> None: - """Simple test to ensure that the MySQL and application charms get deployed.""" - logging.info("Deploying MySQL cluster") - juju.deploy( - charm=MYSQL_APP_NAME, - app=MYSQL_APP_NAME, - base="ubuntu@22.04", - channel="8.0/stable", - config={"profile": "testing"}, - num_units=3, - ) - juju.deploy( - charm=MYSQL_TEST_APP_NAME, - app=MYSQL_TEST_APP_NAME, - base="ubuntu@22.04", - channel="latest/edge", - config={"sleep_interval": 50}, - num_units=1, - ) - - juju.integrate( - f"{MYSQL_APP_NAME}:database", - f"{MYSQL_TEST_APP_NAME}:database", - ) - - logging.info("Wait for applications to become active") - juju.wait( - ready=wait_for_apps_status( - jubilant_backports.all_active, MYSQL_APP_NAME, MYSQL_TEST_APP_NAME - ), - error=jubilant_backports.any_blocked, - timeout=20 * MINUTE_SECS, - ) - - -def test_refresh_without_pre_upgrade_check(juju: Juju, charm: str) -> None: - """Test updating from stable channel.""" - logging.info("Refresh the charm") - juju.refresh(app=MYSQL_APP_NAME, path=charm) - - logging.info("Wait for rolling restart") - app_units = get_app_units(juju, MYSQL_APP_NAME) - app_units_funcs = [wait_for_unit_status(MYSQL_APP_NAME, unit, "error") for unit in app_units] - - juju.wait( - ready=lambda status: any(status_func(status) for status_func in app_units_funcs), - timeout=10 * MINUTE_SECS, - successes=1, - ) - - check_mysql_units_writes_increment(juju, MYSQL_APP_NAME) - - -def test_rollback_without_pre_upgrade_check(juju: Juju, charm: str) -> None: - """Test refresh back to stable channel.""" - # Early Jubilant 1.X.Y versions do not support the `switch` option - logging.info("Refresh the charm to stable channel") - juju.cli("refresh", "--channel=8.0/stable", f"--switch={MYSQL_APP_NAME}", MYSQL_APP_NAME) - - logging.info("Wait for rolling restart") - app_units = get_app_units(juju, MYSQL_APP_NAME) - app_units_funcs = [wait_for_unit_status(MYSQL_APP_NAME, unit, "error") for unit in app_units] - - juju.wait( - ready=lambda status: any(status_func(status) for status_func in app_units_funcs), - timeout=10 * MINUTE_SECS, - successes=1, - ) - - check_mysql_units_writes_increment(juju, MYSQL_APP_NAME) diff --git a/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml b/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml deleted file mode 100644 index 8f2dc4820..000000000 --- a/machines/tests/spread/integration/test_upgrade_skip_pre_upgrade_check.py/task.yaml +++ /dev/null @@ -1,7 +0,0 @@ -summary: test_upgrade_skip_pre_upgrade_check.py -environment: - TEST_MODULE: high_availability/test_upgrade_skip_pre_upgrade_check.py -execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" -artifacts: - - allure-results From 8a184b5386455564df08e55005fc6e7f5e77a993 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Luis=20Cano=20Rodr=C3=ADguez?= Date: Tue, 10 Feb 2026 21:16:53 +0100 Subject: [PATCH 09/40] [MISC] Unify packaging (8.0/edge) (#76) * Avoid using incomplete PEP 621 [project] table * Unify linting configuration --- kubernetes/poetry.lock | 12 +++---- kubernetes/pyproject.toml | 14 ++++----- kubernetes/src/charm.py 
| 9 +++--- kubernetes/src/config.py | 34 ++++++++++---------- kubernetes/src/k8s_helpers.py | 11 +++---- kubernetes/src/mysql_k8s_executor.py | 2 +- kubernetes/src/mysql_k8s_helpers.py | 37 +++++++++++----------- kubernetes/tests/integration/helpers.py | 7 ++--- kubernetes/tests/unit/test_upgrade.py | 34 +++++++++++--------- machines/poetry.lock | 4 +-- machines/pyproject.toml | 42 +++++++++++++++++-------- 11 files changed, 112 insertions(+), 94 deletions(-) diff --git a/kubernetes/poetry.lock b/kubernetes/poetry.lock index 6f4ac8570..b5738f471 100644 --- a/kubernetes/poetry.lock +++ b/kubernetes/poetry.lock @@ -679,7 +679,7 @@ version = "6.11.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "charm-libs"] files = [ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, @@ -1062,7 +1062,7 @@ version = "1.39.1" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.9" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "charm-libs"] files = [ {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"}, {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"}, @@ -1111,7 +1111,7 @@ version = "2.23.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "charm-libs"] files = [ {file = "ops-2.23.1-py3-none-any.whl", hash = "sha256:fdf58163beafd25180c12a4c7efaf1e76e5f8710508a97840c07055bb78b0c77"}, {file = "ops-2.23.1.tar.gz", hash = "sha256:aecacd67ef7ca913f63f397e0330bfa93d70529a3ef71ed2d99e2bc232564ae3"}, @@ -1995,7 +1995,7 @@ version = "3.19.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" -groups = ["main", "charm-libs", "integration"] +groups = ["main", "charm-libs"] files = [ {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, @@ -2007,5 +2007,5 @@ test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.funct [metadata] lock-version = "2.1" -python-versions = "~=3.10" -content-hash = "e559e07b9ecdd5dd6da59495b16846784c4255e6b25b7ec69af7d2068d3a2fad" +python-versions = "^3.10" +content-hash = "fd1d73b9ae588606a3d067549d868fedc4338f301c3ff825ee15a551805f651b" diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index 1dcdd36c7..fb9dfab2f 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -1,9 +1,8 @@ # Copyright 2021 Canonical Ltd. # See LICENSE file for licensing details. 
-[project] -name = "mysql-k8s-operator" # Charm is not meant to be packaged -dependencies = [ +[dependency-groups] +main = [ "boto3~=1.28", "jinja2~=3.1", "lightkube~=0.15.0", @@ -11,9 +10,6 @@ dependencies = [ "pyyaml~=6.0", "tenacity~=8.2", ] -requires-python = "~=3.10" - -[dependency-groups] charm-libs = [ # data_platform_libs/v0/data_interfaces.py "ops>=2.0.0", @@ -50,7 +46,6 @@ integration = [ "pytest~=7.4", "jinja2~=3.1", "juju~=3.6", - "ops~=2.15", "mysql-connector-python~=9.1.0", "tenacity~=8.2", "boto3~=1.28", @@ -67,6 +62,9 @@ integration = [ package-mode = false requires-poetry = ">=2.2.0" +[tool.poetry.dependencies] +python = "^3.10" + [tool.poetry.group.format] optional = true @@ -88,7 +86,7 @@ markers = ["juju3", "only_with_juju_secrets", "only_without_juju_secrets"] [tool.ruff] # preview and explicit preview are enabled for CPY001 preview = true -target-version = "py38" # Bump when ready to re-lint +target-version = "py310" src = ["src", "."] line-length = 99 diff --git a/kubernetes/src/charm.py b/kubernetes/src/charm.py index e0bb755ed..c8bb4a94a 100755 --- a/kubernetes/src/charm.py +++ b/kubernetes/src/charm.py @@ -17,7 +17,6 @@ import random from socket import getfqdn from time import sleep -from typing import Optional import ops from charms.data_platform_libs.v0.data_models import TypedCharmBase @@ -265,7 +264,7 @@ def _pebble_layer(self) -> Layer: return Layer(layer) # pyright: ignore [reportArgumentType] @property - def restart_peers(self) -> Optional[ops.model.Relation]: + def restart_peers(self) -> ops.model.Relation | None: """Retrieve the peer relation.""" return self.model.get_relation("restart") @@ -314,7 +313,7 @@ def unit_initialized(self, raise_exceptions: bool = False) -> bool: else: return False - def get_unit_hostname(self, unit_name: Optional[str] = None) -> str: + def get_unit_hostname(self, unit_name: str | None = None) -> str: """Get the hostname.localdomain for a unit. 
Translate juju unit name to hostname.localdomain, necessary @@ -375,7 +374,7 @@ def _create_cluster(self) -> None: logger.exception("Failed to initialize primary") raise - def _get_primary_from_online_peer(self) -> Optional[str]: + def _get_primary_from_online_peer(self) -> str | None: """Get the primary address from an online peer.""" for unit in self.peers.units: if self.peers.data[unit].get("member-state") == InstanceState.ONLINE.lower(): @@ -989,7 +988,7 @@ def _is_cluster_blocked(self) -> bool: # avoid changing status while async replication is setting up return not (self.replication_consumer.idle and self.replication_offer.idle) - def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None: + def _on_update_status(self, _: UpdateStatusEvent | None) -> None: """Handle the update status event.""" if not self.upgrade.idle: # avoid changing status while upgrade is in progress diff --git a/kubernetes/src/config.py b/kubernetes/src/config.py index 97bf64ff2..b1b0ae20f 100644 --- a/kubernetes/src/config.py +++ b/kubernetes/src/config.py @@ -7,7 +7,7 @@ import configparser import logging import re -from typing import ClassVar, Optional +from typing import ClassVar from charms.data_platform_libs.v0.data_models import BaseConfigModel from charms.mysql.v0.mysql import MAX_CONNECTIONS_FLOOR @@ -51,14 +51,14 @@ class CharmConfig(BaseConfigModel): """Manager for the structured configuration.""" profile: str - cluster_name: Optional[str] - cluster_set_name: Optional[str] - profile_limit_memory: Optional[int] - mysql_interface_user: Optional[str] - mysql_interface_database: Optional[str] - mysql_root_interface_user: Optional[str] - mysql_root_interface_database: Optional[str] - experimental_max_connections: Optional[int] + cluster_name: str | None + cluster_set_name: str | None + profile_limit_memory: int | None + mysql_interface_user: str | None + mysql_interface_database: str | None + mysql_root_interface_user: str | None + mysql_root_interface_database: str | None + experimental_max_connections: int | None binlog_retention_days: int plugin_audit_enabled: bool plugin_audit_strategy: str @@ -67,7 +67,7 @@ class CharmConfig(BaseConfigModel): @validator("profile") @classmethod - def profile_values(cls, value: str) -> Optional[str]: + def profile_values(cls, value: str) -> str | None: """Check profile config option is one of `testing` or `production`.""" if value not in ["testing", "production"]: raise ValueError("Value not one of 'testing' or 'production'") @@ -76,7 +76,7 @@ def profile_values(cls, value: str) -> Optional[str]: @validator("cluster_name", "cluster_set_name") @classmethod - def cluster_name_validator(cls, value: str) -> Optional[str]: + def cluster_name_validator(cls, value: str) -> str | None: """Check for valid cluster, cluster-set name. 
Limited to 63 characters, and must start with a letter and @@ -98,7 +98,7 @@ def cluster_name_validator(cls, value: str) -> Optional[str]: @validator("profile_limit_memory") @classmethod -    def profile_limit_memory_validator(cls, value: int) -> Optional[int]: +    def profile_limit_memory_validator(cls, value: int) -> int | None: """Check profile limit memory.""" if value < 600: raise ValueError("MySQL Charm requires at least 600MB for bootstrapping") @@ -109,7 +109,7 @@ def profile_limit_memory_validator(cls, value: int) -> Optional[int]: @validator("mysql_interface_user", "mysql_root_interface_user") @classmethod -    def user_name_validator(cls, value: str) -> Optional[str]: +    def user_name_validator(cls, value: str) -> str | None: """Check user name is valid.""" if len(value) > 32: raise ValueError("User name constrained to 32 characters") @@ -118,7 +118,7 @@ def user_name_validator(cls, value: str) -> Optional[str]: @validator("mysql_interface_database", "mysql_root_interface_database") @classmethod -    def database_name_validator(cls, value: str) -> Optional[str]: +    def database_name_validator(cls, value: str) -> str | None: """Check database name is valid.""" if not re.match(r"^[^\\\/?%*:|\"<>.]{1,64}$", value): raise ValueError( @@ -130,7 +130,7 @@ def database_name_validator(cls, value: str) -> Optional[str]: @validator("experimental_max_connections") @classmethod -    def experimental_max_connections_validator(cls, value: int) -> Optional[int]: +    def experimental_max_connections_validator(cls, value: int) -> int | None: """Check experimental max connections.""" if value < MAX_CONNECTIONS_FLOOR: raise ValueError( @@ -151,7 +151,7 @@ def binlog_retention_days_validator(cls, value: int) -> int: @validator("plugin_audit_strategy") @classmethod -    def plugin_audit_strategy_validator(cls, value: str) -> Optional[str]: +    def plugin_audit_strategy_validator(cls, value: str) -> str | None: """Check plugin_audit_strategy is one of `async` or `semi-async`.""" if value not in ["async", "semi-async"]: raise ValueError("plugin_audit_strategy not one of 'async' or 'semi-async'") @@ -160,7 +160,7 @@ def plugin_audit_strategy_validator(cls, value: str) -> Optional[str]: @validator("logs_audit_policy") @classmethod -    def logs_audit_policy_validator(cls, value: str) -> Optional[str]: +    def logs_audit_policy_validator(cls, value: str) -> str | None: """Check values for audit log policy.""" valid_values = ["all", "logins", "queries"] if value not in valid_values: diff --git a/kubernetes/src/k8s_helpers.py b/kubernetes/src/k8s_helpers.py index edf864247..e597661c1 100644 --- a/kubernetes/src/k8s_helpers.py +++ b/kubernetes/src/k8s_helpers.py @@ -6,7 +6,6 @@ import logging import socket import typing -from typing import Dict, List, Optional, Tuple from lightkube.core.client import Client from lightkube.core.exceptions import ApiError @@ -47,7 +46,7 @@ def __init__(self, charm: "MySQLOperatorCharm"): self.cluster_name = charm.app_peer_data.get("cluster-name") self.client = Client() # type: ignore -    def create_endpoint_services(self, roles: List[str]) -> None: +    def create_endpoint_services(self, roles: list[str]) -> None: """Create kubernetes service for endpoints. Args:
Args: @@ -112,7 +111,7 @@ def delete_endpoint_services(self, roles: List[str]) -> None: else: logger.warning("Kubernetes service deletion failed: %s", e) - def label_pod(self, role: str, pod_name: Optional[str] = None) -> None: + def label_pod(self, role: str, pod_name: str | None = None) -> None: """Create or update pod labels. Args: @@ -148,7 +147,7 @@ def label_pod(self, role: str, pod_name: Optional[str] = None) -> None: logger.exception("Kubernetes pod label creation failed: %s", e) raise KubernetesClientError from e - def get_resources_limits(self, container_name: str) -> Dict: + def get_resources_limits(self, container_name: str) -> dict: """Return resources limits for a given container. Args: @@ -187,7 +186,7 @@ def get_node_allocable_memory(self) -> int: raise KubernetesClientError from e @retry(stop=stop_after_attempt(60), wait=wait_fixed(1), reraise=True) - def wait_service_ready(self, service_endpoint: Tuple[str, int]) -> None: + def wait_service_ready(self, service_endpoint: tuple[str, int]) -> None: """Wait for a service to be listening on a given endpoint. Args: diff --git a/kubernetes/src/mysql_k8s_executor.py b/kubernetes/src/mysql_k8s_executor.py index ae1d1dbe9..7010d6ee3 100644 --- a/kubernetes/src/mysql_k8s_executor.py +++ b/kubernetes/src/mysql_k8s_executor.py @@ -5,7 +5,7 @@ import json import re -from typing import Generator +from collections.abc import Generator import ops from mysql_shell.executors import BaseExecutor diff --git a/kubernetes/src/mysql_k8s_helpers.py b/kubernetes/src/mysql_k8s_helpers.py index 83fb57449..5308f9004 100644 --- a/kubernetes/src/mysql_k8s_helpers.py +++ b/kubernetes/src/mysql_k8s_helpers.py @@ -6,7 +6,8 @@ import json import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple +from collections.abc import Iterable +from typing import TYPE_CHECKING import jinja2 from charms.mysql.v0.mysql import ( @@ -343,16 +344,16 @@ def setup_logrotate_config( def execute_backup_commands( self, s3_path: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], xtrabackup_location: str = CHARMED_MYSQL_XTRABACKUP_LOCATION, xbcloud_location: str = CHARMED_MYSQL_XBCLOUD_LOCATION, xtrabackup_plugin_dir: str = XTRABACKUP_PLUGIN_DIR, mysqld_socket_file: str = MYSQLD_SOCK_FILE, tmp_base_directory: str = MYSQL_DATA_DIR, defaults_config_file: str = MYSQLD_DEFAULTS_CONFIG_FILE, - user: Optional[str] = MYSQL_SYSTEM_USER, - group: Optional[str] = MYSQL_SYSTEM_GROUP, - ) -> Tuple[str, str]: + user: str | None = MYSQL_SYSTEM_USER, + group: str | None = MYSQL_SYSTEM_GROUP, + ) -> tuple[str, str]: """Executes commands to create a backup.""" return super().execute_backup_commands( s3_path, @@ -378,13 +379,13 @@ def delete_temp_backup_directory(self, from_directory: str = MYSQL_DATA_DIR) -> def retrieve_backup_with_xbcloud( self, backup_id: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], temp_restore_directory: str = MYSQL_DATA_DIR, xbcloud_location: str = CHARMED_MYSQL_XBCLOUD_LOCATION, xbstream_location: str = CHARMED_MYSQL_XBSTREAM_LOCATION, user: str = MYSQL_SYSTEM_USER, group: str = MYSQL_SYSTEM_GROUP, - ) -> Tuple[str, str, str]: + ) -> tuple[str, str, str]: """Retrieve the specified backup from S3. 
The backup is retrieved using xbcloud and stored in a temp dir in the @@ -400,7 +401,7 @@ def retrieve_backup_with_xbcloud( group, ) - def prepare_backup_for_restore(self, backup_location: str) -> Tuple[str, str]: + def prepare_backup_for_restore(self, backup_location: str) -> tuple[str, str]: """Prepare the backup in the provided dir for restore.""" return super().prepare_backup_for_restore( backup_location, @@ -418,7 +419,7 @@ def empty_data_files(self) -> None: group=MYSQL_SYSTEM_GROUP, ) - def restore_backup(self, backup_location: str) -> Tuple[str, str]: + def restore_backup(self, backup_location: str) -> tuple[str, str]: """Restore the provided prepared backup.""" return super().restore_backup( backup_location, @@ -639,14 +640,14 @@ def restart_mysql_exporter(self) -> None: def _execute_commands( self, - commands: List[str], + commands: list[str], bash: bool = False, - user: Optional[str] = None, - group: Optional[str] = None, - env_extra: Optional[Dict] = None, - timeout: Optional[float] = None, - stream_output: Optional[str] = None, - ) -> Tuple[str, str]: + user: str | None = None, + group: str | None = None, + env_extra: dict | None = None, + timeout: float | None = None, + stream_output: str | None = None, + ) -> tuple[str, str]: """Execute commands on the server where MySQL is running.""" try: if bash: @@ -699,7 +700,7 @@ def write_content_to_file( """ self.container.push(path, content, permissions=permission, user=owner, group=group) - def read_file_content(self, path: str) -> Optional[str]: + def read_file_content(self, path: str) -> str | None: """Read file content. Args: @@ -805,7 +806,7 @@ def set_cluster_primary(self, new_primary_address: str) -> None: super().set_cluster_primary(new_primary_address) self.update_endpoints(PEER) - def fetch_error_log(self) -> Optional[str]: + def fetch_error_log(self) -> str | None: """Fetch the MySQL error log.""" return self.read_file_content(MYSQL_LOG_ERROR) diff --git a/kubernetes/tests/integration/helpers.py b/kubernetes/tests/integration/helpers.py index 341e48457..3590061d6 100644 --- a/kubernetes/tests/integration/helpers.py +++ b/kubernetes/tests/integration/helpers.py @@ -4,7 +4,6 @@ import itertools import secrets import string -from typing import Dict, List from mysql.connector.errors import ( DatabaseError, @@ -34,10 +33,10 @@ def execute_queries_on_unit( unit_address: str, username: str, password: str, - queries: List[str], + queries: list[str], commit: bool = False, raw: bool = False, -) -> List: +) -> list: """Execute given MySQL queries on a unit. Args: @@ -68,7 +67,7 @@ def execute_queries_on_unit( @retry(stop=stop_after_attempt(8), wait=wait_fixed(15), reraise=True) -def is_connection_possible(credentials: Dict, **extra_opts) -> bool: +def is_connection_possible(credentials: dict, **extra_opts) -> bool: """Test a connection to a MySQL server. 
Args: diff --git a/kubernetes/tests/unit/test_upgrade.py b/kubernetes/tests/unit/test_upgrade.py index 0b25b2acf..305ff2ff1 100644 --- a/kubernetes/tests/unit/test_upgrade.py +++ b/kubernetes/tests/unit/test_upgrade.py @@ -190,13 +190,16 @@ def test_pebble_ready( self.harness.update_relation_data( self.upgrade_relation_id, "mysql-k8s/0", {"state": "upgrading"} ) - with patch( - "charm.MySQLOperatorCharm.unit_initialized", - return_value=True, - ), patch( - "charm.MySQLOperatorCharm.cluster_initialized", - new_callable=PropertyMock, - return_value=True, + with ( + patch( + "charm.MySQLOperatorCharm.unit_initialized", + return_value=True, + ), + patch( + "charm.MySQLOperatorCharm.cluster_initialized", + new_callable=PropertyMock, + return_value=True, + ), ): self.harness.container_pebble_ready("mysql") self.assertEqual( @@ -210,13 +213,16 @@ def test_pebble_ready( # setup for exception mock_recover_unit_after_restart.side_effect = RetryError("dummy") - with patch( - "charm.MySQLOperatorCharm.unit_initialized", - return_value=True, - ), patch( - "charm.MySQLOperatorCharm.cluster_initialized", - new_callable=PropertyMock, - return_value=True, + with ( + patch( + "charm.MySQLOperatorCharm.unit_initialized", + return_value=True, + ), + patch( + "charm.MySQLOperatorCharm.cluster_initialized", + new_callable=PropertyMock, + return_value=True, + ), ): self.harness.container_pebble_ready("mysql") self.assertTrue(isinstance(self.charm.unit.status, BlockedStatus)) diff --git a/machines/poetry.lock b/machines/poetry.lock index de1188eed..bc694dba6 100644 --- a/machines/poetry.lock +++ b/machines/poetry.lock @@ -2031,5 +2031,5 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more [metadata] lock-version = "2.1" -python-versions = "~=3.10" -content-hash = "a4b785c176248ea7e5771171cf8842c3e4a88a00710aa32255c0c06d10e67b44" +python-versions = "^3.10" +content-hash = "fd1d73b9ae588606a3d067549d868fedc4338f301c3ff825ee15a551805f651b" diff --git a/machines/pyproject.toml b/machines/pyproject.toml index 96e68858c..f05767b1d 100644 --- a/machines/pyproject.toml +++ b/machines/pyproject.toml @@ -1,9 +1,8 @@ # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. 
-[project] -name = "mysql-operator" # Charm is not meant to be packaged -dependencies = [ +[dependency-groups] +main = [ "boto3~=1.28", "jinja2~=3.1", "ops[tracing]~=2.21", @@ -12,9 +11,6 @@ dependencies = [ "tenacity~=8.2", "typing_extensions~=4.7", ] -requires-python = "~=3.10" - -[dependency-groups] charm-libs = [ # data_platform_libs/v0/data_interfaces.py "ops>=2.0.0", @@ -63,6 +59,9 @@ integration = [ package-mode = false requires-poetry = ">=2.2.0" +[tool.poetry.dependencies] +python = "^3.10" + [tool.poetry.group.format] optional = true @@ -79,7 +78,7 @@ show_missing = true minversion = "6.0" log_cli_level = "INFO" -# Formatting tools configuration +# Linting tools configuration [tool.ruff] # preview and explicit preview are enabled for CPY001 preview = true @@ -89,7 +88,23 @@ line-length = 99 [tool.ruff.lint] explicit-preview-rules = true -select = ["A", "E", "W", "F", "C", "N", "D", "I", "B", "CPY001", "RUF", "S", "SIM", "UP", "TC"] +select = [ + "A", + "E", + "W", + "F", + "C", + "N", + "D", + "I", + "B", + "CPY001", + "RUF", + "S", + "SIM", + "UP", + "TC", +] ignore = [ "D107", # Ignore D107 Missing docstring in __init__ "E501", # Ignore E501 Line too long @@ -97,11 +112,12 @@ ignore = [ [tool.ruff.lint.per-file-ignores] "tests/*" = [ - "D1", "D417", - # Asserts - "B011", - # Disable security checks for tests - "S", + "D1", + "D417", + # Asserts + "B011", + # Disable security checks for tests + "S", ] [tool.ruff.lint.flake8-copyright] From fa7ff76500aab2762c6ed5883d35702849a9a0e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Luis=20Cano=20Rodr=C3=ADguez?= Date: Fri, 13 Feb 2026 10:54:51 +0100 Subject: [PATCH 10/40] Restore ops in integration tests (#84) --- kubernetes/poetry.lock | 8 ++++---- kubernetes/pyproject.toml | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/kubernetes/poetry.lock b/kubernetes/poetry.lock index b5738f471..098dfa0ea 100644 --- a/kubernetes/poetry.lock +++ b/kubernetes/poetry.lock @@ -679,7 +679,7 @@ version = "6.11.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, @@ -1062,7 +1062,7 @@ version = "1.39.1" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.9" -groups = ["main", "charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"}, {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"}, @@ -1111,7 +1111,7 @@ version = "2.23.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" -groups = ["main", "charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "ops-2.23.1-py3-none-any.whl", hash = "sha256:fdf58163beafd25180c12a4c7efaf1e76e5f8710508a97840c07055bb78b0c77"}, {file = "ops-2.23.1.tar.gz", hash = "sha256:aecacd67ef7ca913f63f397e0330bfa93d70529a3ef71ed2d99e2bc232564ae3"}, @@ -1995,7 +1995,7 @@ version = "3.19.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = 
">=3.8" -groups = ["main", "charm-libs"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index fb9dfab2f..91ee32fb7 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -46,6 +46,7 @@ integration = [ "pytest~=7.4", "jinja2~=3.1", "juju~=3.6", + "ops~=2.15", "mysql-connector-python~=9.1.0", "tenacity~=8.2", "boto3~=1.28", From ea9442eaa02efe6ac8b2cc735146154abbe32e00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sinclert=20P=C3=A9rez?= Date: Fri, 13 Feb 2026 14:01:10 +0100 Subject: [PATCH 11/40] [MISC] 8.0 - Bump mysql-shell-client to >=0.7.1 (#80) --- .../lib/charms/mysql/v0/async_replication.py | 2 +- kubernetes/lib/charms/mysql/v0/backups.py | 4 +-- kubernetes/lib/charms/mysql/v0/mysql.py | 27 +++++++++++-------- kubernetes/lib/charms/mysql/v0/tls.py | 4 +-- kubernetes/poetry.lock | 6 ++--- kubernetes/pyproject.toml | 2 +- .../lib/charms/mysql/v0/async_replication.py | 4 +-- machines/lib/charms/mysql/v0/backups.py | 4 +-- machines/lib/charms/mysql/v0/mysql.py | 27 +++++++++++-------- machines/lib/charms/mysql/v0/tls.py | 4 +-- machines/poetry.lock | 6 ++--- machines/pyproject.toml | 2 +- machines/tests/unit/test_mysql.py | 23 +++++++++------- 13 files changed, 64 insertions(+), 51 deletions(-) diff --git a/kubernetes/lib/charms/mysql/v0/async_replication.py b/kubernetes/lib/charms/mysql/v0/async_replication.py index 12787e77c..da8b257c5 100644 --- a/kubernetes/lib/charms/mysql/v0/async_replication.py +++ b/kubernetes/lib/charms/mysql/v0/async_replication.py @@ -59,7 +59,7 @@ LIBAPI = 0 LIBPATCH = 12 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] RELATION_OFFER = "replication-offer" RELATION_CONSUMER = "replication" diff --git a/kubernetes/lib/charms/mysql/v0/backups.py b/kubernetes/lib/charms/mysql/v0/backups.py index b70dc9f1e..cfbcf52b8 100644 --- a/kubernetes/lib/charms/mysql/v0/backups.py +++ b/kubernetes/lib/charms/mysql/v0/backups.py @@ -107,9 +107,9 @@ def is_unit_blocked(self) -> bool: # The unique Charmhub library identifier, never change it LIBID = "183844304be247129572309a5fb1e47c" LIBAPI = 0 -LIBPATCH = 19 +LIBPATCH = 20 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE = "S3 repository claimed by another cluster" MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR = ( diff --git a/kubernetes/lib/charms/mysql/v0/mysql.py b/kubernetes/lib/charms/mysql/v0/mysql.py index 465134ea1..3b4d54018 100644 --- a/kubernetes/lib/charms/mysql/v0/mysql.py +++ b/kubernetes/lib/charms/mysql/v0/mysql.py @@ -136,9 +136,9 @@ def __init__( # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 101 +LIBPATCH = 102 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] UNIT_TEARDOWN_LOCKNAME = "unit-teardown" UNIT_ADD_LOCKNAME = "unit-add" @@ -1426,23 +1426,28 @@ def create_database(self, database: str) -> None: primary_address = self.get_cluster_primary_address() primary_executor = self._build_instance_tcp_executor(primary_address) + primary_client = MySQLInstanceClient(primary_executor, self._quoter) + + try: + logger.info(f"Creating application {database=}") + 
primary_client.create_instance_database(database) + except ExecutionError as e: + logger.error(f"Failed to create application database {database}") + raise MySQLCreateApplicationDatabaseError() from e role_name = self._build_mysql_database_dba_role(database) - role_query = self._auth_query_builder.build_database_admin_role_query(role_name, database) queries = ";".join([ - f"CREATE DATABASE `{database}`", - f"GRANT SELECT ON `{database}`.* TO '{ROLE_READ}'", - f"GRANT SELECT, INSERT, DELETE, UPDATE ON `{database}`.* TO '{ROLE_DML}'", - role_query, + self._auth_query_builder.build_instance_reader_role_update_query(database), + self._auth_query_builder.build_instance_writer_role_update_query(database), + self._auth_query_builder.build_database_admin_role_query(role_name, database), ]) try: - logger.info(f"Creating application {database=} and DBA {role_name=}") + logger.info(f"Creating application DBA {role_name=}") primary_executor.execute_sql(queries) - except ExecutionError as e: - logger.error(f"Failed to create application database {database}") - raise MySQLCreateApplicationDatabaseError() from e + except ExecutionError: + logger.warning(f"Failed to create application DBA {role_name}") def create_scoped_user( self, diff --git a/kubernetes/lib/charms/mysql/v0/tls.py b/kubernetes/lib/charms/mysql/v0/tls.py index ed43e71fc..5c87d9640 100644 --- a/kubernetes/lib/charms/mysql/v0/tls.py +++ b/kubernetes/lib/charms/mysql/v0/tls.py @@ -50,9 +50,9 @@ LIBID = "eb73947deedd4380a3a90d527e0878eb" LIBAPI = 0 -LIBPATCH = 11 +LIBPATCH = 12 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] SCOPE = "unit" diff --git a/kubernetes/poetry.lock b/kubernetes/poetry.lock index 098dfa0ea..871e56020 100644 --- a/kubernetes/poetry.lock +++ b/kubernetes/poetry.lock @@ -1024,14 +1024,14 @@ telemetry = ["opentelemetry-api (==1.18.0)", "opentelemetry-exporter-otlp-proto- [[package]] name = "mysql-shell-client" -version = "0.6.1" +version = "0.7.1" description = "Python client for MySQL Shell" optional = false python-versions = ">=3.10" groups = ["charm-libs"] files = [ - {file = "mysql_shell_client-0.6.1-py3-none-any.whl", hash = "sha256:364bfddb6003aeaf4599057f29d0e2d76f4463e998adf397c7ca1918f97485b2"}, - {file = "mysql_shell_client-0.6.1.tar.gz", hash = "sha256:4d67003aaf049c3003b931b95bd1f6ceb3fa129f252d4f7c6c41646869404dff"}, + {file = "mysql_shell_client-0.7.1-py3-none-any.whl", hash = "sha256:0223fde0ad97a132d1ad0c4ba91cb15c1ef1041272c2b74d764a5441f3092562"}, + {file = "mysql_shell_client-0.7.1.tar.gz", hash = "sha256:e5595cd6ec76b2c0d887dccc32c4feb18031afd7f22b5b7222596308052efa44"}, ] [package.extras] diff --git a/kubernetes/pyproject.toml b/kubernetes/pyproject.toml index 91ee32fb7..029af4ac3 100644 --- a/kubernetes/pyproject.toml +++ b/kubernetes/pyproject.toml @@ -20,7 +20,7 @@ charm-libs = [ # tempo_coordinator_k8s/v0/charm_tracing.py requires pydantic "pydantic~=1.10", # mysql/v0/*.py" - "mysql_shell_client~=0.6", + "mysql_shell_client~=0.7", # tls_certificates_interface/v1/tls_certificates.py # tls_certificates lib uses a feature only available in cryptography >=42.0.5 "cryptography>=42.0.5", diff --git a/machines/lib/charms/mysql/v0/async_replication.py b/machines/lib/charms/mysql/v0/async_replication.py index 0040fab41..e333febec 100644 --- a/machines/lib/charms/mysql/v0/async_replication.py +++ b/machines/lib/charms/mysql/v0/async_replication.py @@ -57,9 +57,9 @@ # The unique Charmhub library identifier, never change it LIBID = "4de21f1a022c4e2c87ac8e672ec16f6a" LIBAPI = 0 
-LIBPATCH = 12 +LIBPATCH = 13 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] RELATION_OFFER = "replication-offer" RELATION_CONSUMER = "replication" diff --git a/machines/lib/charms/mysql/v0/backups.py b/machines/lib/charms/mysql/v0/backups.py index b70dc9f1e..cfbcf52b8 100644 --- a/machines/lib/charms/mysql/v0/backups.py +++ b/machines/lib/charms/mysql/v0/backups.py @@ -107,9 +107,9 @@ def is_unit_blocked(self) -> bool: # The unique Charmhub library identifier, never change it LIBID = "183844304be247129572309a5fb1e47c" LIBAPI = 0 -LIBPATCH = 19 +LIBPATCH = 20 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE = "S3 repository claimed by another cluster" MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR = ( diff --git a/machines/lib/charms/mysql/v0/mysql.py b/machines/lib/charms/mysql/v0/mysql.py index 465134ea1..3b4d54018 100644 --- a/machines/lib/charms/mysql/v0/mysql.py +++ b/machines/lib/charms/mysql/v0/mysql.py @@ -136,9 +136,9 @@ def __init__( # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 101 +LIBPATCH = 102 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] UNIT_TEARDOWN_LOCKNAME = "unit-teardown" UNIT_ADD_LOCKNAME = "unit-add" @@ -1426,23 +1426,28 @@ def create_database(self, database: str) -> None: primary_address = self.get_cluster_primary_address() primary_executor = self._build_instance_tcp_executor(primary_address) + primary_client = MySQLInstanceClient(primary_executor, self._quoter) + + try: + logger.info(f"Creating application {database=}") + primary_client.create_instance_database(database) + except ExecutionError as e: + logger.error(f"Failed to create application database {database}") + raise MySQLCreateApplicationDatabaseError() from e role_name = self._build_mysql_database_dba_role(database) - role_query = self._auth_query_builder.build_database_admin_role_query(role_name, database) queries = ";".join([ - f"CREATE DATABASE `{database}`", - f"GRANT SELECT ON `{database}`.* TO '{ROLE_READ}'", - f"GRANT SELECT, INSERT, DELETE, UPDATE ON `{database}`.* TO '{ROLE_DML}'", - role_query, + self._auth_query_builder.build_instance_reader_role_update_query(database), + self._auth_query_builder.build_instance_writer_role_update_query(database), + self._auth_query_builder.build_database_admin_role_query(role_name, database), ]) try: - logger.info(f"Creating application {database=} and DBA {role_name=}") + logger.info(f"Creating application DBA {role_name=}") primary_executor.execute_sql(queries) - except ExecutionError as e: - logger.error(f"Failed to create application database {database}") - raise MySQLCreateApplicationDatabaseError() from e + except ExecutionError: + logger.warning(f"Failed to create application DBA {role_name}") def create_scoped_user( self, diff --git a/machines/lib/charms/mysql/v0/tls.py b/machines/lib/charms/mysql/v0/tls.py index ed43e71fc..5c87d9640 100644 --- a/machines/lib/charms/mysql/v0/tls.py +++ b/machines/lib/charms/mysql/v0/tls.py @@ -50,9 +50,9 @@ LIBID = "eb73947deedd4380a3a90d527e0878eb" LIBAPI = 0 -LIBPATCH = 11 +LIBPATCH = 12 -PYDEPS = ["mysql_shell_client ~= 0.6"] +PYDEPS = ["mysql_shell_client ~= 0.7"] SCOPE = "unit" diff --git a/machines/poetry.lock b/machines/poetry.lock index bc694dba6..79312cf33 100644 --- a/machines/poetry.lock +++ b/machines/poetry.lock @@ -1030,14 +1030,14 @@ telemetry = ["opentelemetry-api (==1.18.0)", "opentelemetry-exporter-otlp-proto- 
[[package]] name = "mysql-shell-client" -version = "0.6.1" +version = "0.7.1" description = "Python client for MySQL Shell" optional = false python-versions = ">=3.10" groups = ["charm-libs"] files = [ - {file = "mysql_shell_client-0.6.1-py3-none-any.whl", hash = "sha256:364bfddb6003aeaf4599057f29d0e2d76f4463e998adf397c7ca1918f97485b2"}, - {file = "mysql_shell_client-0.6.1.tar.gz", hash = "sha256:4d67003aaf049c3003b931b95bd1f6ceb3fa129f252d4f7c6c41646869404dff"}, + {file = "mysql_shell_client-0.7.1-py3-none-any.whl", hash = "sha256:0223fde0ad97a132d1ad0c4ba91cb15c1ef1041272c2b74d764a5441f3092562"}, + {file = "mysql_shell_client-0.7.1.tar.gz", hash = "sha256:e5595cd6ec76b2c0d887dccc32c4feb18031afd7f22b5b7222596308052efa44"}, ] [package.extras] diff --git a/machines/pyproject.toml b/machines/pyproject.toml index f05767b1d..ba1bc332e 100644 --- a/machines/pyproject.toml +++ b/machines/pyproject.toml @@ -23,7 +23,7 @@ charm-libs = [ # grafana_agent/v0/cos_agent.py "cosl>=0.0.50", # mysql/v0/*.py" - "mysql_shell_client~=0.6", + "mysql_shell_client~=0.7", # tls_certificates_interface/v2/tls_certificates.py "cryptography>=42.0.5", "jsonschema", diff --git a/machines/tests/unit/test_mysql.py b/machines/tests/unit/test_mysql.py index 758a350ce..c8b74d1c9 100644 --- a/machines/tests/unit/test_mysql.py +++ b/machines/tests/unit/test_mysql.py @@ -303,16 +303,19 @@ def test_create_application_database( self.mock_executor.execute_sql.assert_not_called() _get_non_system_databases.return_value = set() - query = ";".join([ - "CREATE DATABASE `test_database`", - "GRANT SELECT ON `test_database`.* TO 'charmed_read'", - "GRANT SELECT, INSERT, DELETE, UPDATE ON `test_database`.* TO 'charmed_dml'", - "CREATE ROLE 'test_database_00'", - "GRANT SELECT, INSERT, DELETE, UPDATE, EXECUTE, ALTER, ALTER ROUTINE, CREATE, CREATE ROUTINE, CREATE VIEW, DROP, INDEX, LOCK TABLES, REFERENCES, TRIGGER ON `test_database`.* TO 'test_database_00'", + creation_query = "CREATE DATABASE `test_database`" + granting_query = ";".join([ + "GRANT SELECT ON `test_database`.* TO `charmed_read`", + "GRANT SELECT, INSERT, DELETE, UPDATE ON `test_database`.* TO `charmed_dml`", + "CREATE ROLE `test_database_00`", + "GRANT SELECT, INSERT, DELETE, UPDATE, EXECUTE, ALTER, ALTER ROUTINE, CREATE, CREATE ROUTINE, CREATE VIEW, DROP, INDEX, LOCK TABLES, REFERENCES, TRIGGER ON `test_database`.* TO `test_database_00`", ]) self.mysql.create_database("test_database") - self.mock_executor.execute_sql.assert_called_once_with(query) + self.mock_executor.execute_sql.assert_has_calls([ + call(creation_query), + call(granting_query), + ]) @patch("charms.mysql.v0.mysql.MySQLBase.get_cluster_primary_address") @patch("charms.mysql.v0.mysql.MySQLBase.get_non_system_databases") @@ -335,7 +338,7 @@ def test_create_application_database_failure( def test_create_application_scoped_user(self, _get_cluster_primary_address): """Test the successful execution of create_application_scoped_user.""" create_commands = ";".join(( - "CREATE USER 'test_username'@'1.1.1.1' IDENTIFIED BY 'test_password' ATTRIBUTE '{\\\"unit_name\\\": \\\"app/0\\\"}'", + "CREATE USER `test_username`@`1.1.1.1` IDENTIFIED BY 'test_password' ATTRIBUTE '{\\\"unit_name\\\": \\\"app/0\\\"}'", "", )) grant_commands = ";".join(( @@ -857,7 +860,7 @@ def test_delete_users_for_relation_failure(self, _get_cluster_primary_address): @patch("charms.mysql.v0.mysql.MySQLBase.get_cluster_primary_address") def test_delete_user(self, _get_cluster_primary_address): """Test delete_user() method.""" - query = "DROP USER IF 
EXISTS 'testuser'@'%'" + query = "DROP USER IF EXISTS `testuser`@`%`" self.mysql.delete_user("testuser") self.mock_executor.execute_sql.assert_called_once_with(query) @@ -904,7 +907,7 @@ def test_update_user_password(self, _get_cluster_global_primary_address): """Test the successful execution of update_user_password.""" _get_cluster_global_primary_address.return_value = "1.1.1.1" - query = "ALTER USER 'test_user'@'%' IDENTIFIED BY 'test_password'" + query = "ALTER USER `test_user`@`%` IDENTIFIED BY 'test_password'" self.mysql.update_user_password("test_user", "test_password") self.mock_executor.execute_sql.assert_called_once_with(query) From c9728c6aa4001550fe80ca63831715030310b458 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Thu, 19 Feb 2026 13:34:19 -0300 Subject: [PATCH 12/40] add bases for test-app (#101) --- machines/tests/integration/integration/spaces/test_spaced_db.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/machines/tests/integration/integration/spaces/test_spaced_db.py b/machines/tests/integration/integration/spaces/test_spaced_db.py index add5e4725..5c0d4226e 100644 --- a/machines/tests/integration/integration/spaces/test_spaced_db.py +++ b/machines/tests/integration/integration/spaces/test_spaced_db.py @@ -94,6 +94,8 @@ def test_integrate_with_isolated_space(juju: Juju): isolated_app_name, constraints={"spaces": "isolated"}, bind={"database": "isolated"}, + num_units=1, + base="ubuntu@22.04", channel="latest/edge", ) juju.wait( From 07a511c0d8962b642e2e13251abb9ae6ccfa4416 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Thu, 19 Feb 2026 16:16:36 -0300 Subject: [PATCH 13/40] [MISC] integration test fixes (#89) * set trust and architecture constraint where missing * fix import path * allow grace period between deploy and first juju status call * double default delay between juju status calls * format * add bases for test-app * missed sleep * TODO comment for juju 3.6.15 --- kubernetes/tests/integration/helpers_ha.py | 2 ++ .../integration/helpers_backups.py | 13 +++++++++ .../test_async_replication.py | 5 ++++ .../test_async_replication_upgrade.py | 6 ++++ .../test_primary_switchover.py | 5 ++++ .../test_replication_data_consistency.py | 5 ++++ .../test_replication_data_isolation.py | 7 +++++ .../test_replication_logs_rotation.py | 5 ++++ .../test_replication_reelection.py | 6 ++++ .../test_replication_scaling.py | 5 ++++ .../test_replication_unit_endpoints.py | 8 ++++++ .../test_replication_variables.py | 1 + .../test_self_healing_network_cut.py | 6 ++++ .../test_self_healing_node_drain.py | 6 ++++ .../test_self_healing_pod.py | 6 ++++ .../test_self_healing_process_frozen.py | 6 ++++ .../test_self_healing_process_killed.py | 5 ++++ .../test_self_healing_restart_graceful.py | 6 ++++ .../test_self_healing_setup_crash.py | 3 ++ .../test_self_healing_stop_all.py | 7 +++++ .../test_self_healing_stop_primary.py | 6 ++++ .../high_availability/test_upgrade.py | 9 ++++++ .../test_upgrade_from_stable.py | 8 +++++- .../test_upgrade_rollback_incompat.py | 6 ++++ .../integration/relations/test_database.py | 9 ++++++ .../integration/relations/test_mysql_root.py | 5 ++++ .../roles/test_database_dba_role.py | 4 +++ .../roles/test_instance_dba_role.py | 3 ++ .../integration/roles/test_instance_roles.py | 7 +++++ .../integration/test_architecture.py | 12 ++++++++ .../integration/test_backup_aws.py | 27 +++++++++++++++--- .../integration/test_backup_ceph.py | 27 +++++++++++++++--- .../integration/test_backup_gcp.py | 28 ++++++++++++++++--- 
.../integration/integration/test_charm.py | 2 ++ .../test_cos_integration_bundle.py | 1 + .../integration/test_multi_relations.py | 5 ++++ .../test_osm_integration_bundle.py | 4 +++ .../test_saturate_max_connections.py | 4 +++ .../tests/integration/integration/test_tls.py | 2 ++ .../test_upgrade_from_stable.py | 3 ++ machines/tests/integration/helpers_ha.py | 2 ++ .../integration/helpers_backups.py | 8 ++++++ .../test_async_replication_upgrade.py | 2 ++ .../test_primary_switchover.py | 1 + .../test_replication_data_consistency.py | 1 + .../test_replication_data_isolation.py | 2 ++ .../test_replication_logs_rotation.py | 1 + .../test_replication_reelection.py | 3 ++ .../test_replication_scaling.py | 3 ++ .../test_replication_unit_endpoints.py | 1 + .../test_replication_variables.py | 1 + .../test_self_healing_network_cut.py | 2 ++ .../test_self_healing_process_frozen.py | 1 + .../test_self_healing_process_killed.py | 1 + .../test_self_healing_restart_forceful.py | 3 ++ .../test_self_healing_restart_graceful.py | 2 ++ .../test_self_healing_stop_all.py | 3 ++ .../test_self_healing_stop_primary.py | 1 + .../high_availability/test_upgrade.py | 6 ++++ .../test_upgrade_rollback_incompat.py | 5 ++++ .../integration/relations/test_database.py | 9 ++++++ .../integration/relations/test_db_router.py | 5 ++++ .../relations/test_relation_mysql_legacy.py | 5 ++++ .../integration/relations/test_shared_db.py | 4 +++ .../roles/test_database_dba_role.py | 4 +++ .../roles/test_instance_dba_role.py | 3 ++ .../integration/roles/test_instance_roles.py | 7 +++++ .../integration/spaces/test_spaced_db.py | 7 +++++ .../integration/test_architecture.py | 23 ++++++++++++--- .../integration/test_backup_aws.py | 10 +++++++ .../integration/test_backup_ceph.py | 10 +++++++ .../integration/test_backup_gcp.py | 10 +++++++ .../test_saturate_max_connections.py | 1 + .../integration/test_subordinate_charms.py | 2 ++ .../tests/integration/integration/test_tls.py | 3 ++ .../integration/integration/test_vm_reboot.py | 4 +++ .../test_upgrade_from_stable.py | 3 ++ 77 files changed, 427 insertions(+), 17 deletions(-) diff --git a/kubernetes/tests/integration/helpers_ha.py b/kubernetes/tests/integration/helpers_ha.py index 63bc473b9..bd8e32b5b 100644 --- a/kubernetes/tests/integration/helpers_ha.py +++ b/kubernetes/tests/integration/helpers_ha.py @@ -267,11 +267,13 @@ def scale_app_units(juju: Juju, app_name: str, num_units: int) -> None: juju.wait( ready=lambda status: len(status.apps[app_name].units) == num_units, timeout=20 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, app_name), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/helpers_backups.py b/kubernetes/tests/integration/integration/helpers_backups.py index 3be3cf636..b2239f976 100644 --- a/kubernetes/tests/integration/integration/helpers_backups.py +++ b/kubernetes/tests/integration/integration/helpers_backups.py @@ -2,6 +2,7 @@ # See LICENSE file for licensing details. import logging +from time import sleep import boto3 import jubilant_backports @@ -83,9 +84,14 @@ def build_and_deploy_operations( trust=True, ) + # Allow some time between deploy and status call. 
Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APPLICATION_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) logger.info("Rotating mysql credentials") @@ -103,6 +109,7 @@ def build_and_deploy_operations( ), )), timeout=TIMEOUT, + delay=2, ) juju.config(S3_INTEGRATOR, cloud_configs) s3_unit_name = get_app_units(juju, S3_INTEGRATOR)[0] @@ -116,6 +123,7 @@ def build_and_deploy_operations( jubilant_backports.all_active, MYSQL_APPLICATION_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) juju.integrate(MYSQL_APPLICATION_NAME, S3_INTEGRATOR) juju.wait( @@ -123,6 +131,7 @@ def build_and_deploy_operations( jubilant_backports.all_active, MYSQL_APPLICATION_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) @@ -218,6 +227,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_not_exist=[td1, td2]), ( "test data should not exist" @@ -234,6 +244,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( "both test data should exist" @@ -250,6 +261,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1], should_not_exist=[td2]), ( "only first test data should exist" @@ -266,6 +278,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( "both test data should exist" diff --git a/kubernetes/tests/integration/integration/high_availability/test_async_replication.py b/kubernetes/tests/integration/integration/high_availability/test_async_replication.py index 5c172bf27..d07e52ece 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_async_replication.py +++ b/kubernetes/tests/integration/integration/high_availability/test_async_replication.py @@ -86,6 +86,7 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No constraints=constraints, resources=resources, num_units=3, + trust=True, ) model_2 = Juju(model=second_model) model_2.deploy( @@ -96,6 +97,7 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No constraints=constraints, resources=resources, num_units=3, + trust=True, ) logging.info("Waiting for the applications to settle") @@ -141,6 +143,7 @@ def test_async_relate(first_model: str, second_model: str) -> None: def test_deploy_router_and_app(first_model: str) -> None: """Deploy the router and the test application.""" logging.info("Deploying the router and test application") + constraints = {"arch": architecture.architecture} model_1 = Juju(model=first_model) model_1.deploy( charm=MYSQL_ROUTER_NAME, @@ -149,6 +152,7 @@ def test_deploy_router_and_app(first_model: str) -> None: channel="8.0/edge", num_units=1, trust=True, + constraints=constraints, ) model_1.deploy( charm=MYSQL_TEST_APP_NAME, @@ -157,6 +161,7 @@ def test_deploy_router_and_app(first_model: str) -> None: channel="latest/edge", num_units=1, trust=False, + constraints=constraints, ) 
logging.info("Relating the router and test application") diff --git a/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py b/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py index a78c5031a..ed68cb0cd 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py +++ b/kubernetes/tests/integration/integration/high_availability/test_async_replication_upgrade.py @@ -88,6 +88,7 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No constraints=constraints, resources=resources, num_units=3, + trust=True, ) model_2 = Juju(model=second_model) model_2.deploy( @@ -98,6 +99,7 @@ def test_build_and_deploy(first_model: str, second_model: str, charm: str) -> No constraints=constraints, resources=resources, num_units=3, + trust=True, ) logging.info("Waiting for the applications to settle") @@ -143,6 +145,7 @@ def test_async_relate(first_model: str, second_model: str) -> None: def test_deploy_test_app(first_model: str) -> None: """Deploy the test application.""" logging.info("Deploying the test application") + constraints = {"arch": architecture.architecture} model_1 = Juju(model=first_model) model_1.deploy( charm=MYSQL_TEST_APP_NAME, @@ -150,6 +153,7 @@ def test_deploy_test_app(first_model: str) -> None: base="ubuntu@22.04", channel="latest/edge", num_units=1, + constraints=constraints, ) logging.info("Relating the test application") @@ -278,6 +282,7 @@ def run_upgrade_from_edge(juju: Juju, app_name: str, charm: str) -> None: juju.wait( ready=wait_for_unit_message(app_name, upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Resume upgrade") @@ -293,6 +298,7 @@ def run_upgrade_from_edge(juju: Juju, app_name: str, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, app_name), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") diff --git a/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py b/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py index 4f9428072..c0c8a2e3d 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py +++ b/kubernetes/tests/integration/integration/high_availability/test_primary_switchover.py @@ -7,6 +7,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, get_app_name, @@ -35,7 +36,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -43,6 +46,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -57,6 +61,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py b/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py index 4761bb008..805cc8a80 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_data_consistency.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... import architecture from ...helpers import generate_random_string from ...helpers_ha import ( CHARM_METADATA, @@ -32,7 +33,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -40,6 +43,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -54,6 +58,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py b/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py index 1830f51d2..a9442f18e 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_data_isolation.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, insert_mysql_test_data, @@ -30,7 +31,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -38,6 +41,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -52,6 +56,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -72,6 +77,7 @@ def test_cluster_data_isolation(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=1, + trust=True, ) logging.info("Wait for application to become active") @@ -79,6 +85,7 @@ def test_cluster_data_isolation(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, mysql_other_app_name), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) table_name = "cluster_isolation_table" diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py index a899a89d7..7b984a7c2 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py @@ -17,6 +17,7 @@ from constants import CONTAINER_NAME, MYSQL_LOG_DIR +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, get_app_leader, @@ -41,7 +42,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -49,6 +52,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -63,6 +67,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py b/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py index a6c3b9729..9715eba70 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_reelection.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers import generate_random_string from ...helpers_ha import ( CHARM_METADATA, @@ -34,7 +35,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -42,6 +45,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -56,6 +60,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -72,6 +77,7 @@ def test_kill_primary_check_reelection(juju: Juju) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) # Confirm that the new primary unit is different diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py b/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py index ae0990e91..b1cf25ad9 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_scaling.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... import architecture from ...helpers import generate_random_string from ...helpers_ha import ( CHARM_METADATA, @@ -32,7 +33,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -40,6 +43,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -54,6 +58,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py b/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py index 7c6abed2b..d82e5727e 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_unit_endpoints.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, get_app_units, @@ -35,7 +36,9 @@ def test_deploy_highly_available_cluster_1(juju: Juju, charm: str) -> None: config={"cluster-name": MYSQL_APP_CLUSTER, "profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm="mysql-test-app", app=MYSQL_TEST_APP_NAME_1, @@ -43,6 +46,7 @@ def test_deploy_highly_available_cluster_1(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -56,11 +60,13 @@ def test_deploy_highly_available_cluster_1(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME_1), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_TEST_APP_NAME_1), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -95,11 +101,13 @@ def test_deploy_highly_available_cluster_2(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME_2), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_TEST_APP_NAME_2), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py b/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py index 820bd1cdd..8a4da0737 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_variables.py @@ -38,6 +38,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py index 9ba12702b..7412ae8b8 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_network_cut.py @@ -12,6 +12,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_instances_online, @@ -39,7 +40,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -47,6 +50,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -61,6 +65,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -94,6 +99,7 @@ def test_network_cut_affecting_an_instance(juju: Juju, continuous_writes, chaos_ juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary, "active"), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Check that all units are online") diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py index 7504d803c..974539f1f 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_node_drain.py @@ -9,6 +9,7 @@ from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import PersistentVolume, PersistentVolumeClaim, Pod +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_units_writes_increment, @@ -36,7 +37,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -44,6 +47,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -58,6 +62,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -82,6 +87,7 @@ def test_pod_eviction_and_pvc_deletion(juju: Juju, continuous_writes) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensuring that all instances have incrementing continuous writes") diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py index b1de5167d..00cd9f54d 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_pod.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers import generate_random_string from ...helpers_ha import ( CHARM_METADATA, @@ -34,7 +35,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -42,6 +45,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -56,6 +60,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -74,6 +79,7 @@ def test_single_unit_pod_delete(juju: Juju) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Write data to unit and verify that data was written") diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py index a8c0d3c73..76e2119e2 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_frozen.py @@ -14,6 +14,7 @@ from constants import CONTAINER_NAME +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_units_writes_increment, @@ -41,7 +42,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -49,6 +52,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -63,6 +67,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -133,6 +138,7 @@ def test_freeze_db_process(juju: Juju, continuous_writes) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensuring that all instances have incrementing continuous writes") diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py index fece4062f..1ebfbd7bf 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_process_killed.py @@ -9,6 +9,7 @@ from constants import CONTAINER_NAME +from ... 
import architecture from ...helpers import generate_random_string from ...helpers_ha import ( CHARM_METADATA, @@ -40,7 +41,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -48,6 +51,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -62,6 +66,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py index 89ffb2a58..3da843bac 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... import architecture from ...helpers import execute_queries_on_unit from ...helpers_ha import ( CHARM_METADATA, @@ -36,7 +37,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -44,6 +47,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -58,6 +62,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -98,6 +103,7 @@ def test_cluster_manual_rejoin(juju: Juju, continuous_writes) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary_unit, "active"), timeout=20 * MINUTE_SECS, + delay=2, ) # Ensure continuous writes still incrementing for all units diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py index 58e59df40..03ddfb113 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_setup_crash.py @@ -35,6 +35,7 @@ def test_deploy_single_unit_cluster(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -51,6 +52,7 @@ def test_crash_during_cluster_setup(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.any_waiting, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Deleting pod") @@ -61,4 +63,5 @@ def test_crash_during_cluster_setup(juju: Juju, charm: str) -> None: juju.wait( 
ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py index a7b1f944e..331359190 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_all.py @@ -6,6 +6,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_instances_online, @@ -34,7 +35,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -42,6 +45,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -56,6 +60,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -87,6 +92,7 @@ async def test_graceful_full_cluster_crash(juju: Juju, continuous_writes) -> Non wait_for_unit_status(MYSQL_APP_NAME, f"{MYSQL_APP_NAME}/2", "maintenance")(status), )), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Waiting units to be back online") juju.wait( @@ -96,6 +102,7 @@ async def test_graceful_full_cluster_crash(juju: Juju, continuous_writes) -> Non wait_for_unit_status(MYSQL_APP_NAME, f"{MYSQL_APP_NAME}/2", "active")(status), )), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Check that all units are online") diff --git a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py index ab2646233..730bddc05 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py +++ b/kubernetes/tests/integration/integration/high_availability/test_self_healing_stop_primary.py @@ -8,6 +8,7 @@ from constants import CONTAINER_NAME +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_units_writes_increment, @@ -34,7 +35,9 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: config={"profile": "testing"}, resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, num_units=3, + trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -42,6 +45,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: channel="latest/edge", config={"sleep_interval": 300}, num_units=1, + constraints=constraints, ) juju.integrate( @@ -56,6 +60,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -82,6 +87,7 @@ async def test_graceful_crash_of_primary(juju: Juju, continuous_writes) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) new_mysql_primary_unit = get_mysql_primary_unit(juju, MYSQL_APP_NAME) diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade.py index 99f69d86a..fd6fd0493 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_upgrade.py +++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade.py @@ -11,6 +11,7 @@ import jubilant_backports from jubilant_backports import Juju, TaskError +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_units_writes_increment, @@ -44,6 +45,7 @@ def test_deploy_latest(juju: Juju) -> None: num_units=3, trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, @@ -51,6 +53,7 @@ def test_deploy_latest(juju: Juju) -> None: channel="latest/edge", num_units=1, trust=False, + constraints=constraints, ) juju.integrate( @@ -65,6 +68,7 @@ def test_deploy_latest(juju: Juju) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -108,6 +112,7 @@ def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_unit_message(MYSQL_APP_NAME, mysql_upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Resume upgrade") @@ -123,6 +128,7 @@ def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") @@ -154,6 +160,7 @@ def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_upgrade_unit, "blocked"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes on remaining units") @@ -170,6 +177,7 @@ def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_unit_message(MYSQL_APP_NAME, mysql_upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Resume upgrade") @@ -185,6 +193,7 @@ def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous 
writes after rollback procedure") diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py index b6084bebe..f394f10c7 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py +++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade_from_stable.py @@ -7,6 +7,7 @@ import jubilant_backports from jubilant_backports import Juju, TaskError +from ... import architecture from ...helpers_ha import ( CHARM_METADATA, check_mysql_units_writes_increment, @@ -38,13 +39,15 @@ def test_deploy_stable(juju: Juju) -> None: num_units=3, trust=True, ) + + constraints = {"arch": architecture.architecture} juju.deploy( charm=MYSQL_TEST_APP_NAME, app=MYSQL_TEST_APP_NAME, base="ubuntu@22.04", channel="latest/edge", num_units=1, - trust=False, + constraints=constraints, ) juju.integrate( @@ -59,6 +62,7 @@ def test_deploy_stable(juju: Juju) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -102,6 +106,7 @@ def test_upgrade_from_stable(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_unit_message(MYSQL_APP_NAME, mysql_upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Resume upgrade") @@ -117,6 +122,7 @@ def test_upgrade_from_stable(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") diff --git a/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py b/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py index 66cb3b88c..79014153e 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py +++ b/kubernetes/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py @@ -51,6 +51,7 @@ def test_build_and_deploy(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -91,6 +92,7 @@ def test_upgrade_to_failing(juju: Juju, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Get first upgrading unit") @@ -100,6 +102,7 @@ def test_upgrade_to_failing(juju: Juju, charm: str) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, upgrade_unit, "blocked"), timeout=10 * MINUTE_SECS, + delay=2, ) @@ -132,12 +135,14 @@ def test_rollback(juju: Juju, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Wait for upgrade to complete on first upgrading unit") juju.wait( ready=wait_for_unit_message(MYSQL_APP_NAME, mysql_upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Ensure rollback has taken place") @@ -157,6 +162,7 @@ def test_rollback(juju: Juju, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/relations/test_database.py 
b/kubernetes/tests/integration/integration/relations/test_database.py index 6d0a98fd9..06bb6d139 100644 --- a/kubernetes/tests/integration/integration/relations/test_database.py +++ b/kubernetes/tests/integration/integration/relations/test_database.py @@ -8,6 +8,7 @@ from jubilant_backports import Juju from ... import markers +from ...architecture import architecture from ...helpers_ha import ( CHARM_METADATA, MINUTE_SECS, @@ -35,11 +36,13 @@ def test_build_and_deploy(juju: Juju, charm): trust=True, ) + constraints = {"arch": architecture} juju.deploy( APPLICATION_APP_NAME, num_units=2, channel="latest/edge", base="ubuntu@22.04", + constraints=constraints, ) @@ -59,12 +62,14 @@ def test_relation_creation_eager(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) logging.info("Waiting for database app to be active...") juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) @@ -74,6 +79,7 @@ def test_relation_creation_databag(juju: Juju): juju.wait( ready=jubilant_backports.all_active, timeout=15 * MINUTE_SECS, + delay=2, ) relation_data = get_relation_data(juju, APPLICATION_APP_NAME, "database") @@ -86,6 +92,7 @@ def test_relation_creation(juju: Juju): juju.wait( ready=jubilant_backports.all_active, timeout=15 * MINUTE_SECS, + delay=2, ) relation_data = get_relation_data(juju, APPLICATION_APP_NAME, "database") @@ -104,9 +111,11 @@ def test_relation_broken(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/relations/test_mysql_root.py b/kubernetes/tests/integration/integration/relations/test_mysql_root.py index beaebfb56..378ce3360 100644 --- a/kubernetes/tests/integration/integration/relations/test_mysql_root.py +++ b/kubernetes/tests/integration/integration/relations/test_mysql_root.py @@ -7,6 +7,7 @@ import jubilant_backports from jubilant_backports import Juju +from ... 
import architecture from ...helpers_ha import ( CHARM_METADATA, MINUTE_SECS, @@ -34,11 +35,13 @@ def test_build_and_deploy(juju: Juju, charm): base="ubuntu@22.04", trust=True, ) + constraints = {"arch": architecture.architecture} juju.deploy( APPLICATION_APP_NAME, num_units=2, channel="latest/edge", base="ubuntu@22.04", + constraints=constraints, ) @@ -57,10 +60,12 @@ def test_relation_creation_eager(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) logging.info("Waiting for database app to be active...") juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/roles/test_database_dba_role.py b/kubernetes/tests/integration/integration/roles/test_database_dba_role.py index 2ff5deb95..3008c5075 100644 --- a/kubernetes/tests/integration/integration/roles/test_database_dba_role.py +++ b/kubernetes/tests/integration/integration/roles/test_database_dba_role.py @@ -50,6 +50,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) juju.wait( ready=lambda status: all(( @@ -63,6 +64,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=15 * MINUTE_SECS, + delay=2, ) @@ -78,6 +80,7 @@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) juju.config( @@ -90,6 +93,7 @@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}2", DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) diff --git a/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py b/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py index bf00e02a6..0c317bbe7 100644 --- a/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py +++ b/kubernetes/tests/integration/integration/roles/test_instance_dba_role.py @@ -40,6 +40,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) juju.wait( ready=lambda status: all(( @@ -49,6 +50,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=15 * MINUTE_SECS, + delay=2, ) @@ -64,6 +66,7 @@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, INTEGRATOR_APP_NAME, DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) diff --git a/kubernetes/tests/integration/integration/roles/test_instance_roles.py b/kubernetes/tests/integration/integration/roles/test_instance_roles.py index 2088fc2de..c863e2c96 100644 --- a/kubernetes/tests/integration/integration/roles/test_instance_roles.py +++ b/kubernetes/tests/integration/integration/roles/test_instance_roles.py @@ -51,6 +51,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) juju.wait( ready=lambda status: all(( @@ -64,6 +65,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=15 
* MINUTE_SECS, + delay=2, ) @@ -79,6 +81,7 @@ def test_charmed_read_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -149,6 +152,7 @@ def test_charmed_read_role(juju: Juju): ), )), timeout=15 * MINUTE_SECS, + delay=2, ) @@ -163,6 +167,7 @@ def test_charmed_dml_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) juju.config( @@ -175,6 +180,7 @@ def test_charmed_dml_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}2", DATABASE_APP_NAME ), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -260,4 +266,5 @@ def test_charmed_dml_role(juju: Juju): ), )), timeout=15 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_architecture.py b/kubernetes/tests/integration/integration/test_architecture.py index 9174f97c6..d051f710b 100644 --- a/kubernetes/tests/integration/integration/test_architecture.py +++ b/kubernetes/tests/integration/integration/test_architecture.py @@ -3,6 +3,8 @@ # See LICENSE file for licensing details. +from time import sleep + from jubilant_backports import Juju from .. import markers @@ -26,6 +28,10 @@ def test_arm_charm_on_amd_host(juju: Juju) -> None: base="ubuntu@22.04", ) + # Allow some time between deploy and status call. Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + juju.wait( ready=lambda status: all(( *( @@ -34,6 +40,7 @@ def test_arm_charm_on_amd_host(juju: Juju) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -51,6 +58,10 @@ def test_amd_charm_on_arm_host(juju: Juju) -> None: base="ubuntu@22.04", ) + # Allow some time between deploy and status call. Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + juju.wait( ready=lambda status: all(( *( @@ -59,6 +70,7 @@ def test_amd_charm_on_arm_host(juju: Juju) -> None: ), )), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_backup_aws.py b/kubernetes/tests/integration/integration/test_backup_aws.py index a91769ebd..faeaf6bb1 100644 --- a/kubernetes/tests/integration/integration/test_backup_aws.py +++ b/kubernetes/tests/integration/integration/test_backup_aws.py @@ -5,6 +5,7 @@ import logging import socket from pathlib import Path +from time import sleep import boto3 import jubilant_backports @@ -87,11 +88,15 @@ def test_build_and_deploy(juju: Juju, charm) -> None: resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, trust=True, ) + # Allow some time between deploy and status call. 
Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -118,6 +123,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -164,6 +170,7 @@ def test_backup(juju: Juju, cloud_configs_aws) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -231,6 +238,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -295,6 +303,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -336,6 +345,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # relate to S3 integrator @@ -346,6 +356,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -380,13 +391,17 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( # Might take a few minutes to get past this - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -446,6 +461,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -463,7 +479,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_backup_ceph.py b/kubernetes/tests/integration/integration/test_backup_ceph.py index e197427b3..bcc703cad 100644 --- a/kubernetes/tests/integration/integration/test_backup_ceph.py +++ b/kubernetes/tests/integration/integration/test_backup_ceph.py @@ -176,10 +176,15 @@ def test_build_and_deploy(juju: Juju, charm) -> None: trust=True, ) + # Allow some time between deploy and status call. 
Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + time.sleep(30) + juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -206,6 +211,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -250,6 +256,7 @@ def test_backup(juju: Juju, cloud_credentials, cloud_configs) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -315,6 +322,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) - jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -379,6 +387,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) - ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -418,6 +427,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_conf juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # relate to S3 integrator @@ -428,6 +438,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_conf jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -462,13 +473,17 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_conf jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( # Might take a few minutes to get past this - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -528,6 +543,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_conf ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -545,7 +561,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_conf logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_backup_gcp.py b/kubernetes/tests/integration/integration/test_backup_gcp.py index c1c00f9aa..7359c9303 100644 --- a/kubernetes/tests/integration/integration/test_backup_gcp.py +++ b/kubernetes/tests/integration/integration/test_backup_gcp.py @@ -5,6 +5,7 @@ import logging import socket from pathlib import Path +from time import sleep import boto3 import jubilant_backports @@ -91,6 +92,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: 
ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -117,6 +119,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -163,6 +166,7 @@ def test_backup(juju: Juju, cloud_configs_gcp) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -230,6 +234,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -294,6 +299,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -332,9 +338,14 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: trust=True, ) + # Allow some time between deploy and status call. Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # relate to S3 integrator @@ -345,6 +356,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -379,13 +391,17 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( # Might take a few minutes to get past this - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -445,6 +461,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -462,7 +479,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_charm.py b/kubernetes/tests/integration/integration/test_charm.py index 3c54ebb82..866b1444c 100644 --- a/kubernetes/tests/integration/integration/test_charm.py +++ b/kubernetes/tests/integration/integration/test_charm.py @@ -48,6 +48,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=TIMEOUT, + delay=2, ) app_units = get_app_units(juju, APP_NAME) @@ -92,6 +93,7 @@ 
def test_scale_up_from_zero(juju: Juju) -> None: juju.wait( ready=lambda status: len(status.apps[APP_NAME].units) == 0, timeout=TIMEOUT, + delay=2, ) logger.info("Scaling back up to 3 units") diff --git a/kubernetes/tests/integration/integration/test_cos_integration_bundle.py b/kubernetes/tests/integration/integration/test_cos_integration_bundle.py index 7d90fb7ac..67f508439 100644 --- a/kubernetes/tests/integration/integration/test_cos_integration_bundle.py +++ b/kubernetes/tests/integration/integration/test_cos_integration_bundle.py @@ -44,4 +44,5 @@ def test_deploy_bundle_with_cos_integrations(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, "mysql-k8s"), timeout=TIMEOUT, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_multi_relations.py b/kubernetes/tests/integration/integration/test_multi_relations.py index 1a66bf481..ce38070c4 100644 --- a/kubernetes/tests/integration/integration/test_multi_relations.py +++ b/kubernetes/tests/integration/integration/test_multi_relations.py @@ -6,6 +6,7 @@ from jubilant_backports import CLIError, Juju from tenacity import RetryError, Retrying, retry_if_exception_type, stop_after_attempt, wait_fixed +from .. import architecture from ..helpers_ha import CHARM_METADATA, MINUTE_SECS, wait_for_apps_status, wait_for_unit_status MYSQL_APP_NAME = "mysql" @@ -27,6 +28,7 @@ def test_build_and_deploy(juju: Juju, charm): trust=True, ) + constraints = {"arch": architecture.architecture} for idx in range(SCALE_APPS): juju.deploy( "mysql-test-app", @@ -35,6 +37,7 @@ def test_build_and_deploy(juju: Juju, charm): channel="latest/edge", config={"database_name": f"database{idx}", "sleep_interval": "2000"}, base="ubuntu@22.04", + constraints=constraints, ) juju.deploy( "mysql-router-k8s", @@ -43,6 +46,7 @@ def test_build_and_deploy(juju: Juju, charm): channel="8.0/edge", trust=True, base="ubuntu@22.04", + constraints=constraints, ) # Wait until deployment is complete in attempt to reduce CPU stress @@ -138,6 +142,7 @@ def test_scale_in(juju: Juju): # All jubilant.Juju operations risk raising intermittent CLIErrors under CPU pressure, # so we wrap each of them +# TODO: Try to remove when juju 3.6.15+ is out def retry_if_cli_error(fn, *, max_attempts=10): try: for attempt in Retrying( diff --git a/kubernetes/tests/integration/integration/test_osm_integration_bundle.py b/kubernetes/tests/integration/integration/test_osm_integration_bundle.py index bb7f14195..e12163805 100644 --- a/kubernetes/tests/integration/integration/test_osm_integration_bundle.py +++ b/kubernetes/tests/integration/integration/test_osm_integration_bundle.py @@ -99,6 +99,7 @@ def test_deploy_and_relate_osm_bundle(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME, "mongodb"), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for all apps to have units") @@ -110,6 +111,7 @@ def test_deploy_and_relate_osm_bundle(juju: Juju, charm) -> None: and len(status.apps["mongodb"].units) >= 1 ), timeout=TIMEOUT, + delay=2, ) logger.info("Relate kafka and zookeeper") @@ -118,6 +120,7 @@ def test_deploy_and_relate_osm_bundle(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, "zookeeper"), timeout=TIMEOUT, + delay=2, ) logger.info("Relate keystone and mysql") @@ -128,6 +131,7 @@ def test_deploy_and_relate_osm_bundle(juju: Juju, charm) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME, "osm-keystone"), 
error=wait_for_apps_status(jubilant_backports.any_error, APP_NAME, "osm-keystone"), timeout=TIMEOUT, + delay=2, ) logger.info("Relate osm-pol and mongo") diff --git a/kubernetes/tests/integration/integration/test_saturate_max_connections.py b/kubernetes/tests/integration/integration/test_saturate_max_connections.py index ff941104e..d0f4cc607 100644 --- a/kubernetes/tests/integration/integration/test_saturate_max_connections.py +++ b/kubernetes/tests/integration/integration/test_saturate_max_connections.py @@ -8,6 +8,7 @@ from jubilant_backports import Juju from mysql.connector.errors import OperationalError +from .. import architecture from ..connector import create_db_connections from ..helpers_ha import CHARM_METADATA, MINUTE_SECS, get_app_units, get_unit_address @@ -33,6 +34,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: def test_deploy_and_relate_test_app(juju: Juju) -> None: config = {"auto_start_writes": False, "sleep_interval": "500"} + constraints = {"arch": architecture.architecture} logger.info("Deploying test app") juju.deploy( "mysql-test-app", @@ -41,6 +43,7 @@ def test_deploy_and_relate_test_app(juju: Juju) -> None: base="ubuntu@22.04", config=config, channel="latest/edge", + constraints=constraints, ) logger.info("Relating test app to mysql") @@ -50,6 +53,7 @@ def test_deploy_and_relate_test_app(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=10 * MINUTE_SECS, + delay=2, ) diff --git a/kubernetes/tests/integration/integration/test_tls.py b/kubernetes/tests/integration/integration/test_tls.py index a72ef1d97..235e8f784 100644 --- a/kubernetes/tests/integration/integration/test_tls.py +++ b/kubernetes/tests/integration/integration/test_tls.py @@ -66,6 +66,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=TIMEOUT, + delay=2, ) @@ -111,6 +112,7 @@ def test_enable_tls(juju: Juju) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, tls_app_name), timeout=TIMEOUT, + delay=2, ) # Relate with TLS charm diff --git a/kubernetes/tests/integration/release/high_availability/test_upgrade_from_stable.py b/kubernetes/tests/integration/release/high_availability/test_upgrade_from_stable.py index f0cab8d4a..a8d1dd062 100644 --- a/kubernetes/tests/integration/release/high_availability/test_upgrade_from_stable.py +++ b/kubernetes/tests/integration/release/high_availability/test_upgrade_from_stable.py @@ -112,6 +112,7 @@ def deploy_stable(juju: Juju, revision: int, image: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -149,6 +150,7 @@ def upgrade_from_stable(juju: Juju, charm: str) -> None: juju.wait( ready=wait_for_unit_message(MYSQL_APP_NAME, mysql_upgrade_unit, "upgrade completed"), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Resume upgrade") @@ -164,6 +166,7 @@ def upgrade_from_stable(juju: Juju, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") diff --git a/machines/tests/integration/helpers_ha.py b/machines/tests/integration/helpers_ha.py index 6683f9168..36e8f66d3 100644 --- a/machines/tests/integration/helpers_ha.py +++ b/machines/tests/integration/helpers_ha.py @@ -142,12 +142,14 @@ def scale_app_units(juju: Juju, app_name: str, num_units: int) -> None: juju.wait( ready=lambda status: len(status.apps[app_name].units) == 
num_units, timeout=20 * MINUTE_SECS, + delay=2, ) if num_units > 0: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, app_name), timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/helpers_backups.py b/machines/tests/integration/integration/helpers_backups.py index 5c43ac458..533a7c221 100644 --- a/machines/tests/integration/integration/helpers_backups.py +++ b/machines/tests/integration/integration/helpers_backups.py @@ -84,6 +84,7 @@ def build_and_deploy_operations( juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APPLICATION_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) logger.info("Rotating mysql credentials") @@ -102,6 +103,7 @@ def build_and_deploy_operations( ), )), timeout=TIMEOUT, + delay=2, ) juju.config(S3_INTEGRATOR, cloud_configs) s3_unit_name = get_app_units(juju, S3_INTEGRATOR)[0] @@ -115,6 +117,7 @@ def build_and_deploy_operations( jubilant_backports.all_active, MYSQL_APPLICATION_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) juju.integrate(MYSQL_APPLICATION_NAME, S3_INTEGRATOR) juju.wait( @@ -122,6 +125,7 @@ def build_and_deploy_operations( jubilant_backports.all_active, MYSQL_APPLICATION_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) @@ -220,6 +224,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_not_exist=[td1, td2]), ( "test data should not exist" @@ -236,6 +241,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( "both test data should exist" @@ -252,6 +258,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1], should_not_exist=[td2]), ( "only first test data should exist" @@ -268,6 +275,7 @@ def pitr_operations( jubilant_backports.all_agents_idle(status, MYSQL_APPLICATION_NAME, S3_INTEGRATOR), )), timeout=TIMEOUT, + delay=2, ) assert check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( "both test data should exist" diff --git a/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py b/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py index 4cc89ff9c..4d47d1d43 100644 --- a/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py +++ b/machines/tests/integration/integration/high_availability/test_async_replication_upgrade.py @@ -264,12 +264,14 @@ def run_upgrade_from_edge(juju: Juju, app_name: str, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, app_name), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Wait for upgrade to complete") juju.wait( ready=lambda status: jubilant_backports.all_active(status, app_name), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") diff --git a/machines/tests/integration/integration/high_availability/test_primary_switchover.py b/machines/tests/integration/integration/high_availability/test_primary_switchover.py index 43a2cde7e..3246ff0d8 100644 --- a/machines/tests/integration/integration/high_availability/test_primary_switchover.py +++ 
b/machines/tests/integration/integration/high_availability/test_primary_switchover.py @@ -55,6 +55,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py b/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py index 87dea7677..f0468fc9e 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py +++ b/machines/tests/integration/integration/high_availability/test_replication_data_consistency.py @@ -52,6 +52,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py b/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py index 3f36fa408..2ac1f4a51 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py +++ b/machines/tests/integration/integration/high_availability/test_replication_data_isolation.py @@ -51,6 +51,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -80,6 +81,7 @@ def test_cluster_data_isolation(juju: Juju, charm: str) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, mysql_other_app_name), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) table_name = "cluster_isolation_table" diff --git a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py index 341deffad..291e5d365 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py +++ b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py @@ -58,6 +58,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_replication_reelection.py b/machines/tests/integration/integration/high_availability/test_replication_reelection.py index a4d36a0ce..e16b3e3ee 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_reelection.py +++ b/machines/tests/integration/integration/high_availability/test_replication_reelection.py @@ -54,6 +54,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -70,11 +71,13 @@ def test_kill_primary_check_reelection(juju: Juju) -> None: juju.wait( ready=lambda status: len(status.apps[MYSQL_APP_NAME].units) == 2, timeout=20 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) # Confirm that the new primary unit is different diff --git a/machines/tests/integration/integration/high_availability/test_replication_scaling.py b/machines/tests/integration/integration/high_availability/test_replication_scaling.py index 6fd131aeb..91d50e332 
100644 --- a/machines/tests/integration/integration/high_availability/test_replication_scaling.py +++ b/machines/tests/integration/integration/high_availability/test_replication_scaling.py @@ -53,6 +53,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -75,11 +76,13 @@ def test_scaling_without_data_loss(juju: Juju) -> None: juju.wait( ready=lambda status: len(status.apps[MYSQL_APP_NAME].units) == 3, timeout=20 * MINUTE_SECS, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) # Ensure that the data still exists in all the units diff --git a/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py b/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py index eaf6703a5..29b61f457 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py +++ b/machines/tests/integration/integration/high_availability/test_replication_unit_endpoints.py @@ -63,6 +63,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_replication_variables.py b/machines/tests/integration/integration/high_availability/test_replication_variables.py index 4b5705529..4fdb844a1 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_variables.py +++ b/machines/tests/integration/integration/high_availability/test_replication_variables.py @@ -49,6 +49,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py b/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py index 8bb88c582..2d6e2c150 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_network_cut.py @@ -69,6 +69,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -137,6 +138,7 @@ def test_network_cut(juju: Juju, continuous_writes) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary_unit, "active"), timeout=20 * MINUTE_SECS, + delay=2, ) # Ensure continuous writes still incrementing for all units diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py b/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py index 7da835506..1ab60b402 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_process_frozen.py @@ -62,6 +62,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py 
b/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py index efc88a326..f5e2cc791 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_process_killed.py @@ -56,6 +56,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py b/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py index 3ced5df38..62c26f6be 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_restart_forceful.py @@ -63,6 +63,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -97,12 +98,14 @@ def test_sst_test(juju: Juju, continuous_writes): juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary_unit, "maintenance"), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Waiting unit to be back online") juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary_unit, "active"), timeout=20 * MINUTE_SECS, + delay=2, ) new_mysql_primary_unit = get_mysql_primary_unit(juju, MYSQL_APP_NAME) diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py b/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py index bc970080f..6803009d1 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_restart_graceful.py @@ -57,6 +57,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -104,6 +105,7 @@ def test_cluster_manual_rejoin(juju: Juju, continuous_writes) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, mysql_primary_unit, "active"), timeout=20 * MINUTE_SECS, + delay=2, ) # Ensure continuous writes still incrementing for all units diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py b/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py index 6f422c094..ae9b3ddf5 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_stop_all.py @@ -63,6 +63,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -112,6 +113,7 @@ def test_cluster_pause(juju: Juju, continuous_writes) -> None: wait_for_unit_status(MYSQL_APP_NAME, f"{MYSQL_APP_NAME}/2", "maintenance")(status), )), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Waiting units to be back online") juju.wait( @@ -121,6 +123,7 @@ def test_cluster_pause(juju: Juju, continuous_writes) -> None: wait_for_unit_status(MYSQL_APP_NAME, f"{MYSQL_APP_NAME}/2", "active")(status), )), timeout=20 * MINUTE_SECS, + delay=2, ) # Ensure continuous writes still incrementing for all units 
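The recurring delay=2 argument in all of these hunks is jubilant's polling interval: Juju.wait re-runs juju status and re-evaluates its predicates on every tick, so a longer pause between ticks means fewer CLI invocations on an already loaded runner. A stripped-down model of the loop, for orientation only (the real implementation differs in detail, e.g. it also requires the ready predicate to hold across several consecutive polls):

import time

def wait_model(get_status, ready, *, error=None, delay=2.0, timeout=1200.0):
    """Illustrative stand-in for jubilant's Juju.wait polling loop."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        status = get_status()  # stands in for one `juju status` round-trip
        if error is not None and error(status):
            raise RuntimeError("error predicate matched")
        if ready(status):
            return status
        time.sleep(delay)  # the interval these hunks raise to 2 seconds
    raise TimeoutError("ready predicate never matched within timeout")

Predicates such as wait_for_apps_status(jubilant_backports.all_active, MYSQL_APP_NAME) are simply closures over the parsed status, which is why bare checks like jubilant_backports.all_active can also be passed directly when the whole model is expected to settle.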
diff --git a/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py b/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py index dacb3cfe6..6cfb1ea9e 100644 --- a/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py +++ b/machines/tests/integration/integration/high_availability/test_self_healing_stop_primary.py @@ -65,6 +65,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/high_availability/test_upgrade.py b/machines/tests/integration/integration/high_availability/test_upgrade.py index 47a6bf67b..a017598d7 100644 --- a/machines/tests/integration/integration/high_availability/test_upgrade.py +++ b/machines/tests/integration/integration/high_availability/test_upgrade.py @@ -57,6 +57,7 @@ def test_deploy_latest(juju: Juju) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -90,12 +91,14 @@ def test_upgrade_from_edge(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Wait for upgrade to complete") juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") @@ -126,6 +129,7 @@ def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.any_blocked, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes on all units") @@ -141,12 +145,14 @@ def test_fail_and_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Wait for upgrade to complete") juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes after rollback procedure") diff --git a/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py b/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py index 611cdb0fb..b34ea96b9 100644 --- a/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py +++ b/machines/tests/integration/integration/high_availability/test_upgrade_rollback_incompat.py @@ -78,6 +78,7 @@ def test_build_and_deploy(juju: Juju, charm: str) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -114,6 +115,7 @@ def test_upgrade_to_failing(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Get first upgrading unit") @@ -125,6 +127,7 @@ def test_upgrade_to_failing(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=wait_for_unit_status(MYSQL_APP_NAME, upgrade_unit, "blocked"), timeout=10 * MINUTE_SECS, + delay=2, ) @@ -165,10 +168,12 @@ def test_rollback(juju: Juju, charm: str, continuous_writes) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, 
MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure rollback has taken place") diff --git a/machines/tests/integration/integration/relations/test_database.py b/machines/tests/integration/integration/relations/test_database.py index ef44967da..df590f2b3 100644 --- a/machines/tests/integration/integration/relations/test_database.py +++ b/machines/tests/integration/integration/relations/test_database.py @@ -58,11 +58,13 @@ def test_build_and_deploy(juju: Juju, charm): ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) @@ -140,11 +142,13 @@ def test_relation_creation_databag(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) relation_data = get_relation_data(juju, APPLICATION_APP_NAME, DB_RELATION_NAME) @@ -160,6 +164,7 @@ def test_relation_creation(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_active, *APPS), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) relation_data = get_relation_data(juju, APPLICATION_APP_NAME, DB_RELATION_NAME) @@ -187,6 +192,7 @@ def test_read_only_endpoints(juju: Juju): juju, app_name=DATABASE_APP_NAME, relation_name=DB_RELATION_NAME ), timeout=5 * MINUTE_SECS, + delay=2, ) # increase the number of units @@ -201,6 +207,7 @@ def test_read_only_endpoints(juju: Juju): juju, app_name=DATABASE_APP_NAME, relation_name=DB_RELATION_NAME ), timeout=5 * MINUTE_SECS, + delay=2, ) @@ -212,11 +219,13 @@ def test_relation_broken(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/relations/test_db_router.py b/machines/tests/integration/integration/relations/test_db_router.py index 8b861f78b..e7ce5482f 100644 --- a/machines/tests/integration/integration/relations/test_db_router.py +++ b/machines/tests/integration/integration/relations/test_db_router.py @@ -62,6 +62,7 @@ def test_keystone_bundle_db_router(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) juju.wait( ready=lambda status: all(( @@ -72,10 +73,12 @@ def test_keystone_bundle_db_router(juju: Juju, charm) -> None: ), )), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_blocked, KEYSTONE_MYSQLROUTER_APP_NAME), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) juju.integrate(f"{KEYSTONE_MYSQLROUTER_APP_NAME}:db-router", f"{APP_NAME}:db-router") @@ -85,6 +88,7 @@ def test_keystone_bundle_db_router(juju: Juju, charm) -> None: jubilant_backports.all_active, KEYSTONE_APP_NAME, KEYSTONE_MYSQLROUTER_APP_NAME ), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) mysql_units = 
get_app_units(juju, APP_NAME) @@ -136,6 +140,7 @@ def test_keystone_bundle_db_router(juju: Juju, charm) -> None: ANOTHER_KEYSTONE_MYSQLROUTER_APP_NAME, ), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) for unit_name in mysql_units: diff --git a/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py b/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py index 32e0c3d57..dc118e546 100644 --- a/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py +++ b/machines/tests/integration/integration/relations/test_relation_mysql_legacy.py @@ -56,11 +56,13 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) @@ -85,6 +87,7 @@ def test_relation_creation(juju: Juju): ), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) @@ -114,11 +117,13 @@ def test_relation_broken(juju: Juju): ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), error=jubilant_backports.any_blocked, timeout=TIMEOUT, + delay=2, ) logger.info( diff --git a/machines/tests/integration/integration/relations/test_shared_db.py b/machines/tests/integration/integration/relations/test_shared_db.py index 172085d45..b730d5864 100644 --- a/machines/tests/integration/integration/relations/test_shared_db.py +++ b/machines/tests/integration/integration/relations/test_shared_db.py @@ -53,6 +53,7 @@ def test_keystone_bundle_shared_db(juju: Juju, charm) -> None: ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), error=jubilant_backports.any_blocked, timeout=FAST_WAIT_TIMEOUT, + delay=2, ) mysql_units = get_app_units(juju, APP_NAME) @@ -109,6 +110,7 @@ def test_keystone_bundle_shared_db(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=FAST_WAIT_TIMEOUT, + delay=2, ) # Scale mysql back up to 3 units @@ -154,6 +156,7 @@ def deploy_and_relate_keystone_with_mysql( ), )), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) # Relate keystone to mysql @@ -163,4 +166,5 @@ def deploy_and_relate_keystone_with_mysql( juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, keystone_application_name), timeout=SLOW_WAIT_TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/roles/test_database_dba_role.py b/machines/tests/integration/integration/roles/test_database_dba_role.py index f81f0ba60..85a561a16 100644 --- a/machines/tests/integration/integration/roles/test_database_dba_role.py +++ b/machines/tests/integration/integration/roles/test_database_dba_role.py @@ -51,12 +51,14 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status( jubilant_backports.all_blocked, f"{INTEGRATOR_APP_NAME}1", f"{INTEGRATOR_APP_NAME}2" ), timeout=TIMEOUT, + delay=2, ) @@ -69,6 +71,7 @@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) juju.config( @@ -81,6 +84,7 
@@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}2", DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) mysql_unit = get_app_units(juju, DATABASE_APP_NAME)[0] diff --git a/machines/tests/integration/integration/roles/test_instance_dba_role.py b/machines/tests/integration/integration/roles/test_instance_dba_role.py index 54091fdf2..0f676b87d 100644 --- a/machines/tests/integration/integration/roles/test_instance_dba_role.py +++ b/machines/tests/integration/integration/roles/test_instance_dba_role.py @@ -42,10 +42,12 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_blocked, INTEGRATOR_APP_NAME), timeout=TIMEOUT, + delay=2, ) @@ -62,6 +64,7 @@ def test_charmed_dba_role(juju: Juju): jubilant_backports.all_active, INTEGRATOR_APP_NAME, DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) mysql_unit = get_app_units(juju, DATABASE_APP_NAME)[0] diff --git a/machines/tests/integration/integration/roles/test_instance_roles.py b/machines/tests/integration/integration/roles/test_instance_roles.py index 26685341c..509df9b9e 100644 --- a/machines/tests/integration/integration/roles/test_instance_roles.py +++ b/machines/tests/integration/integration/roles/test_instance_roles.py @@ -52,12 +52,14 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status( jubilant_backports.all_blocked, f"{INTEGRATOR_APP_NAME}1", f"{INTEGRATOR_APP_NAME}2" ), timeout=TIMEOUT, + delay=2, ) @@ -74,6 +76,7 @@ def test_charmed_read_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) mysql_units = get_app_units(juju, DATABASE_APP_NAME) @@ -137,6 +140,7 @@ def test_charmed_read_role(juju: Juju): juju.wait( ready=wait_for_apps_status(jubilant_backports.all_blocked, f"{INTEGRATOR_APP_NAME}1"), timeout=TIMEOUT, + delay=2, ) @@ -151,6 +155,7 @@ def test_charmed_dml_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}1", DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) juju.config( @@ -163,6 +168,7 @@ def test_charmed_dml_role(juju: Juju): jubilant_backports.all_active, f"{INTEGRATOR_APP_NAME}2", DATABASE_APP_NAME ), timeout=TIMEOUT, + delay=2, ) mysql_unit = get_app_units(juju, DATABASE_APP_NAME)[0] @@ -235,4 +241,5 @@ def test_charmed_dml_role(juju: Juju): jubilant_backports.all_blocked, f"{INTEGRATOR_APP_NAME}1", f"{INTEGRATOR_APP_NAME}2" ), timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/spaces/test_spaced_db.py b/machines/tests/integration/integration/spaces/test_spaced_db.py index 5c0d4226e..76d26289c 100644 --- a/machines/tests/integration/integration/spaces/test_spaced_db.py +++ b/machines/tests/integration/integration/spaces/test_spaced_db.py @@ -48,10 +48,12 @@ def test_build_and_deploy(juju: Juju, lxd_spaces, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=TIMEOUT, + delay=2, ) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, APPLICATION_APP_NAME), timeout=TIMEOUT, + delay=2, ) @@ -64,6 +66,7 @@ def test_integrate_with_spaces(juju: Juju): juju.wait( ready=jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) unit = 
get_app_units(juju, APPLICATION_APP_NAME)[0] @@ -82,6 +85,7 @@ def test_integrate_with_spaces(juju: Juju): juju.wait( ready=lambda status: APPLICATION_APP_NAME not in status.apps, timeout=TIMEOUT, + delay=2, ) @@ -101,6 +105,7 @@ def test_integrate_with_isolated_space(juju: Juju): juju.wait( ready=wait_for_apps_status(jubilant_backports.all_waiting, isolated_app_name), timeout=TIMEOUT, + delay=2, ) # Relate the database to the application @@ -113,6 +118,7 @@ def test_integrate_with_isolated_space(juju: Juju): jubilant_backports.all_active, DATABASE_APP_NAME, isolated_app_name ), timeout=TIMEOUT, + delay=2, ) unit = get_app_units(juju, isolated_app_name)[0] @@ -141,4 +147,5 @@ def test_integrate_with_isolated_space(juju: Juju): juju.wait( ready=lambda status: isolated_app_name not in status.apps, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/test_architecture.py b/machines/tests/integration/integration/test_architecture.py index 35aeca5f5..4919370b0 100644 --- a/machines/tests/integration/integration/test_architecture.py +++ b/machines/tests/integration/integration/test_architecture.py @@ -2,6 +2,7 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +from time import sleep import jubilant_backports from jubilant_backports import Juju @@ -23,8 +24,15 @@ def test_arm_charm_on_amd_host(juju: Juju) -> None: config={"profile": "testing"}, base="ubuntu@22.04", ) - - juju.wait(ready=jubilant_backports.all_error, timeout=300) + # Allow some time between deploy and status call. Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + + juju.wait( + ready=jubilant_backports.all_error, + timeout=300, + delay=2, + ) @markers.arm64_only @@ -39,8 +47,15 @@ def test_amd_charm_on_arm_host(juju: Juju) -> None: config={"profile": "testing"}, base="ubuntu@22.04", ) - - juju.wait(ready=jubilant_backports.all_error, timeout=300) + # Allow some time between deploy and status call. 
Avoids: + # ERROR getting details for storage database/0: filesystem for storage instance "database/0" not found + sleep(30) + + juju.wait( + ready=jubilant_backports.all_error, + timeout=300, + delay=2, + ) # TODO: add s390x test diff --git a/machines/tests/integration/integration/test_backup_aws.py b/machines/tests/integration/integration/test_backup_aws.py index 263a0ef1f..b72d9bddf 100644 --- a/machines/tests/integration/integration/test_backup_aws.py +++ b/machines/tests/integration/integration/test_backup_aws.py @@ -90,6 +90,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -113,6 +114,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -159,6 +161,7 @@ def test_backup(juju: Juju, cloud_configs_aws) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -226,6 +229,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -290,6 +294,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_aws) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -336,6 +341,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # relate to S3 integrator @@ -346,6 +352,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -380,6 +387,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") @@ -387,6 +395,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -440,4 +449,5 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/test_backup_ceph.py b/machines/tests/integration/integration/test_backup_ceph.py index 78e94d373..49045c773 100644 --- a/machines/tests/integration/integration/test_backup_ceph.py +++ b/machines/tests/integration/integration/test_backup_ceph.py @@ -180,6 +180,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -203,6 +204,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -249,6 +251,7 @@ def 
test_backup(juju: Juju, cloud_configs_ceph) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -316,6 +319,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_ceph) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -380,6 +384,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_ceph) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -427,6 +432,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # relate to S3 integrator @@ -437,6 +443,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -471,6 +478,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") @@ -478,6 +486,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -531,4 +540,5 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/test_backup_gcp.py b/machines/tests/integration/integration/test_backup_gcp.py index a18b50e10..3be33abdf 100644 --- a/machines/tests/integration/integration/test_backup_gcp.py +++ b/machines/tests/integration/integration/test_backup_gcp.py @@ -90,6 +90,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, + delay=2, ) primary_unit_name = get_mysql_primary_unit(juju, DATABASE_APP_NAME) @@ -113,6 +114,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ), )), timeout=TIMEOUT, + delay=2, ) @@ -159,6 +161,7 @@ def test_backup(juju: Juju, cloud_configs_gcp) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # list backups @@ -226,6 +229,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None: jubilant_backports.all_active, DATABASE_APP_NAME, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -290,6 +294,7 @@ def test_restore_on_same_cluster(juju: Juju, cloud_configs_gcp) -> None: ), )), timeout=TIMEOUT, + delay=2, ) logger.info("Ensuring inserted values before backup and after restore exist on all units") @@ -332,6 +337,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, + delay=2, ) # A race condition in Juju 2.9 makes `juju.wait` fail if called too early @@ -346,6 +352,7 @@ def 
test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) # rotate all credentials @@ -380,6 +387,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: jubilant_backports.all_active, new_mysql_application_name, S3_INTEGRATOR ), timeout=TIMEOUT, + delay=2, ) logger.info("Waiting for blocked application status with another cluster S3 repository") @@ -387,6 +395,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, timeout=TIMEOUT, + delay=2, ) # restore the backup @@ -440,4 +449,5 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: ready=lambda status: status.apps[new_mysql_application_name].app_status.message == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/test_saturate_max_connections.py b/machines/tests/integration/integration/test_saturate_max_connections.py index 50f1afea4..2f1e00b72 100644 --- a/machines/tests/integration/integration/test_saturate_max_connections.py +++ b/machines/tests/integration/integration/test_saturate_max_connections.py @@ -49,6 +49,7 @@ def test_deploy_and_relate_test_app(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=10 * MINUTE_SECS, + delay=2, ) diff --git a/machines/tests/integration/integration/test_subordinate_charms.py b/machines/tests/integration/integration/test_subordinate_charms.py index e5599c375..0a9ba125e 100644 --- a/machines/tests/integration/integration/test_subordinate_charms.py +++ b/machines/tests/integration/integration/test_subordinate_charms.py @@ -45,6 +45,7 @@ def test_ubuntu_pro(juju: Juju, charm): juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) @@ -65,4 +66,5 @@ def test_landscape_client(juju: Juju): juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/integration/test_tls.py b/machines/tests/integration/integration/test_tls.py index 97b913a9e..44ebd4a97 100644 --- a/machines/tests/integration/integration/test_tls.py +++ b/machines/tests/integration/integration/test_tls.py @@ -63,6 +63,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, APP_NAME), timeout=TIMEOUT, + delay=2, ) @@ -108,6 +109,7 @@ def test_enable_tls(juju: Juju) -> None: juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, tls_app_name), timeout=TIMEOUT, + delay=2, ) # Relate with TLS charm @@ -122,6 +124,7 @@ def test_enable_tls(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) # After relating to only encrypted connection should be possible diff --git a/machines/tests/integration/integration/test_vm_reboot.py b/machines/tests/integration/integration/test_vm_reboot.py index 2058a23c2..c0b5bf05d 100644 --- a/machines/tests/integration/integration/test_vm_reboot.py +++ b/machines/tests/integration/integration/test_vm_reboot.py @@ -36,6 +36,7 @@ def test_build_and_deploy(juju: Juju, charm) -> None: juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) @@ -54,6 +55,7 @@ def test_reboot_1_of_3_units(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) @@ 
-72,6 +74,7 @@ def test_reboot_2_of_3_units(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) @@ -90,6 +93,7 @@ def test_reboot_3_of_3_units(juju: Juju) -> None: juju.wait( jubilant_backports.all_active, timeout=TIMEOUT, + delay=2, ) diff --git a/machines/tests/integration/release/high_availability/test_upgrade_from_stable.py b/machines/tests/integration/release/high_availability/test_upgrade_from_stable.py index 8f29131bf..751f1446c 100644 --- a/machines/tests/integration/release/high_availability/test_upgrade_from_stable.py +++ b/machines/tests/integration/release/high_availability/test_upgrade_from_stable.py @@ -103,6 +103,7 @@ def deploy_stable(juju: Juju, revision: int) -> None: ), error=jubilant_backports.any_blocked, timeout=20 * MINUTE_SECS, + delay=2, ) @@ -130,12 +131,14 @@ async def upgrade_from_stable(juju: Juju, charm: str) -> None: juju.wait( ready=lambda status: jubilant_backports.any_maintenance(status, MYSQL_APP_NAME), timeout=10 * MINUTE_SECS, + delay=2, ) logging.info("Wait for upgrade to complete") juju.wait( ready=lambda status: jubilant_backports.all_active(status, MYSQL_APP_NAME), timeout=20 * MINUTE_SECS, + delay=2, ) logging.info("Ensure continuous writes are incrementing") From b30d3d8d816537610cd670506556a23e577cdcb5 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Mon, 23 Feb 2026 08:45:18 -0300 Subject: [PATCH 14/40] include a couple of missing extra-waits to mitigate a Juju 2.9.x issue (#103) --- .../integration/helpers_backups.py | 7 ++--- .../integration/test_backup_aws.py | 20 ++++++++------ .../integration/test_backup_ceph.py | 20 ++++++++------ .../integration/test_backup_gcp.py | 26 +++++++++++-------- 4 files changed, 43 insertions(+), 30 deletions(-) diff --git a/machines/tests/integration/integration/helpers_backups.py b/machines/tests/integration/integration/helpers_backups.py index 533a7c221..270860bb2 100644 --- a/machines/tests/integration/integration/helpers_backups.py +++ b/machines/tests/integration/integration/helpers_backups.py @@ -2,6 +2,7 @@ # See LICENSE file for licensing details. 
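The subject line of this patch names the pattern applied in the helpers_backups.py hunk continuing below and in the three backup tests after it: deploy every application first, then pause once before the first status-driven wait, because on Juju 2.9 an early juju status call can fail with 'filesystem for storage instance "database/X" not found'. Reduced to its skeleton, reusing the constants and helpers from these files and the 30-second pause the patch settles on, the ordering is:

from time import sleep

def deploy_then_wait(juju, charm):
    # Kick off all deployments back to back; juju.deploy does not block.
    juju.deploy(charm, config={"cluster-name": CLUSTER_NAME, "profile": "testing"}, num_units=3)
    juju.deploy(S3_INTEGRATOR, channel="1/stable", base="ubuntu@22.04")

    # A race condition in Juju 2.9 makes `juju.wait` fail if called too early
    # (filesystem for storage instance "database/X" not found)
    sleep(30)

    # Only now start polling status.
    juju.wait(
        ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APPLICATION_NAME),
        timeout=15 * MINUTE_SECS,
        delay=2,
    )

A fixed pause is cruder than retrying the wait on the specific error, but it keeps the tests simple and, at 30 seconds, comfortably outlasts the race window the earlier 5-second sleep sometimes missed.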
import logging +from time import sleep import boto3 import jubilant_backports @@ -70,9 +71,6 @@ def build_and_deploy_operations( config={"cluster-name": CLUSTER_NAME, "profile": "testing"}, num_units=3, ) - # A race condition in Juju 2.9 makes `juju.wait` fail if called too early - # (filesystem for storage instance "database/X" not found) - # but it is enough to deploy another application in the meantime logger.info("Deploying s3 integrator") juju.deploy( S3_INTEGRATOR, @@ -80,6 +78,9 @@ def build_and_deploy_operations( channel=S3_INTEGRATOR_CHANNEL, base="ubuntu@22.04", ) + # A race condition in Juju 2.9 makes `juju.wait` fail if called too early + # (filesystem for storage instance "database/X" not found) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, MYSQL_APPLICATION_NAME), diff --git a/machines/tests/integration/integration/test_backup_aws.py b/machines/tests/integration/integration/test_backup_aws.py index b72d9bddf..ad4284ba9 100644 --- a/machines/tests/integration/integration/test_backup_aws.py +++ b/machines/tests/integration/integration/test_backup_aws.py @@ -82,11 +82,11 @@ def test_build_and_deploy(juju: Juju, charm) -> None: config={"cluster-name": CLUSTER_NAME, "profile": "testing"}, num_units=3, ) - # A race condition in Juju 2.9 makes `juju.wait` fail if called too early - # (filesystem for storage instance "database/X" not found) - # but it is enough to deploy another application in the meantime juju.deploy(S3_INTEGRATOR, channel="1/stable", base="ubuntu@22.04") + # A race condition in Juju 2.9 makes `juju.wait` fail if called too early + # (filesystem for storage instance "database/X" not found) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, @@ -336,7 +336,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: # A race condition in Juju 2.9 makes `juju.wait` fail if called too early # (filesystem for storage instance "database/X" not found) - sleep(5) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), @@ -392,8 +392,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, delay=2, ) @@ -446,8 +448,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_aws) -> None: logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, delay=2, ) diff --git a/machines/tests/integration/integration/test_backup_ceph.py b/machines/tests/integration/integration/test_backup_ceph.py index 49045c773..d23db8846 100644 --- a/machines/tests/integration/integration/test_backup_ceph.py +++ b/machines/tests/integration/integration/test_backup_ceph.py @@ -172,11 +172,11 @@ def test_build_and_deploy(juju: Juju, charm) -> None: config={"cluster-name": 
CLUSTER_NAME, "profile": "testing"}, num_units=3, ) - # A race condition in Juju 2.9 makes `juju.wait` fail if called too early - # (filesystem for storage instance "database/X" not found) - # but it is enough to deploy another application in the meantime juju.deploy(S3_INTEGRATOR, channel="1/stable", base="ubuntu@22.04") + # A race condition in Juju 2.9 makes `juju.wait` fail if called too early + # (filesystem for storage instance "database/X" not found) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, @@ -427,7 +427,7 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: # A race condition in Juju 2.9 makes `juju.wait` fail if called too early # (filesystem for storage instance "database/X" not found) - sleep(5) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), @@ -483,8 +483,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, delay=2, ) @@ -537,8 +539,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, delay=2, ) diff --git a/machines/tests/integration/integration/test_backup_gcp.py b/machines/tests/integration/integration/test_backup_gcp.py index 3be33abdf..73d12ec0a 100644 --- a/machines/tests/integration/integration/test_backup_gcp.py +++ b/machines/tests/integration/integration/test_backup_gcp.py @@ -82,11 +82,11 @@ def test_build_and_deploy(juju: Juju, charm) -> None: config={"cluster-name": CLUSTER_NAME, "profile": "testing"}, num_units=3, ) - # A race condition in Juju 2.9 makes `juju.wait` fail if called too early - # (filesystem for storage instance "database/X" not found) - # but it is enough to deploy another application in the meantime juju.deploy(S3_INTEGRATOR, channel="1/stable", base="ubuntu@22.04") + # A race condition in Juju 2.9 makes `juju.wait` fail if called too early + # (filesystem for storage instance "database/X" not found) + sleep(30) juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, DATABASE_APP_NAME), timeout=15 * MINUTE_SECS, @@ -334,16 +334,16 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: num_units=1, ) + # A race condition in Juju 2.9 makes `juju.wait` fail if called too early + # (filesystem for storage instance "database/X" not found) + sleep(30) + juju.wait( ready=wait_for_apps_status(jubilant_backports.all_active, new_mysql_application_name), timeout=TIMEOUT, delay=2, ) - # A race condition in Juju 2.9 makes `juju.wait` fail if called too early - # (filesystem for storage instance "database/X" not found) - sleep(5) - # relate to S3 integrator juju.integrate(new_mysql_application_name, S3_INTEGRATOR) @@ 
-392,8 +392,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: logger.info("Waiting for blocked application status with another cluster S3 repository") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE + ), timeout=TIMEOUT, delay=2, ) @@ -446,8 +448,10 @@ def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_gcp) -> None: logger.info("Waiting for blocked application status after restore") juju.wait( - ready=lambda status: status.apps[new_mysql_application_name].app_status.message - == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR, + ready=lambda status: ( + status.apps[new_mysql_application_name].app_status.message + == MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR + ), timeout=TIMEOUT, delay=2, ) From 25211482cbd60cdef0665c017c87fdd3800f8b7f Mon Sep 17 00:00:00 2001 From: Andreia Date: Mon, 23 Feb 2026 17:51:50 +0100 Subject: [PATCH 15/40] Merge VM and K8s documentation (#95) * remove separate docs and initialize latest starter pack version * add tutorial * add how-to guides * add reference pages * add all release notes from GH * add explanation pages * update home page * fix Vale errors * fix redirects * update workflows * address misc. review comments * clarify VM and K8s difference in legacy interfaces * additional fixes to interfaces and endpoints * add missing juju upgrade guide * add explicit Juju 2.9 and Juju 3 warnings * small fixes * correct mysql_root information * address small review comments * Update docs/explanation/interfaces-and-endpoints.md Co-authored-by: Paulo Machado Signed-off-by: Andreia * update mysql cli password flag * add backups and monitoring users to migration guide --------- Signed-off-by: Andreia Co-authored-by: Paulo Machado --- .github/workflows/automatic_doc_checks.yaml | 10 +- .github/workflows/check_libs.yaml | 5 +- .github/workflows/ci.yaml | 3 +- .github/workflows/release.yaml | 3 +- .../.readthedocs.yaml => .readthedocs.yaml | 6 +- .../docs => docs}/.custom_wordlist.txt | 22 + {kubernetes/docs => docs}/.gitignore | 0 .../.sphinx/.pre-commit-config.yaml | 0 .../docs => docs}/.sphinx/.pymarkdown.json | 0 .../docs => docs}/.sphinx/get_vale_conf.py | 16 +- {kubernetes/docs => docs}/.sphinx/pa11y.json | 0 docs/.sphinx/version | 1 + {machines/docs => docs}/Makefile | 60 +- {kubernetes/docs => docs}/conf.py | 61 +- docs/explanation/architecture.md | 234 +++++++ .../docs => docs}/explanation/flowcharts.md | 1 + .../docs => docs}/explanation/index.md | 2 +- docs/explanation/interfaces-and-endpoints.md | 152 +++++ {machines/docs => docs}/explanation/juju.md | 50 +- docs/explanation/legacy-charm.md | 139 +++++ docs/explanation/logs/audit-logs.md | 95 +++ .../docs => docs}/explanation/logs/index.md | 84 ++- {machines/docs => docs}/explanation/roles.md | 14 +- .../explanation/security/cryptography.md | 18 +- docs/explanation/security/index.md | 150 +++++ {machines/docs => docs}/explanation/users.md | 34 +- .../back-up-and-restore/configure-s3-aws.md | 53 +- .../configure-s3-radosgw.md | 61 +- .../back-up-and-restore/create-a-backup.md | 66 +- .../how-to/back-up-and-restore/index.md | 6 +- .../back-up-and-restore/migrate-a-cluster.md | 89 +++ .../back-up-and-restore/restore-a-backup.md | 114 ++++ .../how-to/charm-development}/index.md | 3 +- .../integrate-with-your-charm.md | 20 +- 
.../migrate-data-via-backup-restore.md | 40 ++ .../migrate-data-via-mydumper.md | 60 +- .../migrate-data-via-mysqldump.md | 112 +++- .../cluster-cluster-replication/clients.md | 84 +++ .../cluster-cluster-replication/deploy.md | 142 +++++ .../cluster-cluster-replication/index.md | 7 + .../cluster-cluster-replication/recovery.md | 61 ++ .../cluster-cluster-replication/removal.md | 14 +- .../switchover-failover.md | 10 +- {machines/docs => docs}/how-to/contribute.md | 17 +- docs/how-to/deploy/airgapped.md | 197 ++++++ docs/how-to/deploy/index.md | 57 ++ .../how-to/deploy/juju-spaces.md | 47 +- .../how-to/deploy/k8s-clouds}/aks.md | 3 +- .../deploy/k8s-clouds}/canonical-k8s.md | 5 +- .../how-to/deploy/k8s-clouds}/eks.md | 3 +- .../how-to/deploy/k8s-clouds}/gke.md | 29 +- docs/how-to/deploy/k8s-clouds/index.md | 12 + .../how-to/deploy/k8s-clouds}/microk8s.md | 5 +- .../how-to/deploy/multi-az/gce.md | 32 +- .../how-to/deploy/multi-az/gke.md | 82 ++- docs/how-to/deploy/multi-az/index.md | 21 + .../how-to/deploy/terraform/charm-module.md | 50 +- .../how-to/deploy/terraform/index.md | 14 +- .../how-to/deploy/terraform/product-module.md | 49 +- .../how-to/deploy/vm-clouds}/aws-ec2.md | 21 +- .../how-to/deploy/vm-clouds}/azure.md | 20 +- .../how-to/deploy/vm-clouds}/gce.md | 63 +- docs/how-to/deploy/vm-clouds/index.md | 13 + .../how-to/deploy/vm-clouds}/lxd.md | 4 +- .../how-to/deploy/vm-clouds}/maas.md | 5 +- .../how-to/deploy/vm-clouds}/sunbeam.md | 26 +- docs/how-to/enable-tls.md | 114 ++++ docs/how-to/external-network-access.md | 21 + {kubernetes/docs => docs}/how-to/index.md | 33 +- docs/how-to/integrate-with-applications.md | 189 ++++++ docs/how-to/manage-passwords.md | 101 +++ .../monitoring}/alert-rules-grafana.png | Bin .../how-to/monitoring}/enable-alert-rules.md | 35 +- .../how-to/monitoring}/enable-monitoring.md | 97 ++- docs/how-to/monitoring/enable-tracing.md | 161 +++++ .../how-to/monitoring}/index.md | 1 + .../how-to/monitoring}/mysql-vm-trace.png | Bin .../monitoring}/pushover-web-client.jpeg | Bin docs/how-to/primary-switchover.md | 38 ++ .../docs => docs}/how-to/refresh/index.md | 71 ++- .../how-to/refresh/multi-cluster/index.md | 1 + .../multi-cluster/refresh-multi-cluster.md | 31 +- .../multi-cluster/roll-back-multi-cluster.md | 0 .../how-to/refresh/single-cluster/index.md | 1 + .../single-cluster/refresh-single-cluster.md | 234 +++++++ .../roll-back-single-cluster.md | 91 +++ .../how-to/refresh/upgrade-juju.md | 9 +- .../scale-replicas.md => docs/how-to/scale.md | 101 ++- docs/index.md | 74 +++ {kubernetes/docs => docs}/redirects.txt | 43 +- .../docs => docs}/reference/alert-rules.md | 10 +- .../docs => docs}/reference/charm-statuses.md | 3 +- docs/reference/charm-testing.md | 118 ++++ {machines/docs => docs}/reference/contacts.md | 11 +- docs/reference/index.md | 19 + .../reference/plugins-extensions.md | 22 +- {machines/docs => docs}/reference/profiles.md | 68 +- docs/reference/release-notes/index.md | 9 + docs/reference/release-notes/k8s/index.md | 68 ++ .../release-notes/k8s/revision-113.md | 58 ++ .../release-notes/k8s/revision-127.md | 61 ++ .../release-notes/k8s/revision-153.md | 50 ++ .../release-notes/k8s/revision-75.md | 56 ++ .../release-notes/k8s/revision-99.md | 70 +++ .../release-notes/k8s/revisions-180-181.md | 64 ++ .../release-notes/k8s/revisions-210-211.md | 45 ++ .../release-notes/k8s/revisions-240-241.md | 41 ++ .../release-notes/k8s/revisions-254-255.md | 36 ++ .../release-notes/k8s/revisions-342-344.md | 63 ++ docs/reference/release-notes/vm/index.md | 54 
++ .../release-notes/vm/revision-151.md | 58 ++ .../release-notes/vm/revision-196.md | 87 +++ .../release-notes/vm/revision-240.md | 75 +++ .../release-notes/vm/revisions-312-313.md | 95 +++ .../release-notes/vm/revisions-366-367.md | 49 ++ .../release-notes/vm/revisions-442-444.md | 61 ++ docs/reference/system-requirements.md | 117 ++++ .../reference/troubleshooting/index.md | 215 +++++-- .../troubleshooting/known-scenarios.md | 16 +- .../recover-from-quorum-loss.md | 20 +- .../reference/troubleshooting/sos-report.md | 2 +- docs/requirements.txt | 35 ++ .../tutorial/index.md => docs/tutorial.md | 117 ++-- kubernetes/.readthedocs.yaml | 39 -- kubernetes/docs/.sphinx/get_vale_conf.py | 151 ----- .../docs/.sphinx/metrics/build_metrics.py | 94 --- .../docs/.sphinx/metrics/source_metrics.sh | 66 -- kubernetes/docs/.sphinx/version | 1 - kubernetes/docs/Makefile | 185 ------ kubernetes/docs/explanation/architecture.md | 163 ----- .../explanation/interfaces-and-endpoints.md | 62 -- kubernetes/docs/explanation/juju.md | 66 -- kubernetes/docs/explanation/legacy-charm.md | 79 --- .../docs/explanation/logs/audit-logs.md | 43 -- kubernetes/docs/explanation/roles.md | 66 -- .../docs/explanation/security/cryptography.md | 62 -- kubernetes/docs/explanation/security/index.md | 116 ---- kubernetes/docs/explanation/users.md | 97 --- .../back-up-and-restore/configure-s3-aws.md | 54 -- .../configure-s3-radosgw.md | 68 -- .../back-up-and-restore/create-a-backup.md | 38 -- .../docs/how-to/back-up-and-restore/index.md | 12 - .../back-up-and-restore/migrate-a-cluster.md | 56 -- .../back-up-and-restore/restore-a-backup.md | 61 -- .../cluster-cluster-replication/clients.md | 46 -- .../cluster-cluster-replication/deploy.md | 104 --- .../cluster-cluster-replication/index.md | 30 - .../cluster-cluster-replication/recovery.md | 40 -- .../cluster-cluster-replication/removal.md | 51 -- kubernetes/docs/how-to/contribute.md | 33 - kubernetes/docs/how-to/deploy/air-gapped.md | 113 ---- kubernetes/docs/how-to/deploy/index.md | 24 - .../docs/how-to/deploy/terraform/charm.md | 83 --- .../docs/how-to/deploy/terraform/product.md | 133 ---- .../development/integrate-with-your-charm.md | 45 -- .../migrate-data-via-backup-restore.md | 38 -- .../development/migrate-data-via-mydumper.md | 72 --- .../development/migrate-data-via-mysqldump.md | 246 -------- kubernetes/docs/how-to/enable-tls.md | 56 -- .../docs/how-to/external-network-access.md | 14 - .../integrate-with-another-application.md | 85 --- kubernetes/docs/how-to/manage-passwords.md | 41 -- .../monitoring-cos/enable-alert-rules.md | 88 --- .../monitoring-cos/enable-monitoring.md | 153 ----- .../how-to/monitoring-cos/enable-tracing.md | 133 ---- .../docs/how-to/monitoring-cos/index.md | 10 - .../monitoring-cos/mysql-k8s-trace.jpeg | Bin 115366 -> 0 bytes kubernetes/docs/how-to/primary-switchover.md | 20 - .../multi-cluster/refresh-multi-cluster.md | 32 - .../single-cluster/refresh-single-cluster.md | 206 ------ .../roll-back-single-cluster.md | 70 --- .../docs/how-to/refresh/upgrade-juju.md | 184 ------ kubernetes/docs/how-to/scale-replicas.md | 50 -- kubernetes/docs/index.md | 45 -- kubernetes/docs/reference/alert-rules.md | 38 -- kubernetes/docs/reference/charm-statuses.md | 42 -- kubernetes/docs/reference/contacts.md | 20 - kubernetes/docs/reference/index.md | 27 - kubernetes/docs/reference/profiles.md | 52 -- kubernetes/docs/reference/releases.md | 123 ---- kubernetes/docs/reference/software-testing.md | 77 --- .../docs/reference/system-requirements.md | 57 -- 
.../docs/reference/troubleshooting/index.md | 183 ------ kubernetes/docs/requirements.txt | 7 - kubernetes/docs/reuse/links.txt | 43 -- kubernetes/docs/reuse/substitutions.txt | 7 - kubernetes/docs/reuse/substitutions.yaml | 4 - kubernetes/docs/tutorial/index.md | 590 ------------------ machines/docs/.custom_wordlist.txt | 80 --- machines/docs/.gitignore | 26 - machines/docs/.sphinx/.pre-commit-config.yaml | 23 - machines/docs/.sphinx/.pymarkdown.json | 46 -- .../docs/.sphinx/metrics/build_metrics.py | 94 --- .../docs/.sphinx/metrics/source_metrics.sh | 66 -- machines/docs/.sphinx/pa11y.json | 9 - machines/docs/.sphinx/version | 1 - machines/docs/conf.py | 358 ----------- machines/docs/explanation/architecture.md | 133 ---- machines/docs/explanation/index.md | 35 -- .../explanation/interfaces-and-endpoints.md | 89 --- machines/docs/explanation/legacy-charm.md | 78 --- machines/docs/explanation/logs/audit-logs.md | 42 -- machines/docs/explanation/logs/index.md | 177 ------ machines/docs/explanation/security/index.md | 115 ---- .../back-up-and-restore/migrate-a-cluster.md | 57 -- .../back-up-and-restore/restore-a-backup.md | 61 -- .../cluster-cluster-replication/clients.md | 46 -- .../cluster-cluster-replication/deploy.md | 101 --- .../cluster-cluster-replication/recovery.md | 40 -- .../switchover-failover.md | 33 - machines/docs/how-to/deploy/air-gapped.md | 144 ----- machines/docs/how-to/deploy/index.md | 25 - .../docs/how-to/deploy/terraform/index.md | 14 - machines/docs/how-to/development/index.md | 11 - .../migrate-data-via-backup-restore.md | 38 -- machines/docs/how-to/enable-tls.md | 52 -- .../docs/how-to/external-network-access.md | 16 - machines/docs/how-to/index.md | 83 --- .../integrate-with-another-application.md | 84 --- machines/docs/how-to/manage-passwords.md | 41 -- .../monitoring-cos/alert-rules-grafana.png | Bin 235282 -> 0 bytes .../how-to/monitoring-cos/enable-tracing.md | 131 ---- .../monitoring-cos/mysql-k8s-trace.jpeg | Bin 115366 -> 0 bytes .../monitoring-cos/pushover-web-client.jpeg | Bin 155021 -> 0 bytes machines/docs/how-to/primary-switchover.md | 21 - machines/docs/how-to/refresh/index.md | 65 -- .../how-to/refresh/multi-cluster/index.md | 8 - .../multi-cluster/roll-back-multi-cluster.md | 7 - .../how-to/refresh/single-cluster/index.md | 8 - .../single-cluster/refresh-single-cluster.md | 188 ------ .../roll-back-single-cluster.md | 51 -- machines/docs/index.md | 44 -- machines/docs/redirects.txt | 97 --- machines/docs/reference/index.md | 29 - machines/docs/reference/plugins-extensions.md | 10 - machines/docs/reference/releases.md | 102 --- machines/docs/reference/software-testing.md | 72 --- .../docs/reference/system-requirements.md | 60 -- .../recover-from-quorum-loss.md | 101 --- machines/docs/requirements.txt | 6 - machines/docs/reuse/links.txt | 43 -- machines/docs/reuse/substitutions.txt | 7 - machines/docs/reuse/substitutions.yaml | 4 - 243 files changed, 5566 insertions(+), 8996 deletions(-) rename machines/.readthedocs.yaml => .readthedocs.yaml (87%) rename {kubernetes/docs => docs}/.custom_wordlist.txt (80%) rename {kubernetes/docs => docs}/.gitignore (100%) rename {kubernetes/docs => docs}/.sphinx/.pre-commit-config.yaml (100%) rename {kubernetes/docs => docs}/.sphinx/.pymarkdown.json (100%) rename {machines/docs => docs}/.sphinx/get_vale_conf.py (96%) mode change 100644 => 100755 rename {kubernetes/docs => docs}/.sphinx/pa11y.json (100%) create mode 100644 docs/.sphinx/version rename {machines/docs => docs}/Makefile (69%) rename {kubernetes/docs => 
docs}/conf.py (88%) create mode 100644 docs/explanation/architecture.md rename {kubernetes/docs => docs}/explanation/flowcharts.md (98%) rename {kubernetes/docs => docs}/explanation/index.md (97%) create mode 100644 docs/explanation/interfaces-and-endpoints.md rename {machines/docs => docs}/explanation/juju.md (63%) create mode 100644 docs/explanation/legacy-charm.md create mode 100644 docs/explanation/logs/audit-logs.md rename {kubernetes/docs => docs}/explanation/logs/index.md (81%) rename {machines/docs => docs}/explanation/roles.md (85%) rename {machines/docs => docs}/explanation/security/cryptography.md (81%) create mode 100644 docs/explanation/security/index.md rename {machines/docs => docs}/explanation/users.md (79%) rename {machines/docs => docs}/how-to/back-up-and-restore/configure-s3-aws.md (55%) rename {machines/docs => docs}/how-to/back-up-and-restore/configure-s3-radosgw.md (53%) rename {machines/docs => docs}/how-to/back-up-and-restore/create-a-backup.md (52%) rename {machines/docs => docs}/how-to/back-up-and-restore/index.md (79%) create mode 100644 docs/how-to/back-up-and-restore/migrate-a-cluster.md create mode 100644 docs/how-to/back-up-and-restore/restore-a-backup.md rename {kubernetes/docs/how-to/development => docs/how-to/charm-development}/index.md (87%) rename {machines/docs/how-to/development => docs/how-to/charm-development}/integrate-with-your-charm.md (62%) create mode 100644 docs/how-to/charm-development/migrate-data-via-backup-restore.md rename {machines/docs/how-to/development => docs/how-to/charm-development}/migrate-data-via-mydumper.md (54%) rename {machines/docs/how-to/development => docs/how-to/charm-development}/migrate-data-via-mysqldump.md (77%) create mode 100644 docs/how-to/cluster-cluster-replication/clients.md create mode 100644 docs/how-to/cluster-cluster-replication/deploy.md rename {machines/docs => docs}/how-to/cluster-cluster-replication/index.md (83%) create mode 100644 docs/how-to/cluster-cluster-replication/recovery.md rename {machines/docs => docs}/how-to/cluster-cluster-replication/removal.md (69%) rename {kubernetes/docs => docs}/how-to/cluster-cluster-replication/switchover-failover.md (74%) rename {machines/docs => docs}/how-to/contribute.md (82%) create mode 100644 docs/how-to/deploy/airgapped.md create mode 100644 docs/how-to/deploy/index.md rename {machines/docs => docs}/how-to/deploy/juju-spaces.md (67%) rename {kubernetes/docs/how-to/deploy => docs/how-to/deploy/k8s-clouds}/aks.md (99%) rename {kubernetes/docs/how-to/deploy => docs/how-to/deploy/k8s-clouds}/canonical-k8s.md (97%) rename {kubernetes/docs/how-to/deploy => docs/how-to/deploy/k8s-clouds}/eks.md (99%) rename {kubernetes/docs/how-to/deploy => docs/how-to/deploy/k8s-clouds}/gke.md (81%) create mode 100644 docs/how-to/deploy/k8s-clouds/index.md rename {kubernetes/docs/how-to/deploy => docs/how-to/deploy/k8s-clouds}/microk8s.md (92%) rename machines/docs/how-to/deploy/multi-az.md => docs/how-to/deploy/multi-az/gce.md (85%) rename kubernetes/docs/how-to/deploy/multi-az.md => docs/how-to/deploy/multi-az/gke.md (83%) create mode 100644 docs/how-to/deploy/multi-az/index.md rename machines/docs/how-to/deploy/terraform/charm.md => docs/how-to/deploy/terraform/charm-module.md (57%) rename {kubernetes/docs => docs}/how-to/deploy/terraform/index.md (61%) rename machines/docs/how-to/deploy/terraform/product.md => docs/how-to/deploy/terraform/product-module.md (74%) rename {machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/aws-ec2.md (97%) rename 
{machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/azure.md (96%) rename {machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/gce.md (83%) create mode 100644 docs/how-to/deploy/vm-clouds/index.md rename {machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/lxd.md (95%) rename {machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/maas.md (96%) rename {machines/docs/how-to/deploy => docs/how-to/deploy/vm-clouds}/sunbeam.md (88%) create mode 100644 docs/how-to/enable-tls.md create mode 100644 docs/how-to/external-network-access.md rename {kubernetes/docs => docs}/how-to/index.md (55%) create mode 100644 docs/how-to/integrate-with-applications.md create mode 100644 docs/how-to/manage-passwords.md rename {kubernetes/docs/how-to/monitoring-cos => docs/how-to/monitoring}/alert-rules-grafana.png (100%) rename {machines/docs/how-to/monitoring-cos => docs/how-to/monitoring}/enable-alert-rules.md (72%) rename {machines/docs/how-to/monitoring-cos => docs/how-to/monitoring}/enable-monitoring.md (56%) create mode 100644 docs/how-to/monitoring/enable-tracing.md rename {machines/docs/how-to/monitoring-cos => docs/how-to/monitoring}/index.md (92%) rename {machines/docs/how-to/monitoring-cos => docs/how-to/monitoring}/mysql-vm-trace.png (100%) rename {kubernetes/docs/how-to/monitoring-cos => docs/how-to/monitoring}/pushover-web-client.jpeg (100%) create mode 100644 docs/how-to/primary-switchover.md rename {kubernetes/docs => docs}/how-to/refresh/index.md (61%) rename {kubernetes/docs => docs}/how-to/refresh/multi-cluster/index.md (88%) rename {machines/docs => docs}/how-to/refresh/multi-cluster/refresh-multi-cluster.md (55%) rename {kubernetes/docs => docs}/how-to/refresh/multi-cluster/roll-back-multi-cluster.md (100%) rename {kubernetes/docs => docs}/how-to/refresh/single-cluster/index.md (87%) create mode 100644 docs/how-to/refresh/single-cluster/refresh-single-cluster.md create mode 100644 docs/how-to/refresh/single-cluster/roll-back-single-cluster.md rename {machines/docs => docs}/how-to/refresh/upgrade-juju.md (95%) rename machines/docs/how-to/scale-replicas.md => docs/how-to/scale.md (51%) create mode 100644 docs/index.md rename {kubernetes/docs => docs}/redirects.txt (68%) rename {machines/docs => docs}/reference/alert-rules.md (88%) rename {machines/docs => docs}/reference/charm-statuses.md (96%) create mode 100644 docs/reference/charm-testing.md rename {machines/docs => docs}/reference/contacts.md (65%) create mode 100644 docs/reference/index.md rename {kubernetes/docs => docs}/reference/plugins-extensions.md (51%) rename {machines/docs => docs}/reference/profiles.md (56%) create mode 100644 docs/reference/release-notes/index.md create mode 100644 docs/reference/release-notes/k8s/index.md create mode 100644 docs/reference/release-notes/k8s/revision-113.md create mode 100644 docs/reference/release-notes/k8s/revision-127.md create mode 100644 docs/reference/release-notes/k8s/revision-153.md create mode 100644 docs/reference/release-notes/k8s/revision-75.md create mode 100644 docs/reference/release-notes/k8s/revision-99.md create mode 100644 docs/reference/release-notes/k8s/revisions-180-181.md create mode 100644 docs/reference/release-notes/k8s/revisions-210-211.md create mode 100644 docs/reference/release-notes/k8s/revisions-240-241.md create mode 100644 docs/reference/release-notes/k8s/revisions-254-255.md create mode 100644 docs/reference/release-notes/k8s/revisions-342-344.md create mode 100644 docs/reference/release-notes/vm/index.md create mode 100644 
docs/reference/release-notes/vm/revision-151.md create mode 100644 docs/reference/release-notes/vm/revision-196.md create mode 100644 docs/reference/release-notes/vm/revision-240.md create mode 100644 docs/reference/release-notes/vm/revisions-312-313.md create mode 100644 docs/reference/release-notes/vm/revisions-366-367.md create mode 100644 docs/reference/release-notes/vm/revisions-442-444.md create mode 100644 docs/reference/system-requirements.md rename {machines/docs => docs}/reference/troubleshooting/index.md (56%) rename {kubernetes/docs => docs}/reference/troubleshooting/known-scenarios.md (75%) rename {kubernetes/docs => docs}/reference/troubleshooting/recover-from-quorum-loss.md (86%) rename {machines/docs => docs}/reference/troubleshooting/sos-report.md (99%) create mode 100644 docs/requirements.txt rename machines/docs/tutorial/index.md => docs/tutorial.md (88%) delete mode 100644 kubernetes/.readthedocs.yaml delete mode 100644 kubernetes/docs/.sphinx/get_vale_conf.py delete mode 100644 kubernetes/docs/.sphinx/metrics/build_metrics.py delete mode 100755 kubernetes/docs/.sphinx/metrics/source_metrics.sh delete mode 100644 kubernetes/docs/.sphinx/version delete mode 100644 kubernetes/docs/Makefile delete mode 100644 kubernetes/docs/explanation/architecture.md delete mode 100644 kubernetes/docs/explanation/interfaces-and-endpoints.md delete mode 100644 kubernetes/docs/explanation/juju.md delete mode 100644 kubernetes/docs/explanation/legacy-charm.md delete mode 100644 kubernetes/docs/explanation/logs/audit-logs.md delete mode 100644 kubernetes/docs/explanation/roles.md delete mode 100644 kubernetes/docs/explanation/security/cryptography.md delete mode 100644 kubernetes/docs/explanation/security/index.md delete mode 100644 kubernetes/docs/explanation/users.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/configure-s3-aws.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/configure-s3-radosgw.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/create-a-backup.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/index.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/migrate-a-cluster.md delete mode 100644 kubernetes/docs/how-to/back-up-and-restore/restore-a-backup.md delete mode 100644 kubernetes/docs/how-to/cluster-cluster-replication/clients.md delete mode 100644 kubernetes/docs/how-to/cluster-cluster-replication/deploy.md delete mode 100644 kubernetes/docs/how-to/cluster-cluster-replication/index.md delete mode 100644 kubernetes/docs/how-to/cluster-cluster-replication/recovery.md delete mode 100644 kubernetes/docs/how-to/cluster-cluster-replication/removal.md delete mode 100644 kubernetes/docs/how-to/contribute.md delete mode 100644 kubernetes/docs/how-to/deploy/air-gapped.md delete mode 100644 kubernetes/docs/how-to/deploy/index.md delete mode 100644 kubernetes/docs/how-to/deploy/terraform/charm.md delete mode 100644 kubernetes/docs/how-to/deploy/terraform/product.md delete mode 100644 kubernetes/docs/how-to/development/integrate-with-your-charm.md delete mode 100644 kubernetes/docs/how-to/development/migrate-data-via-backup-restore.md delete mode 100644 kubernetes/docs/how-to/development/migrate-data-via-mydumper.md delete mode 100644 kubernetes/docs/how-to/development/migrate-data-via-mysqldump.md delete mode 100644 kubernetes/docs/how-to/enable-tls.md delete mode 100644 kubernetes/docs/how-to/external-network-access.md delete mode 100644 kubernetes/docs/how-to/integrate-with-another-application.md delete 
mode 100644 kubernetes/docs/how-to/manage-passwords.md delete mode 100644 kubernetes/docs/how-to/monitoring-cos/enable-alert-rules.md delete mode 100644 kubernetes/docs/how-to/monitoring-cos/enable-monitoring.md delete mode 100644 kubernetes/docs/how-to/monitoring-cos/enable-tracing.md delete mode 100644 kubernetes/docs/how-to/monitoring-cos/index.md delete mode 100644 kubernetes/docs/how-to/monitoring-cos/mysql-k8s-trace.jpeg delete mode 100644 kubernetes/docs/how-to/primary-switchover.md delete mode 100644 kubernetes/docs/how-to/refresh/multi-cluster/refresh-multi-cluster.md delete mode 100644 kubernetes/docs/how-to/refresh/single-cluster/refresh-single-cluster.md delete mode 100644 kubernetes/docs/how-to/refresh/single-cluster/roll-back-single-cluster.md delete mode 100644 kubernetes/docs/how-to/refresh/upgrade-juju.md delete mode 100644 kubernetes/docs/how-to/scale-replicas.md delete mode 100644 kubernetes/docs/index.md delete mode 100644 kubernetes/docs/reference/alert-rules.md delete mode 100644 kubernetes/docs/reference/charm-statuses.md delete mode 100644 kubernetes/docs/reference/contacts.md delete mode 100644 kubernetes/docs/reference/index.md delete mode 100644 kubernetes/docs/reference/profiles.md delete mode 100644 kubernetes/docs/reference/releases.md delete mode 100644 kubernetes/docs/reference/software-testing.md delete mode 100644 kubernetes/docs/reference/system-requirements.md delete mode 100644 kubernetes/docs/reference/troubleshooting/index.md delete mode 100644 kubernetes/docs/requirements.txt delete mode 100644 kubernetes/docs/reuse/links.txt delete mode 100644 kubernetes/docs/reuse/substitutions.txt delete mode 100644 kubernetes/docs/reuse/substitutions.yaml delete mode 100644 kubernetes/docs/tutorial/index.md delete mode 100644 machines/docs/.custom_wordlist.txt delete mode 100644 machines/docs/.gitignore delete mode 100644 machines/docs/.sphinx/.pre-commit-config.yaml delete mode 100644 machines/docs/.sphinx/.pymarkdown.json delete mode 100644 machines/docs/.sphinx/metrics/build_metrics.py delete mode 100755 machines/docs/.sphinx/metrics/source_metrics.sh delete mode 100644 machines/docs/.sphinx/pa11y.json delete mode 100644 machines/docs/.sphinx/version delete mode 100644 machines/docs/conf.py delete mode 100644 machines/docs/explanation/architecture.md delete mode 100644 machines/docs/explanation/index.md delete mode 100644 machines/docs/explanation/interfaces-and-endpoints.md delete mode 100644 machines/docs/explanation/legacy-charm.md delete mode 100644 machines/docs/explanation/logs/audit-logs.md delete mode 100644 machines/docs/explanation/logs/index.md delete mode 100644 machines/docs/explanation/security/index.md delete mode 100644 machines/docs/how-to/back-up-and-restore/migrate-a-cluster.md delete mode 100644 machines/docs/how-to/back-up-and-restore/restore-a-backup.md delete mode 100644 machines/docs/how-to/cluster-cluster-replication/clients.md delete mode 100644 machines/docs/how-to/cluster-cluster-replication/deploy.md delete mode 100644 machines/docs/how-to/cluster-cluster-replication/recovery.md delete mode 100644 machines/docs/how-to/cluster-cluster-replication/switchover-failover.md delete mode 100644 machines/docs/how-to/deploy/air-gapped.md delete mode 100644 machines/docs/how-to/deploy/index.md delete mode 100644 machines/docs/how-to/deploy/terraform/index.md delete mode 100644 machines/docs/how-to/development/index.md delete mode 100644 machines/docs/how-to/development/migrate-data-via-backup-restore.md delete mode 100644 
machines/docs/how-to/enable-tls.md delete mode 100644 machines/docs/how-to/external-network-access.md delete mode 100644 machines/docs/how-to/index.md delete mode 100644 machines/docs/how-to/integrate-with-another-application.md delete mode 100644 machines/docs/how-to/manage-passwords.md delete mode 100644 machines/docs/how-to/monitoring-cos/alert-rules-grafana.png delete mode 100644 machines/docs/how-to/monitoring-cos/enable-tracing.md delete mode 100644 machines/docs/how-to/monitoring-cos/mysql-k8s-trace.jpeg delete mode 100644 machines/docs/how-to/monitoring-cos/pushover-web-client.jpeg delete mode 100644 machines/docs/how-to/primary-switchover.md delete mode 100644 machines/docs/how-to/refresh/index.md delete mode 100644 machines/docs/how-to/refresh/multi-cluster/index.md delete mode 100644 machines/docs/how-to/refresh/multi-cluster/roll-back-multi-cluster.md delete mode 100644 machines/docs/how-to/refresh/single-cluster/index.md delete mode 100644 machines/docs/how-to/refresh/single-cluster/refresh-single-cluster.md delete mode 100644 machines/docs/how-to/refresh/single-cluster/roll-back-single-cluster.md delete mode 100644 machines/docs/index.md delete mode 100644 machines/docs/redirects.txt delete mode 100644 machines/docs/reference/index.md delete mode 100644 machines/docs/reference/plugins-extensions.md delete mode 100644 machines/docs/reference/releases.md delete mode 100644 machines/docs/reference/software-testing.md delete mode 100644 machines/docs/reference/system-requirements.md delete mode 100644 machines/docs/reference/troubleshooting/recover-from-quorum-loss.md delete mode 100644 machines/docs/requirements.txt delete mode 100644 machines/docs/reuse/links.txt delete mode 100644 machines/docs/reuse/substitutions.txt delete mode 100644 machines/docs/reuse/substitutions.yaml diff --git a/.github/workflows/automatic_doc_checks.yaml b/.github/workflows/automatic_doc_checks.yaml index bcc332b8a..1e7b64086 100644 --- a/.github/workflows/automatic_doc_checks.yaml +++ b/.github/workflows/automatic_doc_checks.yaml @@ -8,8 +8,7 @@ on: - '8.0/edge' pull_request: paths: - - 'kubernetes/docs/**' - - 'machines/docs/**' + - '/docs/**' workflow_dispatch: @@ -19,12 +18,7 @@ concurrency: jobs: documentation-checks: - strategy: - matrix: - path: - - kubernetes - - machines uses: canonical/documentation-workflows/.github/workflows/documentation-checks.yaml@main with: - working-directory: ${{ matrix.path }}/docs + working-directory: "docs" fetch-depth: 0 diff --git a/.github/workflows/check_libs.yaml b/.github/workflows/check_libs.yaml index 698f19fe2..09b1e08fb 100644 --- a/.github/workflows/check_libs.yaml +++ b/.github/workflows/check_libs.yaml @@ -11,15 +11,14 @@ on: paths-ignore: - 'kubernetes/.gitignore' - 'kubernetes/.jujuignore' - - 'kubernetes/docs/**' - 'machines/.gitignore' - 'machines/.jujuignore' - - 'machines/docs/**' - 'LICENSE' - '**.md' - '.github/renovate.json5' - '.github/workflows/*.yaml' - + - 'docs/**' + jobs: lib-check: name: Check libraries diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 19b77a0a6..6d058b696 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -9,8 +9,7 @@ concurrency: on: pull_request: paths-ignore: - - 'kubernetes/docs/**' - - 'machines/docs/**' + - 'docs/**' - '**.md' - '.github/renovate.json5' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 6227008c2..6cac73910 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -7,8 +7,7 @@ on: branches: - 
'8.0/edge' paths-ignore: - - 'kubernetes/docs/**' - - 'machines/docs/**' + - 'docs/**' - '.github/renovate.json5' - '.github/workflows/*.yaml' diff --git a/machines/.readthedocs.yaml b/.readthedocs.yaml similarity index 87% rename from machines/.readthedocs.yaml rename to .readthedocs.yaml index fc5f69a52..920e6a42f 100644 --- a/machines/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -18,7 +18,7 @@ build: # This is a special exit code on Read the Docs that will cancel the build immediately. # https://docs.readthedocs.io/en/stable/build-customization.html#cancel-build-based-on-a-condition - | - if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/8.0/edge -- 'machines/docs/' 'machines/.readthedocs.yaml'; + if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/8.0/edge -- 'docs/' '.readthedocs.yaml'; then exit 183; fi @@ -26,7 +26,7 @@ build: # Build documentation in the docs/ directory with Sphinx sphinx: builder: dirhtml - configuration: machines/docs/conf.py + configuration: docs/conf.py fail_on_warning: true # If using Sphinx, optionally build your docs in additional formats such as PDF @@ -36,4 +36,4 @@ formats: # Optionally declare the Python requirements required to build your docs python: install: - - requirements: machines/docs/requirements.txt + - requirements: docs/requirements.txt diff --git a/kubernetes/docs/.custom_wordlist.txt b/docs/.custom_wordlist.txt similarity index 80% rename from kubernetes/docs/.custom_wordlist.txt rename to docs/.custom_wordlist.txt index fe54bb86d..4563636a3 100644 --- a/kubernetes/docs/.custom_wordlist.txt +++ b/docs/.custom_wordlist.txt @@ -1,16 +1,23 @@ # Leave a blank line at the end of this file to support concatenation airgap +airgapped Artifactory async backend backends backport +binlog Charmcraft cjk +cron cryptographically +CSR +CSRs databag dvipng +failover fonts +fqdn freefont Furo github @@ -24,24 +31,30 @@ Intersphinx io ip Jira +kubectl landscape lang lastmod LaTeX latexmk +logrotate MinIO Multipass mydumper mysql +mysqldump MyST nameserver nameservers Open Graph otf +Paxos PDF Percona plantuml PNG +PPA +PPAs PR PVC PVCs @@ -58,18 +71,27 @@ snap_daemon Sphinx Spread spread_test_example +stdout +storages subproject subprojects SVG +Sysbench tex texlive TOC toctree +tracebacks txt +teardown uncommenting +systemd +systemctl +uncordon URL utils VMs +walkthrough WCAG whitespace whitespaces diff --git a/kubernetes/docs/.gitignore b/docs/.gitignore similarity index 100% rename from kubernetes/docs/.gitignore rename to docs/.gitignore diff --git a/kubernetes/docs/.sphinx/.pre-commit-config.yaml b/docs/.sphinx/.pre-commit-config.yaml similarity index 100% rename from kubernetes/docs/.sphinx/.pre-commit-config.yaml rename to docs/.sphinx/.pre-commit-config.yaml diff --git a/kubernetes/docs/.sphinx/.pymarkdown.json b/docs/.sphinx/.pymarkdown.json similarity index 100% rename from kubernetes/docs/.sphinx/.pymarkdown.json rename to docs/.sphinx/.pymarkdown.json diff --git a/machines/docs/.sphinx/get_vale_conf.py b/docs/.sphinx/get_vale_conf.py old mode 100644 new mode 100755 similarity index 96% rename from machines/docs/.sphinx/get_vale_conf.py rename to docs/.sphinx/get_vale_conf.py index e2a81c088..13e7966f8 --- a/machines/docs/.sphinx/get_vale_conf.py +++ b/docs/.sphinx/get_vale_conf.py @@ -31,12 +31,12 @@ def clone_repo_and_copy_paths(file_source_dest, overwrite=False): """ Clone the repository to a temporary directory and copy required files - + Args: file_source_dest: dictionary of file paths to copy 
from the repository, and their destination paths overwrite: boolean flag to overwrite existing files in the destination - + Returns: bool: True if all files were copied successfully, False otherwise """ @@ -52,8 +52,8 @@ def clone_repo_and_copy_paths(file_source_dest, overwrite=False): try: result = subprocess.run( - clone_cmd, - capture_output=True, + clone_cmd, + capture_output=True, text=True, check=True ) @@ -73,7 +73,7 @@ def clone_repo_and_copy_paths(file_source_dest, overwrite=False): continue if not copy_files_to_path(source_path, dest, overwrite): - is_copy_success = False + is_copy_success = False logging.error("Failed to copy %s to %s", source_path, dest) # Clean up temporary directory @@ -85,12 +85,12 @@ def clone_repo_and_copy_paths(file_source_dest, overwrite=False): def copy_files_to_path(source_path, dest_path, overwrite=False): """ Copy a file or directory from source to destination - + Args: source_path: Path to the source file or directory dest_path: Path to the destination overwrite: Boolean flag to overwrite existing files in the destination - + Returns: bool: True if copy was successful, False otherwise """ @@ -138,7 +138,7 @@ def main(): # Parse command line arguments, default to overwrite_enabled = True overwrite_enabled = not parse_arguments().no_overwrite - # Download into /tmp through git clone + # Download into /tmp through git clone if not clone_repo_and_copy_paths(vale_files_dict, overwrite=overwrite_enabled): logging.error("Failed to download files from repository") return 1 diff --git a/kubernetes/docs/.sphinx/pa11y.json b/docs/.sphinx/pa11y.json similarity index 100% rename from kubernetes/docs/.sphinx/pa11y.json rename to docs/.sphinx/pa11y.json diff --git a/docs/.sphinx/version b/docs/.sphinx/version new file mode 100644 index 000000000..347f5833e --- /dev/null +++ b/docs/.sphinx/version @@ -0,0 +1 @@ +1.4.1 diff --git a/machines/docs/Makefile b/docs/Makefile similarity index 69% rename from machines/docs/Makefile rename to docs/Makefile index 0b498e12f..edee045f2 100644 --- a/machines/docs/Makefile +++ b/docs/Makefile @@ -7,17 +7,17 @@ SPHINXDIR = .sphinx SPHINXOPTS ?= -c . -d $(SPHINXDIR)/.doctrees -j auto SPHINXBUILD ?= $(VENVDIR)/bin/sphinx-build -SOURCEDIR = . -BUILDDIR = _build -VENVDIR = $(SPHINXDIR)/venv +SOURCEDIR ?= . 
+BUILDDIR ?= _build +VENVDIR ?= $(SPHINXDIR)/venv PA11Y = $(SPHINXDIR)/node_modules/pa11y/bin/pa11y.js --config $(SPHINXDIR)/pa11y.json VENV = $(VENVDIR)/bin/activate TARGET = * -ALLFILES = *.rst **/*.rst -METRICSDIR = $(SOURCEDIR)/.sphinx/metrics REQPDFPACKS = latexmk fonts-freefont-otf texlive-latex-recommended texlive-latex-extra texlive-fonts-recommended texlive-font-utils texlive-lang-cjk texlive-xetex plantuml xindy tex-gyre dvipng CONFIRM_SUDO ?= N VALE_CONFIG = $(SPHINXDIR)/vale.ini +VALEDIR ?= $(VENVDIR)/lib/python*/site-packages/vale +VOCAB_CANONICAL = $(SPHINXDIR)/styles/config/vocabularies/Canonical SPHINX_HOST ?= 127.0.0.1 SPHINX_PORT ?= 8000 @@ -38,14 +38,13 @@ help: @echo "* check accessibility: make pa11y" @echo "* check style guide compliance: make vale" @echo "* check style guide compliance on target: make vale TARGET=*" - @echo "* check metrics for documentation: make allmetrics" @echo "* other possible targets: make " @echo "-------------------------------------------------------------" @echo -.PHONY: help full‑help html epub pdf linkcheck spelling spellcheck woke \ - vale pa11y run serve install pa11y‑install \ - vale‑install pdf‑prep pdf‑prep‑force clean clean‑doc allmetrics \ +.PHONY: help full-help html epub pdf linkcheck spelling spellcheck woke \ + vale pa11y run serve install pa11y-install \ + vale-install pdf-prep pdf-prep-force clean clean-doc \ update lint-md full-help: $(VENVDIR) @@ -72,8 +71,8 @@ pa11y-install: npm install --prefix $(SPHINXDIR) pa11y; \ } -pymarkdownlnt-install: - @. $(VENV); test -d $(SPHINXDIR)/venv/lib/python*/site-packages/pymarkdown || pip install pymarkdownlnt +pymarkdownlnt-install: install + @. $(VENV); test -d $(VENVDIR)/lib/python*/site-packages/pymarkdown || pip install pymarkdownlnt==0.9.35 install: $(VENVDIR) @@ -82,13 +81,13 @@ run: install # Does not depend on $(BUILDDIR) to rebuild properly at every run. html: install - . $(VENV); $(SPHINXBUILD) -W --keep-going -b dirhtml "$(SOURCEDIR)" "$(BUILDDIR)" -w $(SPHINXDIR)/warnings.txt $(SPHINXOPTS) + . $(VENV); $(SPHINXBUILD) --fail-on-warning --keep-going -b dirhtml "$(SOURCEDIR)" "$(BUILDDIR)" -w $(SPHINXDIR)/warnings.txt $(SPHINXOPTS) epub: install . $(VENV); $(SPHINXBUILD) -b epub "$(SOURCEDIR)" "$(BUILDDIR)" -w $(SPHINXDIR)/warnings.txt $(SPHINXOPTS) serve: html - cd "$(BUILDDIR)"; python3 -m http.server --bind 127.0.0.1 8000 + cd "$(BUILDDIR)"; python3 -m http.server --bind $(SPHINX_HOST) $(SPHINX_PORT) clean: clean-doc @test ! -e "$(VENVDIR)" -o -d "$(VENVDIR)" -a "$(abspath $(VENVDIR))" != "$(VENVDIR)" @@ -109,36 +108,35 @@ pa11y: pa11y-install html find $(BUILDDIR) -name *.html -print0 | xargs -n 1 -0 $(PA11Y) lint-md: pymarkdownlnt-install - @. $(VENV); pymarkdownlnt --config $(SPHINXDIR)/.pymarkdown.json scan --recurse --exclude=./$(SPHINXDIR)/** $(SOURCEDIR) + @. $(VENV); pymarkdownlnt --config $(SPHINXDIR)/.pymarkdown.json scan --recurse --exclude=$(SPHINXDIR)/** $(SOURCEDIR) vale-install: install - @. $(VENV); test -d $(SPHINXDIR)/venv/lib/python*/site-packages/vale || pip install rst2html vale @. $(VENV); test -f $(VALE_CONFIG) || python3 $(SPHINXDIR)/get_vale_conf.py @echo '.Name=="Canonical.400-Enforce-inclusive-terms"' > $(SPHINXDIR)/styles/woke.filter @echo '.Level=="error" and .Name!="Canonical.500-Repeated-words" and .Name!="Canonical.000-US-spellcheck"' > $(SPHINXDIR)/styles/error.filter @echo '.Name=="Canonical.000-US-spellcheck"' > $(SPHINXDIR)/styles/spelling.filter - @. 
$(VENV); find $(SPHINXDIR)/venv/lib/python*/site-packages/vale/vale_bin -size 195c -exec vale --version \; + @. $(VENV); find $(VALEDIR)/vale_bin -size 195c -exec vale --version \; woke: vale-install - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt - @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt + @cat $(VOCAB_CANONICAL)/accept.txt > $(VOCAB_CANONICAL)/accept_backup.txt + @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(VOCAB_CANONICAL)/accept.txt @echo "Running Vale acceptable term check against $(TARGET). To change target set TARGET= with make command" @. $(VENV); vale --config="$(VALE_CONFIG)" --filter='$(SPHINXDIR)/styles/woke.filter' --glob='*.{md,rst}' $(TARGET) - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt && rm $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt + @cat $(VOCAB_CANONICAL)/accept_backup.txt > $(VOCAB_CANONICAL)/accept.txt && rm $(VOCAB_CANONICAL)/accept_backup.txt vale: vale-install - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt - @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt + @cat $(VOCAB_CANONICAL)/accept.txt > $(VOCAB_CANONICAL)/accept_backup.txt + @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(VOCAB_CANONICAL)/accept.txt @echo "Running Vale against $(TARGET). To change target set TARGET= with make command" @. $(VENV); vale --config="$(VALE_CONFIG)" --filter='$(SPHINXDIR)/styles/error.filter' --glob='*.{md,rst}' $(TARGET) - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt && rm $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt + @cat $(VOCAB_CANONICAL)/accept_backup.txt > $(VOCAB_CANONICAL)/accept.txt && rm $(VOCAB_CANONICAL)/accept_backup.txt spelling: vale-install - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt - @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt + @cat $(VOCAB_CANONICAL)/accept.txt > $(VOCAB_CANONICAL)/accept_backup.txt + @cat $(SOURCEDIR)/.custom_wordlist.txt >> $(VOCAB_CANONICAL)/accept.txt @echo "Running Vale against $(TARGET). To change target set TARGET= with make command" @. $(VENV); vale --config="$(VALE_CONFIG)" --filter='$(SPHINXDIR)/styles/spelling.filter' --glob='*.{md,rst}' $(TARGET) - @cat $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt > $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept.txt && rm $(SPHINXDIR)/styles/config/vocabularies/Canonical/accept_backup.txt + @cat $(VOCAB_CANONICAL)/accept_backup.txt > $(VOCAB_CANONICAL)/accept.txt && rm $(VOCAB_CANONICAL)/accept_backup.txt spellcheck: spelling @echo "Please note that the \`make spellcheck\` command is being deprecated in favor of \`make spelling\`" @@ -165,21 +163,11 @@ pdf: pdf-prep @echo "Output can be found in ./$(BUILDDIR)" @echo -allmetrics: html - @echo "Recording documentation metrics..." - @echo "Checking for existence of vale..." - . $(VENV) - @. $(VENV); test -d $(SPHINXDIR)/venv/lib/python*/site-packages/vale || pip install vale - @. 
$(VENV); test -f $(VALE_CONFIG) || python3 $(SPHINXDIR)/get_vale_conf.py - @. $(VENV); find $(SPHINXDIR)/venv/lib/python*/site-packages/vale/vale_bin -size 195c -exec vale --config "$(VALE_CONFIG)" $(TARGET) > /dev/null \; - @eval '$(METRICSDIR)/source_metrics.sh $(PWD)' - @$(METRICSDIR)/build_metrics.py $(BUILDDIR) - update: install @. $(VENV); .sphinx/update_sp.py # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: - $(MAKE) —no-print-directory install + $(MAKE) --no-print-directory install . $(VENV); $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/kubernetes/docs/conf.py b/docs/conf.py similarity index 88% rename from kubernetes/docs/conf.py rename to docs/conf.py index d40f4ae38..edc476f94 100644 --- a/kubernetes/docs/conf.py +++ b/docs/conf.py @@ -24,7 +24,7 @@ # # TODO: Update with the official name of your project or product -project = "Charmed MySQL K8s" +project = "Charmed MySQL" author = "Canonical Ltd." @@ -34,8 +34,7 @@ # # TODO: To disable the title, set to an empty string. -html_title = project + " documentation" - +html_title = project + " 8.0" + " documentation" # Copyright string; shown at the bottom of the page # @@ -139,7 +138,7 @@ 'repo_default_branch': '8.0/edge', # Docs location in the repo; used in links for viewing the source files - "repo_folder": "/kubernetes/docs/", + "repo_folder": "/docs/", # TODO: To enable or disable the Previous / Next buttons at the bottom of pages # Valid options: none, prev, next, both @@ -152,6 +151,13 @@ 'github_issues': 'enabled', } +html_extra_path = [] + +# Allow opt-in build of the OpenAPI "Hello" example so docs stay clean by default. +if os.getenv("OPENAPI", ""): + tags.add("openapi") + html_extra_path.append("how-to/assets/openapi.yaml") + # TODO: To enable the edit button on pages, uncomment and change the link to a # public repository on GitHub or Launchpad. Any of the following link domains # are accepted: @@ -174,29 +180,35 @@ # Sitemap configuration: https://sphinx-sitemap.readthedocs.io/ ####################### -# Base URL of RTD hosted project +# Use RTD canonical URL to ensure duplicate pages have a specific canonical URL -html_baseurl = 'https://canonical-charmed-mysql-k8s.readthedocs-hosted.com/' +html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "/") -# URL scheme. Add language and version scheme elements. -# When configured with RTD variables, check for RTD environment so manual runs succeed: +# sphinx-sitemap uses html_baseurl to generate the full URL for each page: -if 'READTHEDOCS_VERSION' in os.environ: - version = os.environ["READTHEDOCS_VERSION"] - sitemap_url_scheme = '{version}{link}' -else: - sitemap_url_scheme = 'MANUAL/{link}' +sitemap_url_scheme = '{link}' # Include `lastmod` dates in the sitemap: sitemap_show_lastmod = True +# Exclude generated pages from the sitemap: + +sitemap_excludes = [ + '404/', + 'genindex/', + 'search/', +] + +# TODO: Add more pages to sitemap_excludes if needed. Wildcards are supported. +# For example, to exclude module pages generated by autodoc, add '_modules/*'. 
+ ####################### # Template and asset locations ####################### -#html_static_path = ["_static"] -#templates_path = ["_templates"] +# html_static_path = [".sphinx/_static"] +# templates_path = [".sphinx/_templates"] ############# @@ -277,12 +289,27 @@ extensions = [ "canonical_sphinx", + "notfound.extension", + "sphinx_design", + "sphinx_reredirects", + "sphinx_tabs.tabs", + "sphinxcontrib.jquery", + "sphinxext.opengraph", + "sphinx_config_options", + "sphinx_contributor_listing", + "sphinx_filtered_toctree", + "sphinx_related_links", + "sphinx_roles", + "sphinx_terminal", + "sphinx_ubuntu_images", + "sphinx_youtube_links", "sphinxcontrib.cairosvgconverter", "sphinx_last_updated_by_git", "sphinx.ext.intersphinx", "sphinx_sitemap", "sphinxcontrib.mermaid", - "sphinxext.rediraffe" + "sphinxext.rediraffe", + "sphinx_new_tab_link" ] # Excludes files or directories from processing @@ -292,7 +319,7 @@ # Adds custom CSS files, located under 'html_static_path' -# html_css_files = [] +html_css_files = [] # Adds custom JavaScript files, located under 'html_static_path' diff --git a/docs/explanation/architecture.md b/docs/explanation/architecture.md new file mode 100644 index 000000000..4b29b70f7 --- /dev/null +++ b/docs/explanation/architecture.md @@ -0,0 +1,234 @@ +(architecture)= +# Architecture + +[MySQL](https://www.mysql.com/) is the world’s most popular open source database. Charmed MySQL is a Juju-based operator to deploy and support MySQL from [day 0 to day 2](https://codilime.com/blog/day-0-day-1-day-2-the-software-lifecycle-in-the-cloud-age/). It is based on the [MySQL Community Edition](https://www.mysql.com/products/community/) using the built-in cluster functionality: [MySQL InnoDB ClusterSet](https://dev.mysql.com/doc/mysql-shell/8.0/en/innodb-clusterset.html). + +## High-level design + +Charmed MySQL is developed for deployment on machine clouds or Kubernetes. Although both versions are extremely similar in functionality, there are some key differences in their architecture. + +(machine-charm)= +### Machine charm + +[Charmed MySQL VM](https://charmhub.io/mysql) leverages the [charmed-mysql snap](https://snapcraft.io/charmed-mysql), which is deployed by Juju on the specified VM/MAAS/bare-metal machine based on Ubuntu Jammy/22.04. The snap allows running MySQL services in a secure and isolated environment ([strict confinement](https://snapcraft.io/blog/demystifying-snap-confinement)). + +The installed snap: + +```shell +$ juju ssh mysql/0 +$ snap list charmed-mysql +Name Version Rev Tracking Publisher Notes +charmed-mysql 8.0.34 69 latest/stable dataplatformbot held +``` + +The snap ships the following components: + +* MySQL Community Edition (based on Ubuntu APT package "[mysql-server-8.0](https://packages.ubuntu.com/jammy/mysql-server-8.0)") +* MySQL Router (based on Ubuntu APT package "[mysql-router](https://packages.ubuntu.com/jammy/mysql-router)") +* MySQL Shell (based on Canonical [backport](https://launchpad.net/~data-platform/+archive/ubuntu/mysql-shell)) +* Percona XtraBackup (based on Canonical [backport](https://launchpad.net/~data-platform/+archive/ubuntu/xtrabackup)) +* Prometheus MySQLd Exporter (based on Canonical [backport](https://launchpad.net/~data-platform/+archive/ubuntu/mysqld-exporter)) +* Prometheus MySQL Router Exporter (based on Canonical [backport](https://launchpad.net/~data-platform/+archive/ubuntu/mysqlrouter-exporter)) +* Prometheus Grafana dashboards and Loki alert rules, which are part of the charm revision and are missing from the snap.
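+ +One quick way to confirm what the snap ships on a deployed unit is to query snapd directly. The following is a minimal sketch, assuming a unit named `mysql/0`; the exact output varies by snap revision: + +```shell +# Show the snap summary, tracking channel, services, and shipped commands +juju ssh mysql/0 snap info charmed-mysql +```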
+ +The versions of all the components above are carefully chosen to work well together. + +A Charmed MySQL unit consists of several services, which are enabled and activated according to the setup: + +```shell +$ snap services charmed-mysql +Service Startup Current Notes +charmed-mysql.mysqld enabled active - +charmed-mysql.mysqld-exporter disabled inactive - +charmed-mysql.mysqlrouter-service disabled inactive - +charmed-mysql.mysqlrouterd-exporter disabled inactive - +``` + +The `mysqld` snap service is the main MySQL instance, which is normally up and running right after the charm deployment. + +The `mysql-router` snap service is used by [Charmed MySQL Router](https://charmhub.io/mysql-router?channel=dpe/edge) only, and should be stopped on [Charmed MySQL](https://charmhub.io/mysql) deployments. + +All `exporter` services are activated only after relating with {ref}`COS `. + +```{caution} +* It is possible to start, stop, and restart snap services manually, but doing so is NOT recommended: it risks a split brain with the charm state machine. Proceed with caution! +* All snap resources must be executed under the special user `snapd_daemon` only! +``` + +The snap "charmed-mysql" also ships a set of tools used by the charm: +* `charmed-mysql.mysql` (alias `mysql`) - the mysql client, used to connect to `mysqld`. +* `charmed-mysql.mysqlsh` - the new [mysql-shell](https://dev.mysql.com/doc/mysql-shell/8.0/en/) client, used to configure the MySQL cluster. +* `charmed-mysql.xbcloud` - a tool to download and upload a full or partial xbstream archive from/to the cloud. +* `charmed-mysql.xbstream` - a tool to support simultaneous compression and streaming. +* `charmed-mysql.xtrabackup` - a tool to back up/restore a MySQL DB. + +`mysql` and `mysqlsh` are well-known and popular tools for managing MySQL. +`xtrabackup` (with `xbcloud` and `xbstream`) is used for [MySQL backups](/how-to/back-up-and-restore/create-a-backup) only, to store backups on S3-compatible storage. + +### Kubernetes charm + +[Charmed MySQL K8s](https://charmhub.io/mysql-k8s) leverages the [sidecar](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/#example-1-sidecar-containers) pattern to allow multiple containers in each pod, with [Pebble](https://juju.is/docs/sdk/pebble) running as the workload container’s entrypoint. + +Pebble is a lightweight, API-driven process supervisor that is responsible for configuring processes to run in a container and controlling those processes throughout the workload lifecycle. + +Pebble `services` are configured through [layers](https://github.com/canonical/pebble#layer-specification), and each of the following containers represents a layer that forms the effective Pebble configuration, or `pebble plan`: + +1. a charm container runs the Juju operator code: `juju ssh mysql-k8s/0 bash` +1. a [mysql](https://www.mysql.com/) (workload) container runs the MySQL application along with other services (such as monitoring metrics exporters): `juju ssh --container mysql mysql-k8s/0 bash` + +As a result, if you run `kubectl get pods` on a namespace named for the Juju model you’ve deployed the "Charmed MySQL K8s" charm into, you’ll see something like the following: + +```shell +NAME READY STATUS RESTARTS AGE +mysql-k8s-0 2/2 Running 0 65m +``` + +This shows there are 2 containers in the pod: the `charm` and `workload` containers mentioned above. + +And if you run `kubectl describe pod mysql-k8s-0`, all the containers will have `/charm/bin/pebble` as their Command. That’s because Pebble is responsible for starting the processes, as explained above.
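+ +To confirm the shared Pebble entrypoint directly, a sketch like the one below can be used. It assumes the charm was deployed into a Juju model named `database` (Juju names the Kubernetes namespace after the model); the pod and namespace names are illustrative: + +```shell +# Print each container's name and entrypoint in the unit pod; +# both containers are expected to report /charm/bin/pebble +kubectl get pod mysql-k8s-0 -n database -o jsonpath='{range .spec.containers[*]}{.name}{"\t"}{.command}{"\n"}{end}' +```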
+ +The Charmed MySQL K8s (`workload` container) is based on the `mysql-image` resource defined in the [charm metadata.yaml](https://github.com/canonical/mysql-operators/blob/8.0/edge/kubernetes/metadata.yaml). It is an official Canonical "[charmed-mysql](https://github.com/canonical/charmed-mysql-rock)" [OCI/ROCK](https://documentation.ubuntu.com/server/explanation/virtualisation/about-rock-images/) image, which is in turn based on the Canonical snap “[charmed-mysql](https://snapcraft.io/charmed-mysql)” (read more about the snap details in {ref}`machine-charm`). + +[Charmcraft](https://juju.is/docs/sdk/install-charmcraft) uploads the image as a [charm resource](https://charmhub.io/mysql-k8s/resources/mysql-image) to [Charmhub](https://charmhub.io/mysql-k8s) during [publishing](https://github.com/canonical/mysql-k8s-operator/blob/main/.github/workflows/release.yaml#L40-L53), as described in the [Juju SDK How-to guides](https://juju.is/docs/sdk/publishing). + +The charm supports Juju deployment to all Kubernetes environments: [MicroK8s](https://microk8s.io/), [Charmed Kubernetes](https://ubuntu.com/kubernetes/charmed-k8s), [GKE](https://charmhub.io/mysql-k8s/docs/h-deploy-gke), [Amazon EKS](https://aws.amazon.com/eks/), ... + +The OCI/ROCK ships the following components based on the [`charmed-mysql` snap](https://canonical-charmed-mysql.readthedocs-hosted.com/explanation/architecture): + +* MySQL Community Edition +* MySQL Router +* MySQL Shell +* Percona XtraBackup +* Prometheus MySQLd Exporter +* Prometheus MySQL Router Exporter + +**Prometheus Grafana dashboards and Loki alert rules** are part of the charm revision, but are missing from the snap. + +Snap-based ROCK images guarantee the same component versions and functionality between the VM and K8s charm flavors. + +Pebble runs the layers of all the currently enabled services, e.g. monitoring, backups, etc.: + +```shell +> juju ssh --container mysql mysql-k8s/0 /charm/bin/pebble plan +services: + mysqld_exporter: + summary: mysqld exporter + startup: disabled <= COS Monitoring disabled + override: replace + command: /start-mysqld-exporter.sh + environment: + DATA_SOURCE_NAME: user:password@unix(/var/run/mysqld/mysqld.sock)/ + user: mysql + group: mysql + mysqld_safe: + summary: mysqld safe + startup: enabled <= MySQL is up and running + override: replace + command: mysqld_safe + user: mysql + group: mysql + kill-delay: 24h0m0s +``` + +The `mysqld_safe` service is the main MySQL wrapper, which is normally up and running right after the charm deployment. + +The `mysql-router` service is used by [Charmed MySQL Router K8s](https://charmhub.io/mysql-router-k8s?channel=8.0/edge) only, and should be stopped on [Charmed MySQL K8s](https://charmhub.io/mysql-k8s) deployments. + +All `exporter` services are activated only after relating with {ref}`COS `. + +```{caution} +* It is possible to start, stop, and restart Pebble services manually, but doing so is NOT recommended: it risks a split brain with the charm state machine. Proceed with caution! +* All Pebble resources must be executed under the proper user (defined in the user/group options of the Pebble layer)! +``` + +The ROCK "charmed-mysql" also ships a set of tools used by the charm: + +* `mysql` - the mysql client, used to connect to `mysqld`. +* `mysqlsh` - the new [mysql-shell](https://dev.mysql.com/doc/mysql-shell/8.0/en/) client, used to configure the MySQL cluster. +* `xbcloud` - a tool to download and upload a full or partial xbstream archive from/to the cloud. +* `xbstream` - a tool to support simultaneous compression and streaming.
+* `xtrabackup` - a tool to back up/restore a MySQL DB. + +`mysql` and `mysqlsh` are well-known and popular tools for managing MySQL. + +`xtrabackup` (with `xbcloud` and `xbstream`) is used only to store {ref}`backups ` on S3-compatible storage. + +## Integrations + +### MySQL Router + +[MySQL Router](https://dev.mysql.com/doc/mysql-router/8.0/en/) is part of MySQL InnoDB Cluster, and is lightweight middleware that provides transparent routing between your application and back-end MySQL servers. The MySQL Router charm ([VM](https://charmhub.io/mysql-router) | [K8s](https://charmhub.io/mysql-router-k8s)) is an independent charm that can be related to MySQL. + +### TLS Certificates Operator + +The [TLS Certificates](https://charmhub.io/tls-certificates-operator) charm is responsible for distributing certificates through a relation. Certificates are provided by the operator through Juju configs. For playground deployments, the [self-signed operator](https://charmhub.io/self-signed-certificates) is available as well. + +### S3 Integrator + +[S3 Integrator](https://charmhub.io/s3-integrator) is an integrator charm for providing S3 credentials to Charmed MySQL deployments that seek to access shared S3 data. Store the credentials centrally in the integrator charm and relate consumer charms as needed. + +### Data Integrator + +The [Data Integrator](https://charmhub.io/data-integrator) charm is a solution for requesting DB credentials for non-native Juju applications. Not all applications implement a data_interfaces relation, but many allow setting credentials through config options; some applications also run outside of Juju. This integrator charm allows receiving credentials that can be passed directly into the application config without implementing a Juju-native relation. + +### MySQL Test App + +The [MySQL Test App](https://charmhub.io/mysql-test-app) charm is a Canonical test application used to validate the charm installation and functionality, and to perform basic performance tests. + +### Grafana + +Grafana is an open-source visualization tool that allows you to query, visualize, and alert on metrics from mixed data sources in configurable dashboards for observability. This charm ships with its own Grafana dashboard and supports integration with the [Grafana Operator](https://charmhub.io/grafana-k8s) to simplify observability. See: {ref}`enable-monitoring`. + +### Loki + +Loki is an open-source, fully featured logging system. This charm ships with support for the [Loki Operator](https://charmhub.io/loki-k8s) to collect the generated logs. See: {ref}`enable-monitoring`. + +### Prometheus + +Prometheus is an open-source systems monitoring and alerting toolkit with a dimensional data model, a flexible query language, an efficient time series database and a modern alerting approach. This charm ships with Prometheus exporters and alerts, and supports integrating with the [Prometheus Operator](https://charmhub.io/prometheus-k8s) to automatically scrape the targets. See: {ref}`enable-monitoring`. + +## Low-level design + +See the charm state machines displayed in {ref}`flowcharts`. The low-level logic is mostly common to both VM and K8s charms. + + + +### Juju events + +According to the [Juju SDK](https://juju.is/docs/sdk/event): “an event is a data structure that encapsulates part of the execution context of a charm”. + +For this charm, the following events are observed: + +1.
[`on_install`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#install): install the snap "charmed-mysql" and perform basic preparations to bootstrap the cluster on the first leader (or join the already configured cluster). +2. [`leader-elected`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#leader-elected): generate all the secrets to bootstrap the cluster. +3. [`leader-settings-changed`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#leader-settings-changed): handle the leader settings changed event. +4. [`start`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#start): initialise and set up the cluster node. +5. [`config_changed`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#config-changed): usually fired in response to a configuration change using the GUI or CLI; create and set default cluster and cluster-set names in the peer relation databag (on the leader only). +6. [`update-status`](https://documentation.ubuntu.com/juju/3.6/reference/hook/#update-status): take care of workload health checks. + + +### Charm code overview + +The code for both the VM and K8s charms is located in the same repository, [`mysql-operators`](https://github.com/canonical/mysql-operators/tree/8.0/edge), under the `machines` and `kubernetes` directories respectively. + +For each substrate, `src/charm.py` is the default entry point for the charm and contains the `MySQLCharmBase` Python class, which inherits from `CharmBase`. + +`CharmBase` is the base class from which all charms are formed, defined by [Ops](https://ops.readthedocs.io/en/latest/) (the Python framework for developing charms). See more information in the [Ops documentation for `CharmBase`](https://ops.readthedocs.io/en/latest/reference/ops.html#ops.CharmBase). + +The `__init__` method guarantees that the charm observes all events relevant to its operation and handles them. + + \ No newline at end of file diff --git a/kubernetes/docs/explanation/flowcharts.md b/docs/explanation/flowcharts.md similarity index 98% rename from kubernetes/docs/explanation/flowcharts.md rename to docs/explanation/flowcharts.md index c23d45057..f79bc16e1 100644 --- a/kubernetes/docs/explanation/flowcharts.md +++ b/docs/explanation/flowcharts.md @@ -1,3 +1,4 @@ +(flowcharts)= # Charm lifecycle flowcharts ```{mermaid} diff --git a/kubernetes/docs/explanation/index.md b/docs/explanation/index.md similarity index 97% rename from kubernetes/docs/explanation/index.md rename to docs/explanation/index.md index 88ac0992d..ac5b54a5f 100644 --- a/kubernetes/docs/explanation/index.md +++ b/docs/explanation/index.md @@ -1,3 +1,4 @@ +(explanation)= # Explanation Additional context about key concepts behind the MySQL charm. @@ -44,4 +45,3 @@ Mermaid diagrams of charm events and hooks. Charm flowcharts ``` - diff --git a/docs/explanation/interfaces-and-endpoints.md b/docs/explanation/interfaces-and-endpoints.md new file mode 100644 index 000000000..f2d8ca966 --- /dev/null +++ b/docs/explanation/interfaces-and-endpoints.md @@ -0,0 +1,152 @@ +(interfaces-and-endpoints)= +# Interfaces and endpoints + +Charmed MySQL supports the modern `mysql_client` interface and the legacy `mysql`, `mysql-shared`, and `mysql-router` interfaces (in a backward-compatible mode).
+
+| | Interface | Endpoints | VM charm | K8s charm |
+|--------|----------------|-----------------------|----------|-----------|
+| modern | `mysql_client` | `database` | ![check] | ![check] |
+| legacy | `mysql` | `mysql` | ![check] | ![check] |
+| | | `mysql_root` | | ![check] |
+| legacy | `mysql-router` | `db-router` | ![check] | |
+| legacy | `mysql-shared` | `shared-db` | ![check] | |
+
+```{caution}
+Do **not** relate both modern and legacy interfaces simultaneously.
+```
+
+## Modern relations
+
+This charm provides the modern [`mysql_client`](https://github.com/canonical/charm-relation-interfaces) interface. Applications can easily connect to MySQL using the [`data_interfaces`](https://charmhub.io/data-platform-libs/libraries/data_interfaces) library from [`data-platform-libs`](https://github.com/canonical/data-platform-libs/).
+
+### `mysql_client` interface, `database` endpoint
+
+Adding a [Juju relation](https://documentation.ubuntu.com/juju/3.6/reference/relation/) is accomplished with `juju relate` via the `database` endpoint.
+
+Example:
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    # Deploy Charmed MySQL cluster with 3 nodes
+    juju deploy mysql -n 3 --channel 8.0
+
+    # Deploy the relevant charms, e.g. mysql-test-app
+    juju deploy mysql-test-app
+
+    # Relate MySQL with your application
+    juju relate mysql:database mysql-test-app:database
+
+    # Check established relation (using mysql_client interface):
+    juju status --relations
+
+    # Example of a properly established relation:
+    # > Relation provider Requirer Interface Type
+    # > mysql:database mysql-test-app:database mysql_client regular
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    # Deploy Charmed MySQL cluster with 3 nodes
+    juju deploy mysql-k8s -n 3 --trust --channel 8.0
+
+    # Deploy the relevant charms, e.g. mysql-test-app
+    juju deploy mysql-test-app
+
+    # Relate MySQL with your application
+    juju relate mysql-k8s:database mysql-test-app:database
+
+    # Check established relation (using mysql_client interface):
+    juju status --relations
+
+    # Example of a properly established relation:
+    # > Relation provider Requirer Interface Type
+    # > mysql-k8s:database mysql-test-app:database mysql_client regular
+```
+````
+
+See details about database user roles in {ref}`users`.
+
+```{note}
+In order to integrate with this charm, every table created by the integrated application must have a primary key. This is required by the [group replication plugin](https://dev.mysql.com/doc/refman/8.0/en/group-replication-requirements.html) enabled in this charm.
+```
+
+## Legacy relations
+
+**Legacy relations are deprecated and will be discontinued** from Charmed MySQL 8.4 onward. Their usage should be avoided.
+
+Check the legacy interface implementation limitations in {ref}`legacy-charm`.
+
+This charm supports several legacy interfaces, e.g. `mysql`, `mysql-shared`, `mysql-router`. They were used in some legacy charms in [cross-model relations](https://documentation.ubuntu.com/juju/3.6/reference/relation/#cross-model-relation).
+
+### `mysql` interface, `mysql` endpoint
+
+This was a popular interface used by some legacy charms on both VM and K8s (e.g. [MariaDB](https://charmhub.io/mariadb), [OSM MariaDB](https://charmhub.io/charmed-osm-mariadb-k8s), [Percona Cluster](https://charmhub.io/percona-cluster) and [MySQL Innodb Cluster](https://charmhub.io/mysql-innodb-cluster)), often in [cross-model relations](https://documentation.ubuntu.com/juju/3.6/reference/relation/#cross-model-relation). 
+
+Example usage of this interface:
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju deploy mysql --channel 8.0
+    juju config mysql mysql-interface-database=mediawiki mysql-interface-user=mediawiki
+    juju deploy mediawiki
+    juju relate mysql:mysql mediawiki:db
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju deploy mysql-k8s --trust --channel 8.0
+    juju config mysql-k8s mysql-interface-database=wordpress mysql-interface-user=wordpress
+    juju deploy wordpress-k8s
+    juju relate mysql-k8s:mysql wordpress-k8s:db
+```
+````
+
+#### `mysql_root` endpoint (K8s only)
+
+The K8s charm additionally supports the endpoint `mysql_root`, which provides the same legacy interface `mysql` with MySQL root-level privileges.
+
+```{caution}
+Usage of `mysql_root` is **not** recommended from a security point of view.
+```
+
+### `mysql-router` interface, `db-router` endpoint (VM only)
+
+This relation is used with the [MySQL Router](https://charmhub.io/mysql-router) charm.
+
+As an example, the following commands can be executed to deploy Charmed MySQL VM and integrate it with the keystone charm:
+
+```shell
+juju deploy mysql --channel 8.0
+juju deploy mysql-router --series focal
+juju deploy keystone --series focal
+juju relate mysql-router keystone
+juju relate mysql:db-router mysql-router:db-router
+```
+
+```{note}
+Make sure to deploy an identical [series/base](https://documentation.ubuntu.com/juju/3.6/reference/machine/#machine-base) for the `keystone` and `mysql-router` applications.
+
+This is necessary due to the [subordinate](https://documentation.ubuntu.com/juju/3.6/reference/charm/#subordinate-charm) charm nature of `mysql-router`.
+```
+
+### `mysql-shared` interface, `shared-db` endpoint (VM only)
+
+This relation is used when the application needs to connect directly to the database cluster. It is supported by various legacy charms, e.g. [mysql-innodb-cluster](https://charmhub.io/mysql-innodb-cluster).
+
+As an example, the following commands can be executed to deploy Charmed MySQL VM and integrate it with the keystone charm:
+
+```shell
+juju deploy mysql --channel 8.0
+juju deploy keystone --series focal
+juju relate keystone:shared-db mysql:shared-db
+```
+
+
+
+[check]: https://img.icons8.com/color/20/checkmark--v1.png
\ No newline at end of file
diff --git a/machines/docs/explanation/juju.md b/docs/explanation/juju.md
similarity index 63%
rename from machines/docs/explanation/juju.md
rename to docs/explanation/juju.md
index 8d25c46ce..2c56ebdfb 100644
--- a/machines/docs/explanation/juju.md
+++ b/docs/explanation/juju.md
@@ -1,8 +1,9 @@
+(juju)=
 # Juju
 
 [Juju](https://juju.is/) is an open source orchestration engine for software operators that enables the deployment, integration and lifecycle management of applications at any scale, on any infrastructure using charms.
 
-This [charm](https://charmhub.io/mysql) is an operator - business logic encapsulated in reusable software packages that automate every aspect of an application's life. Charms are shared via [CharmHub](https://charmhub.io/).
+This charm is an operator - business logic encapsulated in reusable software packages that automate every aspect of an application's life. Charms are shared via [CharmHub](https://charmhub.io/).
 
 See also:
 
@@ -11,7 +12,8 @@ See also:
 
 This page aims to provide some context on some of the inner workings of Juju that affect this charm. 
-## Breaking changes between Juju 2.9.x and 3.x
+(breaking-changes-juju)=
+## Breaking changes between Juju 2.9 and 3
 
 As this charm documentation is written for Juju 3.x, users of 2.9.x will encounter noteworthy changes when following the instructions. This section explains those changes.
 
@@ -32,18 +34,44 @@
 
 ### Juju 3.x:
 
-```shell
-juju integrate mysql:database mysql-test-app
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju integrate mysql:database mysql-test-app
 
-juju run mysql/leader get-password
+    juju run mysql/leader get-password
 ```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju integrate mysql-k8s:database mysql-test-app
+
+    juju run mysql-k8s/leader get-password
+```
+````
+
 ### Juju 2.9.x:
 
-```shell
-juju relate mysql:database mysql-test-app
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju relate mysql:database mysql-test-app
 
-juju run-action --wait mysql/leader get-password
+    juju run-action --wait mysql/leader get-password
 ```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju relate mysql-k8s:database mysql-test-app
+
+    juju run-action --wait mysql-k8s/leader get-password
+```
+````
+
 ```{note}
 This section is based on the [OpenStack guide](https://docs.openstack.org/charm-guide/latest/project/support-notes.html#breaking-changes-between-juju-2-9-x-and-3-x).
 ```
@@ -53,9 +81,9 @@
 
 Newly released charm revisions might require a new Juju version. This is usually because the new revision requires new Juju features, e.g. [Juju secrets](https://juju.is/docs/juju/secret).
 
-Information about Juju requirements will be clearly indicated in the charm's [release notes](/reference/releases) and in the repository's [metadata.yaml](https://github.com/canonical/mysql-operator/blob/14c06ff88c4e564cd6d098aa213bd03e78e84b52/metadata.yaml#L72-L80) file.
+Information about Juju requirements will be clearly indicated in the charm's {ref}`release notes ` and in the repository's `metadata.yaml` file.
 
-When upgrading your database charm with juju refresh, Juju checks that its version is compatible with the target revision. If not, it stops the upgrade and prevents further changes to keep the installation safe.
+When upgrading your database charm with {command}`juju refresh`, Juju checks that its version is compatible with the target revision. If not, it stops the upgrade and prevents further changes to keep the installation safe.
 
 ```shell
 ~$ juju refresh mysql
@@ -66,5 +94,5 @@
 ERROR Charm feature requirements cannot be met:
 - charm requires feature "juju" (version >= 3.1.5) but model currently supports version 3.1.4
 ```
 
-You must then [upgrade to the required Juju version](/how-to/refresh/upgrade-juju) before proceeding with the charm upgrade.
+You must then {ref}`upgrade to the required Juju version ` before proceeding with the charm upgrade. 
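+
+A minimal sketch of that recovery path, assuming an in-place upgrade of the controller and model is available in your environment (exact steps depend on your cloud):
+
+```shell
+# Upgrade the controller first, then the model hosting the charm
+juju upgrade-controller
+juju upgrade-model
+
+# Retry the refresh once the model reports a compatible Juju version
+juju refresh mysql
+```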
diff --git a/docs/explanation/legacy-charm.md b/docs/explanation/legacy-charm.md
new file mode 100644
index 000000000..f4448fa61
--- /dev/null
+++ b/docs/explanation/legacy-charm.md
@@ -0,0 +1,139 @@
+---
+relatedlinks: "[Charm generations](https://documentation.ubuntu.com/charmcraft/stable/)"
+---
+
+(legacy-charm)=
+# Legacy charm
+
+Historically, there were several **legacy charms** that provided MySQL/MariaDB functionality:
+
+| Legacy VM charms | Legacy K8s charms |
+|:----------------------|:------------------|
+|[MariaDB] | [OSM MariaDB] |
+|[Percona Cluster] | |
+|[MySQL Innodb Cluster] | |
+
+These legacy charms provided endpoints `mysql` and `mysql-root` for the interface `mysql`.
+
+This Charmed MySQL operator is a **modern charm** - i.e. it is based on the [Ops framework](https://documentation.ubuntu.com/ops/latest/) and designed to replace all {ref}`interfaces and endpoints ` of the legacy charms.
+
+The modern charm provides the old endpoints as well as the new endpoint `database` for the interface `mysql_client`.
+
+See all available endpoints/interfaces for Charmed MySQL on Charmhub:
+* [Charmed MySQL VM](https://charmhub.io/mysql/integrations)
+* [Charmed MySQL K8s](https://charmhub.io/mysql-k8s/integrations)
+
+## The default track `latest` vs. `8.0`
+
+The [default track](https://docs.openstack.org/charm-guide/yoga/project/charm-delivery.html) has been switched from `latest` to `8.0` for both VM and K8s MySQL charms.
+
+This was done to ensure all new deployments use a modern codebase. For more context, see this [Discourse topic](https://discourse.charmhub.io/t/request-switch-default-track-latest-8-0-for-charms-mysql-and-mysql-k8s/9977).
+
+We strongly advise against using the `latest` track, as a future charm upgrade may result in a MySQL version incompatible with an integrated application. Track `8.0` guarantees MySQL `8.0` deployment only.
+
+The track `latest` is closed to avoid confusion.
+
+## How to migrate from legacy to modern charm
+
+The modern charm provides temporary support for legacy interfaces.
+
+**Quick try**: Relate the current application with the new charm using the endpoint `mysql` (set the channel to `8.0/stable`). No extra changes are necessary:
+
+`````{tab-set}
+````{tab-item} VM
+:sync: vm
+
+```yaml
+mysql:
+  charm: mysql
+  channel: 8.0/stable
+  trust: true
+```
+````
+
+````{tab-item} K8s
+:sync: k8s
+
+```yaml
+mysql:
+  charm: mysql-k8s
+  channel: 8.0/stable
+  trust: true
+```
+
+```{note}
+The `trust` option must be enabled if [Role Based Access Control (RBAC)](https://kubernetes.io/docs/concepts/security/rbac-good-practices/) is in use in your Kubernetes.
+```
+````
+`````
+
+**Proper migration**: Migrate the application to the new interface [`mysql_client`](https://github.com/canonical/charm-relation-interfaces).
+
+The application will connect to MySQL using the [`data_interfaces`](https://charmhub.io/data-platform-libs/libraries/data_interfaces) library from [`data-platform-libs`](https://github.com/canonical/data-platform-libs/) via the `database` endpoint.
+
+```{caution}
+In-place upgrades from the legacy charm to the modern, Ops-based charm are **not supported**. 
+
+To migrate database data, see the following guides:
+* {ref}`migrate-data-mysqldump`
+* {ref}`migrate-data-mydumper`
+* {ref}`migrate-data-backup-restore`
+```
+
+## How to deploy a legacy MySQL charm
+
+`````{tab-set}
+````{tab-item} VM
+:sync: vm
+
+```yaml
+mariadb:
+  charm: mariadb
+  channel: latest/stable
+
+percona-cluster:
+  charm: percona-cluster
+  channel: latest/stable
+```
+````
+
+````{tab-item} K8s
+:sync: k8s
+
+```yaml
+osm-mariadb:
+  charm: charmed-osm-mariadb-k8s
+  channel: latest/stable
+
+mysql:
+  charm: mysql-innodb-cluster
+  channel: 8.0/stable
+```
+````
+`````
+
+## Supported MySQL versions by the modern charm
+
+Both the K8s and VM modern charms support only MySQL 8.0 (based on the Jammy/22.04 series).
+
+Please {ref}`contact us ` if you need different versions/series.
+
+## Supported architectures
+
+Currently, all modern charm revisions support `amd64`. Later revisions introduced support for `arm64` and `s390x`.
+
+See the {ref}`release-notes` for more information.
+
+## Report issues and contact authors
+
+The modern charms (from `8.0/stable`) for VM and K8s are stored in the [`mysql-operators` GitHub repository](https://github.com/canonical/mysql-operators).
+
+Bug reports and feature requests can be submitted as GitHub issues.
+
+See {ref}`contacts` for more information.
+
+
+[MariaDB]: https://charmhub.io/mariadb
+[OSM MariaDB]: https://charmhub.io/charmed-osm-mariadb-k8s
+[Percona Cluster]: https://charmhub.io/percona-cluster
+[MySQL Innodb Cluster]: https://charmhub.io/mysql-innodb-cluster
\ No newline at end of file
diff --git a/docs/explanation/logs/audit-logs.md b/docs/explanation/logs/audit-logs.md
new file mode 100644
index 000000000..c31e3a9f4
--- /dev/null
+++ b/docs/explanation/logs/audit-logs.md
@@ -0,0 +1,95 @@
+(audit-logs)=
+# Audit logs
+
+The Audit Log plugin allows fine-grained configuration of which records are stored in a log file: logins/logouts, queries, or both. It is enabled in Charmed MySQL by default.
+
+## Overview
+
+The following is a sample of the audit logs, in JSON format with only login records (the default configuration):
+
+```json
+{"audit_record":{"name":"Quit","record":"6_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:33Z","connection_id":"992","status":0,"user":"clusteradmin","priv_user":"clusteradmin","os_login":"","proxy_user":"","host":"localhost","ip":"","db":""}}
+{"audit_record":{"name":"Connect","record":"7_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:33Z","connection_id":"993","status":1156,"user":"","priv_user":"","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}}
+{"audit_record":{"name":"Connect","record":"8_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:33Z","connection_id":"994","status":0,"user":"serverconfig","priv_user":"serverconfig","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}}
+```
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+The logs are stored in the `/var/snap/charmed-mysql/common/var/log/mysql` directory, and are rotated every minute to the `/var/snap/charmed-mysql/common/var/log/mysql/archive_audit` directory.
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+The logs are stored in the `/var/log/mysql` directory of the mysql container, and are rotated every minute to the `/var/log/mysql/archive_audit` directory.
+```
+````
+
+It's recommended to integrate the charm with {ref}`COS `, from where the logs can be easily persisted and queried using Loki and Grafana. 
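+
+Outside of COS, the raw file can also be inspected directly on a unit. A minimal sketch for the VM charm, using the log path listed above (the unit name is an example):
+
+    juju ssh mysql/0 'sudo tail -n 5 /var/snap/charmed-mysql/common/var/log/mysql/audit.log'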
+
+## Configurations
+
+### `plugin-audit-enabled`
+
+The audit plugin is enabled by default in the charm, but it's possible to disable it by setting:
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju config mysql plugin-audit-enabled=false
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju config mysql-k8s plugin-audit-enabled=false
+```
+````
+
+Valid values are `false` and `true` (default). When set to `false`, existing logs are still kept in the `archive_audit` directory.
+
+### `logs_audit_policy`
+
+Controls the audit log policy:
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju config mysql logs_audit_policy=queries
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju config mysql-k8s logs_audit_policy=queries
+```
+````
+
+Valid values are `logins` (default), `queries`, and `all`.
+
+### `plugin-audit-strategy`
+
+By default, the audit plugin writes logs in asynchronous mode for better performance.
+
+To ensure logs are written to disk in a more timely fashion, this configuration can be set to semi-synchronous mode:
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju config mysql plugin-audit-strategy=semi-async
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju config mysql-k8s plugin-audit-strategy=semi-async
+```
+````
+
+Valid values are `async` (default) and `semi-async`.
+
diff --git a/kubernetes/docs/explanation/logs/index.md b/docs/explanation/logs/index.md
similarity index 81%
rename from kubernetes/docs/explanation/logs/index.md
rename to docs/explanation/logs/index.md
index 9d38a4027..f19b592f9 100644
--- a/kubernetes/docs/explanation/logs/index.md
+++ b/docs/explanation/logs/index.md
@@ -1,42 +1,76 @@
+(logs)=
 # Logs
 
-This explanation goes over the types of logging in MySQL and the configuration parameters for log rotation.
+This explanation goes over the types of logging in MySQL and the configuration parameters for log
+rotation.
 
-The charm currently has audit and error logs enabled by default. All of these files are rotated if present into a separate dedicated archive folder under the logs directory. We do not yet support the rotation of binary logs (binlog, relay log, undo log, redo log, etc).
+The charm currently has audit and error logs enabled by default. All of these files are rotated,
+if present, into a separate dedicated archive folder under the logs directory. We do not yet support the rotation of binary logs (binlog, relay log, undo log, redo log, etc).
 
 ## Log types
 
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+The charm stores its logs in `/var/snap/charmed-mysql/common/var/log/mysql`.
+
+    $ ls -lahR /var/snap/charmed-mysql/common/var/log/mysql
+
+    # /var/snap/charmed-mysql/common/var/log/mysql:
+    drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_audit
+    drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_error
+    -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 audit.log
+    -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 error.log
+
+    # /var/snap/charmed-mysql/common/var/log/mysql/archive_audit:
+    -rw-r----- 1 snap_daemon root 43K Sep 3 01:24 audit.log-20240903_0124.gz
+    -rw-r----- 1 snap_daemon root 109K Sep 3 01:25 audit.log-20240903_0125.gz
+
+    # /var/snap/charmed-mysql/common/var/log/mysql/archive_error:
+    -rw-r----- 1 mysql mysql 8.7K Oct 23 20:44 error.log-43_2045.gz
+    -rw-r----- 1 mysql mysql 2.3K Oct 23 20:45 error.log-43_2046.gz
+```
+
+```{tab-item} K8s
+:sync: k8s
+
 The charm stores its logs in `/var/log/mysql`. 
-```shell -$ ls -lahR /var/log/mysql + $ ls -lahR /var/log/mysql -/var/log/mysql: -drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_audit -drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_error --rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 audit.log --rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 error.log + # /var/log/mysql: + drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_audit + drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_error + -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 audit.log + -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 error.log -/var/log/mysql/archive_audit: --rw-r----- 1 snap_daemon root 43K Sep 3 01:24 audit.log-20240903_0124.gz --rw-r----- 1 snap_daemon root 109K Sep 3 01:25 audit.log-20240903_0125.gz + # /var/log/mysql/archive_audit: + -rw-r----- 1 snap_daemon root 43K Sep 3 01:24 audit.log-20240903_0124.gz + -rw-r----- 1 snap_daemon root 109K Sep 3 01:25 audit.log-20240903_0125.gz -/var/log/mysql/archive_error: --rw-r----- 1 mysql mysql 8.7K Oct 23 20:44 error.log-43_2045.gz --rw-r----- 1 mysql mysql 2.3K Oct 23 20:45 error.log-43_2046.gz + # /var/log/mysql/archive_error: + -rw-r----- 1 mysql mysql 8.7K Oct 23 20:44 error.log-43_2045.gz + -rw-r----- 1 mysql mysql 2.3K Oct 23 20:45 error.log-43_2046.gz ``` +```` + -It is recommended to set up a [COS integration] so that these log files can be streamed to Loki. This leads to better persistence and security of the logs. +It is recommended to set up a {ref}`COS integration ` so that these log files can be streamed to Loki. This leads to better persistence and security of the logs. ### Audit logs + The Audit Log plugin allows all login/logout records to be stored in a log file.
+ Example of audit logs in JSON format with login/logout records ```json + {"audit_record":{"name":"Connect","record":"17_2024-09-03T01:52:14","timestamp":"2024-09-03T01:53:14Z","connection_id":"988","status":1156,"user":"","priv_user":"","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}} {"audit_record":{"name":"Connect","record":"18_2024-09-03T01:52:14","timestamp":"2024-09-03T01:53:14Z","connection_id":"989","status":0,"user":"serverconfig","priv_user":"serverconfig","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}} {"audit_record":{"name":"Quit","record":"1_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:14Z","connection_id":"989","status":0,"user":"serverconfig","priv_user":"serverconfig","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}} @@ -48,14 +82,17 @@ The Audit Log plugin allows all login/logout records to be stored in a log file. {"audit_record":{"name":"Connect","record":"7_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:33Z","connection_id":"993","status":1156,"user":"","priv_user":"","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}} {"audit_record":{"name":"Connect","record":"8_2024-09-03T01:53:14","timestamp":"2024-09-03T01:53:33Z","connection_id":"994","status":0,"user":"serverconfig","priv_user":"serverconfig","os_login":"","proxy_user":"","host":"juju-da2225-8","ip":"10.207.85.214","db":""}} ``` +
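+
+Since each record is a single JSON object per line, the stream is easy to filter with standard tooling. A sketch using `jq` (assuming `jq` is available on the unit; the K8s path is shown):
+
+```shell
+# Show only failed connection attempts (records with a non-zero status)
+jq -c 'select(.audit_record.status != 0)' /var/log/mysql/audit.log
+```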
-For more details, see the [Audit Logs explanation]. +The audit log allows for more configuration. For details, see {ref}`audit-logs`. ### Error logs
+ Example of error logs with format time thread [label] [err_code] [subsystem] msg + ```shell 2023-10-24T23:28:07.048728Z mysqld_safe Number of processes running now: 0 2023-10-24T23:28:07.063027Z mysqld_safe mysqld restarted @@ -66,7 +103,7 @@ For more details, see the [Audit Logs explanation]. 2023-10-24T23:28:11.486308Z 0 [Warning] [MY-010068] [Server] CA certificate ca.pem is self signed. 2023-10-24T23:28:11.487473Z 0 [System] [MY-013602] [Server] Channel mysql_main configured to support TLS. Encrypted connections are now supported for this channel. 2023-10-24T23:28:11.538807Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Bind-address: '0.0.0.0' port: 33060, socket: /var/snap/charmed-mysql/common/var/run/mysqld/mysqlx.sock -2023-10-24T23:28:11.538957Z 0 [System] [MY-010931] [Server] /snap/charmed-mysql/69/usr/sbin/mysqld: ready for connections. Version: '8.0.34-0ubuntu0.22.04.1' socket: '/var/snap/charmed-mysql/common/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu). +2023-10-24T23:28:11.538957Z 0 [System] [MY-010931] [Server] /snap/charmed-mysql/69/usr/sbin/mysqld: ready for connections. Version: '8.0.34-0ubuntu0.22.04.1' socket: '/var/snap/charmed-mysql/common/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu). 2023-10-24T23:28:17.983851Z 12 [Warning] [MY-010604] [Repl] Neither --relay-log nor --relay-log-index were used; so replication may break when this MySQL server acts as a replica and has his hostname changed!! Please use '--relay-log=juju-9860bb-0-relay-bin' to avoid this problem. 2023-10-24T23:28:17.999093Z 12 [System] [MY-010597] [Repl] 'CHANGE REPLICATION SOURCE TO FOR CHANNEL 'mysqlsh.test' executed'. Previous state source_host='', source_port= 3306, source_log_file='', source_log_pos= 4, source_bind=''. New state source_host='juju-9860bb-0.lxd', source_port= 3306, source_log_file='', source_log_pos= 4, source_bind=''. 2023-10-24T23:28:18.025941Z 15 [Warning] [MY-010897] [Repl] Storing MySQL user name or password information in the connection metadata repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START REPLICA; see the 'START REPLICA Syntax' in the MySQL Manual for more information. @@ -88,10 +125,15 @@ For more details, see the [Audit Logs explanation]. 2023-10-24T23:28:19.179289Z 28 [System] [MY-011566] [Repl] Plugin group_replication reported: 'Setting super_read_only=OFF.' 2023-10-24T23:28:19.179408Z 28 [System] [MY-013731] [Repl] Plugin group_replication reported: 'The member action "mysql_start_failover_channels_if_primary" for event "AFTER_PRIMARY_ELECTION" with priority "10" will be run.' 2023-10-24T23:28:19.179600Z 31 [System] [MY-011510] [Repl] Plugin group_replication reported: 'This server is working as primary member.' -2023-10-24T23:28:19.875216Z 12 [System] [MY-014010] [Repl] Plugin group_replication reported: 'Plugin 'group_replication' has been started.' +2023-10-24T23:28:19.875216Z 12 [System] [MY-014010] [Repl] Plugin group_replication reported: 'Plugin 'group_replication' has been started.' ``` +
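+To follow the error log live on a machine unit, something like the following can be used (a sketch; the unit name is an example and the VM snap path is shown):
+
+```shell
+juju ssh mysql/0 'sudo tail -f /var/snap/charmed-mysql/common/var/log/mysql/error.log'
+```
+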
+### Other logs
+
+Other logs (slow queries, general query) are currently disabled.
+
 ## Log rotation configuration
 
 The following are the configuration options exposed by the charm:
@@ -135,11 +177,10 @@ The following are logrotate config values used for log rotation:
 
 ## High-level design
 
-There is a cron job on the machine where the charm exists that is triggered every minute and runs `logrotate`. The logrotate utility does *not* use `copytruncate`. Instead, the existing log file is moved into the archive directory by logrotate, and then the logrotate's postrotate script invokes `juju-run` (or `juju-exec` depending on the juju version) to dispatch a custom event. This custom event's handler flushes the MySQL log with the [FLUSH](https://dev.mysql.com/doc/refman/8.0/en/flush.html) statement that will result in a new and empty log file being created under `/var/log/mysql` and the rotated file's descriptor being closed.
+There is a cron job on the machine where the charm runs that is triggered every minute and runs `logrotate`. The logrotate utility does *not* use `copytruncate`. Instead, the existing log file is moved into the archive directory by logrotate, and then the logrotate post-rotate script invokes `juju-run` (or `juju-exec`, depending on the juju version) to dispatch a custom event. This custom event's handler flushes the MySQL log with the [FLUSH](https://dev.mysql.com/doc/refman/8.0/en/flush.html) statement, which results in a new and empty log file being created under `/var/log/mysql` and the rotated file's descriptor being closed.
 
 We use a custom event in juju to execute the FLUSH statement in order to avoid storing any credentials on the disk. The charm code has a mechanism that will retrieve credentials from the peer relation databag or juju secrets backend, if available, and keep these credentials in memory for the duration of the event handler.
 
-
 [COS integration]: /how-to/monitoring-cos/enable-monitoring
@@ -149,7 +190,6 @@
 ```{toctree}
 :titlesonly:
 :maxdepth: 2
-:hidden:
 
 Audit logs
 ```
diff --git a/machines/docs/explanation/roles.md b/docs/explanation/roles.md
similarity index 85%
rename from machines/docs/explanation/roles.md
rename to docs/explanation/roles.md
index a113a3a13..1676de6bd 100644
--- a/machines/docs/explanation/roles.md
+++ b/docs/explanation/roles.md
@@ -1,7 +1,10 @@
+(roles)=
 # Roles
 
 ```{note}
-The following roles are available starting on revision 412
+The following roles are available in `8.0` starting from:
+* Revision 412 for the {ref}`VM charm `
+* Revision 284 for the {ref}`K8s charm `
 ```
 
 There are several definitions of roles in Charmed MySQL:
@@ -9,10 +12,11 @@ There are several definitions of roles in Charmed MySQL:
 * Predefined database-level roles
 
 ```{seealso}
-[](/explanation/users)
+{ref}`users`
 ```
 
 ## MySQL roles
+
 MySQL does not provide any built-in roles from which users can inherit permissions.
 
 ## Charmed MySQL instance-level roles
@@ -44,12 +48,14 @@ mysql> SELECT host, user FROM mysql.user;
 +-----------+------------------+
 
-Additionally, the role `charmed_router` is available to ease the integration with [Charmed MySQL Router](https://charmhub.io/mysql-router).
+Additionally, the role `charmed_router` is available to ease the integration with Charmed MySQL Router ([VM](https://charmhub.io/mysql-router) | [K8s](https://charmhub.io/mysql-router-k8s)).
+
 This role contains all the necessary permissions for a MySQL Router relation user to operate. 
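+
+As a hypothetical check, the permissions carried by this role can be listed from any client session with sufficient privileges:
+
+```text
+mysql> SHOW GRANTS FOR 'charmed_router';
+```
+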
 ## Charmed MySQL database-level roles
 
-Charmed MySQL also introduces database level roles, with permissions tied to each database that's created.
+Charmed MySQL also introduces database-level roles, with permissions tied to each database that is created.
+
 Example for a database named `test`:
 
 ```text
diff --git a/machines/docs/explanation/security/cryptography.md b/docs/explanation/security/cryptography.md
similarity index 81%
rename from machines/docs/explanation/security/cryptography.md
rename to docs/explanation/security/cryptography.md
index 3fefdbb45..6d0d9d485 100644
--- a/machines/docs/explanation/security/cryptography.md
+++ b/docs/explanation/security/cryptography.md
@@ -1,13 +1,27 @@
-
+(cryptography)=
 # Cryptography
 
 This document describes the cryptography used by Charmed MySQL.
 
 ## Resource checksums
 
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
 Charmed MySQL and Charmed MySQL Router operators use pinned revisions of the [Charmed MySQL snap](https://github.com/canonical/charmed-mysql-snap) to provide reproducible and secure environments.
 
 The Charmed MySQL snap packages the MySQL workload along with the necessary dependencies and utilities required for the operators’ lifecycle. For more details, see the snap contents in the [snapcraft.yaml file](https://github.com/canonical/charmed-mysql-snap/blob/8.0/edge/snap/snapcraft.yaml).
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+Charmed MySQL K8s and Charmed MySQL Router K8s operators use a pinned version of the [Charmed MySQL rock](https://github.com/orgs/canonical/packages/container/package/charmed-mysql) to provide reproducible and secure environments.
+
+The rock is an OCI image derived from the respective snap. The Charmed MySQL K8s snap packages the MySQL workload along with the necessary dependencies and utilities required for the operators’ lifecycle. For more details, see the snap contents in the [snapcraft.yaml file](https://github.com/canonical/charmed-mysql-snap/blob/8.0/edge/snap/snapcraft.yaml).
+```
+````
 
 Every artifact bundled into the Charmed MySQL snap is verified against its MD5, SHA256, or SHA512 checksum after download. The installation of the certified snap into the rock is ensured by snap primitives that verify the GPG signature of its squashfs filesystem image. For more information on the snap verification process, refer to the [snapcraft.io documentation](https://snapcraft.io/docs/assertions).
@@ -15,7 +29,7 @@ Every artifact bundled into the Charmed MySQL snap is verified against its MD5,
 
 MySQL and its extra components (mysql-shell, xtrabackup, mysqld-exporter, mysqlrouter-exporter, percona-server-plugins, mysql-pitr-helper, etc.) are built by Canonical from upstream source code into PPAs and stored on [Launchpad](https://launchpad.net/mysql).
 
-Charmed MySQL snap is published using a GitHub repository workflow.
+The Charmed MySQL charms, snap, and rock are built and released programmatically using release pipelines implemented via GitHub Actions in their respective repositories.
 
 All repositories in GitHub are set up with branch protection rules, requiring:
diff --git a/docs/explanation/security/index.md b/docs/explanation/security/index.md
new file mode 100644
index 000000000..478b51ab6
--- /dev/null
+++ b/docs/explanation/security/index.md
@@ -0,0 +1,150 @@
+(security-hardening)=
+# Security hardening
+
+This document provides an overview of security features and guidance for hardening the security of [Charmed MySQL](https://charmhub.io/mysql) deployments, including setting up and managing a secure environment. 
+
+## Environment
+
+The environment where Charmed MySQL operates can be divided into two components:
+
+1. Cloud
+2. Juju
+
+### Cloud
+
+Charmed MySQL can be deployed on top of several clouds and virtualisation layers:
+
+| Cloud | Security guides |
+|-----------|---------------------------------------------------------------------------------------|
+| OpenStack | [OpenStack Security Guide] |
+| AWS | [Best practices for security, identity and compliance], [AWS security credentials] |
+| Azure | [Azure security best practices and patterns], [Managed identities for Azure resource] |
+| GCP | [Google security overview], [Harden your cluster's security] |
+
+### Juju
+
+Juju is the component responsible for orchestrating the entire lifecycle, from deployment to Day 2 operations. For more information on Juju security hardening, see the
+[Juju security page](https://documentation.ubuntu.com/juju/latest/explanation/juju-security/index.html) and the [How to harden your deployment](https://documentation.ubuntu.com/juju/3.6/howto/manage-your-juju-deployment/harden-your-juju-deployment/) guide.
+
+#### Cloud credentials
+
+When configuring cloud credentials to be used with Juju, ensure that users have the correct permissions to operate at the required level. Juju superusers responsible for bootstrapping and managing controllers require elevated permissions to manage several kinds of resources, such as virtual machines, networks, storage, etc. Please refer to the links below for more information on the required policies for each cloud.
+
+| Cloud | Cloud user policies |
+|-----------|-----------------------------------------------------------------|
+| OpenStack | [OpenStack cloud and Juju] |
+| AWS | [Juju AWS Permission], [AWS Instance Profiles], [Juju on AWS] |
+| Azure | [Juju Azure Permission], [How to use Juju with Microsoft Azure] |
+| GCP | [Google GCE cloud and Juju] |
+
+#### Juju users
+
+It is very important that Juju users are set up with minimal permissions depending on the scope of their operations. Please refer to the [User access levels](https://juju.is/docs/juju/user-permissions) documentation for more information on the access levels and corresponding abilities.
+
+Juju user credentials must be stored securely and rotated regularly to limit the chances of unauthorized access due to credentials leakage.
+
+## Applications
+
+In the following, we provide guidance on how to harden your deployment in terms of:
+
+1. Operating system
+2. Security upgrades
+3. Encryption
+4. Authentication
+5. Monitoring and auditing
+
+### Operating system
+
+Charmed MySQL and Charmed MySQL Router run on top of Ubuntu 22.04 LTS (Jammy). Deploy a [Landscape Client Charm](https://charmhub.io/landscape-client) to connect the underlying VM to a Landscape User Account to manage security upgrades and integrate [Ubuntu Pro](https://ubuntu.com/pro) subscriptions.
+
+### Security upgrades
+
+The Charmed MySQL operator and Charmed MySQL Router operator install a pinned revision of the Charmed MySQL snap to provide reproducible and secure environments.
+
+New versions (revisions) of charmed operators can be released to upgrade workloads, the operator's code, or both. It is important to refresh the charm regularly to make sure the workload is as secure as possible.
+
+For more information on upgrading the charm, see {ref}`refresh` and [How to upgrade MySQL Router](https://charmhub.io/mysql-router/docs/h-upgrade?channel=dpe/edge), as well as the {ref}`release-notes`. 
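+
+For example, a routine refresh of the VM charm to the latest revision in its tracked channel (use `mysql-k8s` for the K8s application):
+
+    juju refresh mysql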
+
+### Encryption
+
+By default, encryption is optional for both external connections and internal communication between cluster members. To enforce encryption in transit, integrate Charmed MySQL with a TLS certificate provider. Please refer to the [Charming Security page](https://charmhub.io/topics/security-with-x-509-certificates) for more information on how to select the right certificate provider for your use case.
+
+Encryption in transit for backups is provided by the storage (Charmed MySQL acts as a client of the S3 storage).
+
+For more information on encryption, see {ref}`cryptography` and {ref}`enable-tls`.
+
+### Authentication
+
+Charmed MySQL uses the [caching_sha2_password](https://dev.mysql.com/doc/refman/8.0/en/caching-sha2-pluggable-authentication.html) plugin for authentication.
+
+### Monitoring
+
+Charmed MySQL provides native integration with the [Canonical Observability Stack (COS)](https://charmhub.io/topics/canonical-observability-stack). To reduce the blast radius of infrastructure disruptions, the general recommendation is to deploy COS and the observed application into separate environments, isolated from one another. Refer to the [COS production deployments best practices](https://charmhub.io/topics/canonical-observability-stack/reference/best-practices) for more information.
+
+For more information, see {ref}`enable-monitoring`, {ref}`enable-alert-rules`, and {ref}`enable-tracing`.
+
+### Security event logging
+
+Charmed MySQL comes with the audit log plugin enabled by default.
+
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+The logs are stored in the `/var/snap/charmed-mysql/common/var/log/mysql` directory, and are rotated every minute to the `/var/snap/charmed-mysql/common/var/log/mysql/archive_audit` directory.
+
+We recommend setting the retention period to a value greater than the default (three days):
+
+    juju config mysql logs_retention_period=14 # days
+
+By default, the audit log records logins and logouts. To include the SQL queries executed by each user:
+
+    juju config mysql logs_audit_policy=all
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+The logs are stored in the `/var/log/mysql` directory of the mysql container, and are rotated every minute to the `/var/log/mysql/archive_audit` directory.
+
+We recommend setting the retention period to a value greater than the default (three days):
+
+    juju config mysql-k8s logs_retention_period=14 # days
+
+By default, the audit log records logins and logouts. To include the SQL queries executed by each user:
+
+    juju config mysql-k8s logs_audit_policy=all
+```
+````
+
+See {ref}`audit-logs`.
+
+## Additional resources
+
+For details on the cryptography used by Charmed MySQL, see {ref}`cryptography`. 
+
+```{toctree}
+:titlesonly:
+:maxdepth: 2
+:hidden:
+
+cryptography
+```
+
+
+[OpenStack Security Guide]: https://docs.openstack.org/security-guide/
+[Best practices for security, identity and compliance]: https://aws.amazon.com/architecture/security-identity-compliance
+[AWS security credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html
+[Azure security best practices and patterns]: https://learn.microsoft.com/en-us/azure/security/fundamentals/best-practices-and-patterns
+[Managed identities for Azure resource]: https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/
+[Google security overview]: https://cloud.google.com/kubernetes-engine/docs/concepts/security-overview
+[Harden your cluster's security]: https://cloud.google.com/kubernetes-engine/docs/concepts/security-overview
+
+
+[OpenStack cloud and Juju]: https://canonical-juju.readthedocs-hosted.com/en/latest/user/reference/cloud/list-of-supported-clouds/the-openstack-cloud-and-juju/
+[Juju AWS Permission]: https://discourse.charmhub.io/t/juju-aws-permissions/5307
+[AWS Instance Profiles]: https://discourse.charmhub.io/t/using-aws-instance-profiles-with-juju-2-9/5185
+[Juju on AWS]: https://juju.is/docs/juju/amazon-ec2
+[Juju Azure Permission]: https://juju.is/docs/juju/microsoft-azure
+[How to use Juju with Microsoft Azure]: https://discourse.charmhub.io/t/how-to-use-juju-with-microsoft-azure/15219
+[Google GCE cloud and Juju]: https://canonical-juju.readthedocs-hosted.com/en/latest/user/reference/cloud/list-of-supported-clouds/the-google-gce-cloud-and-juju/
\ No newline at end of file
diff --git a/machines/docs/explanation/users.md b/docs/explanation/users.md
similarity index 79%
rename from machines/docs/explanation/users.md
rename to docs/explanation/users.md
index 3dc949a90..8eb73c75c 100644
--- a/machines/docs/explanation/users.md
+++ b/docs/explanation/users.md
@@ -1,3 +1,4 @@
+(users)=
 # Users
 
 There are two main types of users in MySQL:
@@ -13,12 +14,12 @@ The operator uses the following internal database users:
 * `root` - the initial/default MySQL user. Used for the initial bootstrap and restricted to local access.
 * `clusteradmin` - the user to manage replication in the MySQL InnoDB ClusterSet.
 * `serverconfig` - the user that operates MySQL instances.
-* `monitoring` - the user for [COS integration](/how-to/monitoring-cos/enable-monitoring).
-* `backups` - the user to [perform/list/restore backups](/how-to/back-up-and-restore/create-a-backup).
+* `monitoring` - the user for {ref}`COS integration `.
+* `backups` - the user for {ref}`backup operations `.
 * `mysql_innodb_cluster_#######` - the [internal recovery users](https://dev.mysql.com/doc/mysql-shell/8.0/en/innodb-cluster-user-accounts.html#mysql-innodb-cluster-users-created) which enable connections between the servers in the cluster. A dedicated user is created for each Juju unit/InnoDB Cluster member.
 * `mysql_innodb_cs_#######` - the internal recovery user which enables connections between MySQL InnoDB Clusters in a ClusterSet. One user is created for the entire MySQL ClusterSet.
 
-The full list of internal users is available in charm [source code](https://github.com/canonical/mysql-operator/blob/main/src/constants.py).
+The full list of internal users is available in the charm's source code ([VM](https://github.com/canonical/mysql-operators/blob/8.0/edge/machines/src/constants.py) | [K8s](https://github.com/canonical/mysql-operators/blob/8.0/edge/kubernetes/src/constants.py)). 
 ```{caution}
 It is forbidden to manage internal users, as they are dedicated to the operator’s logic.
@@ -52,12 +53,12 @@ mysql> select Host,User,account_locked from mysql.user;
 
 Passwords for *internal* users can be rotated using the action `set-password` on the juju leader unit.
 
 ```{seealso}
-[How to manage passwords](/how-to/manage-passwords)
+{ref}`manage-passwords`
 ```
 
 ## Relation users
 
-The operator created a dedicated user for every application related/integrated with database. The username is composed by the relation ID and truncated uuid for the model, to ensure there is no username clash in cross model relations. Usernames are limited to 32 chars as per [MySQL limit](https://dev.mysql.com/doc/refman/8.0/en/user-names.html).
+The operator creates a dedicated user for every application related/integrated with the database. The username is composed of the relation ID and a truncated UUID for the model, to ensure there is no username clash in cross-model relations. Usernames are limited to 32 characters, as per the [MySQL limit](https://dev.mysql.com/doc/refman/8.0/en/user-names.html).
 
 Relation users are removed when the juju relation/integration is removed. However, database data stays in place and can be reused on re-created relations (using new user credentials):
 
@@ -72,7 +73,7 @@ mysql> select Host,User,account_locked from mysql.user where User like 'relation
 2 row in set (0.00 sec)
 ```
 
-The extra user(s) will be created for relation with [mysql-router](https://charmhub.io/mysql-router) charm to provide necessary users for applications related via the `mysql-router` app:
+Extra user(s) will be created for relations with the MySQL Router charm ([VM](https://charmhub.io/mysql-router) | [K8s](https://charmhub.io/mysql-router-k8s)) to provide the necessary users for applications related via the `mysql-router` (or `mysql-router-k8s`) app:
 
 ```shell
 mysql> select Host,User,account_locked from mysql.user where User like 'mysql_router%';
@@ -86,15 +87,26 @@ mysql> select Host,User,account_locked from mysql.user where User like 'mysql_ro
 
 To rotate passwords for relation users, remove the relation and re-relate:
 
-```shell
-juju remove-relation mysql myclientapp
-juju wait-for application mysql
-juju relate mysql myclientapp
+````{tab-set}
+```{tab-item} VM
+:sync: vm
+
+    juju remove-relation mysql 
+    juju wait-for application mysql
+    juju relate mysql 
+```
+
+```{tab-item} K8s
+:sync: k8s
+
+    juju remove-relation mysql-k8s 
+    juju wait-for application mysql-k8s
+    juju relate mysql-k8s 
 ```
+````
 
 ### Admin port user access
 
 The charm mainly uses the `serverconfig` user for internal operations. For connections with this user, a special admin port is used (port `33062`), which enables the charm to operate MySQL even when user connections are saturated. For further information on the administrative connection, refer to [MySQL docs](https://dev.mysql.com/doc/refman/8.0/en/administrative-connection-interface.html) on the topic.
 
-
diff --git a/machines/docs/how-to/back-up-and-restore/configure-s3-aws.md b/docs/how-to/back-up-and-restore/configure-s3-aws.md
similarity index 55%
rename from machines/docs/how-to/back-up-and-restore/configure-s3-aws.md
rename to docs/how-to/back-up-and-restore/configure-s3-aws.md
index f3b0d5e52..ab7803332 100644
--- a/machines/docs/how-to/back-up-and-restore/configure-s3-aws.md
+++ b/docs/how-to/back-up-and-restore/configure-s3-aws.md
@@ -1,3 +1,4 @@
+(configure-s3-aws)=
 # Configure S3 for AWS
 
 Charmed MySQL backups can be stored on any S3 compatible storage. 
S3 access and configurations are managed with the [`s3-integrator` charm](https://charmhub.io/s3-integrator). @@ -5,7 +6,7 @@ Charmed MySQL backups can be stored on any S3 compatible storage. S3 access and This guide will teach you how to deploy and configure the s3-integrator charm for [AWS S3](https://aws.amazon.com/s3/), send the configuration to a Charmed MySQL application, and update it. ```{seealso} -[](/how-to/back-up-and-restore/configure-s3-radosgw) +{ref}`configure-s3-radosgw` ``` ## Set up `s3-integrator` @@ -23,25 +24,57 @@ juju config s3-integrator \ ``` ```{note} -The amazon S3 endpoint must be specified as `s3..amazonaws.com ` within the first 24 hours of creating the bucket. For older buckets, the endpoint `s3.amazonaws.com` can be used. +The Amazon S3 endpoint must be specified as `s3..amazonaws.com ` within the first 24 hours of creating the bucket. For older buckets, the endpoint `s3.amazonaws.com` can be used. See [this post](https://repost.aws/knowledge-center/s3-http-307-response) for more information. ``` +```{admonition} Juju 2.9 users +:class: tip + +Remember that `juju run ` becomes `juju run-action --wait`. + +See also: {ref}`breaking-changes-juju` +``` + To pass these configurations to Charmed MySQL, relate the two applications: -```shell -juju integrate s3-integrator mysql + +````{tab-set} +```{tab-item} VM +:sync: vm + + juju relate s3-integrator mysql ``` +```{tab-item} K8s +:sync: k8s + + juju relate s3-integrator mysql-k8s +``` +```` + You can create, list, and restore backups now: -```shell -juju run mysql/leader list-backups -juju run mysql/leader create-backup -juju run mysql/leader list-backups -juju run mysql/leader restore backup-id= +````{tab-set} +```{tab-item} VM +:sync: vm + + juju run mysql/leader list-backups + juju run mysql/leader create-backup + juju run mysql/leader list-backups + juju run mysql/leader restore backup-id= +``` + +```{tab-item} K8s +:sync: k8s + + juju run mysql-k8s/leader list-backups + juju run mysql-k8s/leader create-backup + juju run mysql-k8s/leader list-backups + juju run mysql-k8s/leader restore backup-id= ``` +```` You can also update your S3 configuration options after relating: @@ -49,5 +82,5 @@ You can also update your S3 configuration options after relating: juju config s3-integrator