diff --git a/kubernetes/lib/charms/mysql/v0/mysql.py b/kubernetes/lib/charms/mysql/v0/mysql.py index 85a6cbe86..e94e8941b 100644 --- a/kubernetes/lib/charms/mysql/v0/mysql.py +++ b/kubernetes/lib/charms/mysql/v0/mysql.py @@ -78,6 +78,9 @@ def __init__( import ops from charms.data_platform_libs.v0.data_interfaces import DataPeerData, DataPeerUnitData from constants import ( + MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, + MYSQL_TEMP_DIR, BACKUPS_PASSWORD_KEY, BACKUPS_USERNAME, CHARMED_MYSQL_PITR_HELPER, @@ -1105,7 +1108,6 @@ def render_mysqld_configuration( # noqa: C901 memory_limit: int | None = None, experimental_max_connections: int | None = None, binlog_retention_days: int, - snap_common: str = "", ) -> tuple[str, dict]: """Render mysqld ini configuration file.""" max_connections = None @@ -1154,13 +1156,13 @@ def render_mysqld_configuration( # noqa: C901 # disable memory instruments if we have less than 2GiB of RAM performance_schema_instrument = "'memory/%=OFF'" - logging_path = f"{snap_common}/var/log/mysql" binlog_retention_seconds = binlog_retention_days * 24 * 60 * 60 config = configparser.ConfigParser(interpolation=None) # do not enable slow query logs, but specify a log file path in case # the admin enables them manually - config["mysqld"] = { + base_config = { + "datadir": MYSQL_DATA_DIR, # All interfaces bind expected "bind_address": "0.0.0.0", # noqa: S104 "mysqlx_bind_address": "0.0.0.0", # noqa: S104 @@ -1168,12 +1170,17 @@ def render_mysqld_configuration( # noqa: C901 "report_host": self.instance_address, "max_connections": max_connections, "innodb_buffer_pool_size": innodb_buffer_pool_size, + "innodb_log_group_home_dir": MYSQL_LOGS_DIR, + "innodb_temp_tablespaces_dir": MYSQL_TEMP_DIR, + "innodb_undo_directory": MYSQL_LOGS_DIR, + "log_bin": f"{MYSQL_LOGS_DIR}/binlog", + "log_bin_index": f"{MYSQL_LOGS_DIR}/binlog.index", "log_error_services": "log_filter_internal;log_sink_internal", - "log_error": f"{logging_path}/error.log", + "log_error": 
f"{MYSQL_LOGS_DIR}/error.log", "general_log": "OFF", - "general_log_file": f"{logging_path}/general.log", + "general_log_file": f"{MYSQL_LOGS_DIR}/general.log", "loose-group_replication_paxos_single_leader": "ON", - "slow_query_log_file": f"{logging_path}/slow.log", + "slow_query_log_file": f"{MYSQL_LOGS_DIR}/slow.log", "binlog_expire_logs_seconds": f"{binlog_retention_seconds}", "gtid_mode": "ON", "enforce_gtid_consistency": "ON", @@ -1187,9 +1194,10 @@ def render_mysqld_configuration( # noqa: C901 "loose-validate_password.policy": "MEDIUM", "loose-validate_password.special_char_count": 0, } + config["mysqld"] = base_config # ty:ignore[invalid-assignment] if audit_log_enabled: - config["mysqld"]["loose-audit_log_filter.file"] = f"{logging_path}/audit.log" + config["mysqld"]["loose-audit_log_filter.file"] = f"{MYSQL_LOGS_DIR}/audit.log" config["mysqld"]["loose-audit_log_filter.format"] = "JSON" config["mysqld"]["loose-audit_log_filter.policy"] = audit_log_policy.upper() if audit_log_strategy == "async": @@ -2712,8 +2720,9 @@ def empty_data_files( mysql_data_directory: str, user: str | None = None, group: str | None = None, + extra_dirs: list[str] | None = None, ) -> None: - """Empty the mysql data directory in preparation of backup restore.""" + """Empty the mysql data directories in preparation of backup restore.""" empty_data_files_command = [ "find", mysql_data_directory, @@ -2733,11 +2742,26 @@ def empty_data_files( user=user, group=group, ) + + for extra_dir in (extra_dirs or []): + logger.debug(f"Emptying extra directory {extra_dir}") + self._execute_commands( + [ + "find", + extra_dir, + "-not", + "-path", + extra_dir, + "-delete", + ], + user=user, + group=group, + ) except MySQLExecError as e: - logger.error("Failed to empty data directory in prep for backup restore") + logger.error("Failed to empty data directories in prep for backup restore") raise MySQLEmptyDataDirectoryError(e.message) from e except Exception as e: - logger.error("Failed to empty data 
directory in prep for backup restore") + logger.error("Failed to empty data directories in prep for backup restore") raise MySQLEmptyDataDirectoryError from e def restore_backup( diff --git a/kubernetes/metadata.yaml b/kubernetes/metadata.yaml index 1281587c0..a6e42f2c5 100644 --- a/kubernetes/metadata.yaml +++ b/kubernetes/metadata.yaml @@ -14,7 +14,7 @@ docs: https://canonical-charmed-mysql-k8s.readthedocs-hosted.com/ source: https://github.com/canonical/mysql-operators issues: https://github.com/canonical/mysql-operators/issues website: - - https://ubuntu.com/data/mysql + - https://canonical.com/data/mysql - https://charmhub.io/mysql-k8s - https://github.com/canonical/mysql-operators maintainers: @@ -27,8 +27,14 @@ containers: gid: 584788 resource: mysql-image mounts: - - storage: database - location: /var/lib/mysql + - storage: archive + location: /var/lib/mysql/archive + - storage: data + location: /var/lib/mysql/data + - storage: logs + location: /var/lib/mysql/logs + - storage: temp + location: /var/lib/mysql/temp resources: mysql-image: @@ -81,9 +87,18 @@ requires: optional: true storage: - database: + archive: + type: filesystem + description: Persistent storage for rotated logs and other archival purposes + data: type: filesystem description: Persistent storage for MySQL data + temp: + type: filesystem + description: Persistent storage for InnoDB temporary tablespaces + logs: + type: filesystem + description: Persistent storage for MySQL error logs, general query logs, slow query logs, binary logs, redo logs and undo logs assumes: - k8s-api diff --git a/kubernetes/src/charm.py b/kubernetes/src/charm.py index f9aff5f16..566d705e9 100755 --- a/kubernetes/src/charm.py +++ b/kubernetes/src/charm.py @@ -75,6 +75,7 @@ MONITORING_PASSWORD_KEY, MONITORING_USERNAME, MYSQL_BINLOGS_COLLECTOR_SERVICE, + MYSQL_DATA_DIR, MYSQL_LOG_ERROR, MYSQL_LOG_FILES, MYSQL_LOG_SERVICE, @@ -128,9 +129,11 @@ def __init__(self, *args): self.framework.observe(self.on.leader_elected, 
self._on_leader_elected) self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.update_status, self._on_update_status) - self.framework.observe( - self.on.database_storage_detaching, self._on_database_storage_detaching - ) + + self.framework.observe(self.on.archive_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.data_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.logs_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.temp_storage_detaching, self._on_storage_detaching) self.framework.observe(self.on[PEER].relation_joined, self._on_peer_relation_joined) self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed) @@ -227,7 +230,7 @@ def _pebble_layer(self) -> Layer: mysqld_cmd = [ MYSQLD_LOCATION, "--basedir=/usr", - "--datadir=/var/lib/mysql", + f"--datadir={MYSQL_DATA_DIR}", "--plugin-dir=/usr/lib/mysql/plugin", f"--log-error={MYSQL_LOG_ERROR}", f"--pid-file={self.unit_label}.pid", @@ -1064,7 +1067,7 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None: if not self._mysql.reconcile_binlogs_collection(force_restart=True): logger.error("Failed to reconcile binlogs collection during peer departed event") - def _on_database_storage_detaching(self, _) -> None: + def _on_storage_detaching(self, _) -> None: """Handle the database storage detaching event.""" # Only executes if the unit was initialised if not self.unit_initialized(): diff --git a/kubernetes/src/constants.py b/kubernetes/src/constants.py index 0310e2e1a..238f6dd6e 100644 --- a/kubernetes/src/constants.py +++ b/kubernetes/src/constants.py @@ -27,16 +27,18 @@ TLS_SSL_CERT_FILE = "custom-server-cert.pem" MYSQL_CLI_LOCATION = "/usr/bin/mysql" MYSQLSH_LOCATION = "/usr/bin/mysqlsh" -MYSQL_DATA_DIR = "/var/lib/mysql" +MYSQL_ARCHIVE_DIR = "/var/lib/mysql/archive" # Corresponds to the archive storage mount +MYSQL_DATA_DIR = 
"/var/lib/mysql/data" # Corresponds to the data storage mount +MYSQL_LOGS_DIR = "/var/lib/mysql/logs" # Corresponds to the logs storage mount +MYSQL_TEMP_DIR = "/var/lib/mysql/temp" # Corresponds to the temp storage mount MYSQLD_SOCK_FILE = "/var/run/mysqld/mysqld.sock" MYSQLD_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom.cnf" MYSQLD_INIT_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom-init-file.cnf" -MYSQL_LOG_DIR = "/var/log/mysql" -MYSQL_LOG_ERROR = f"{MYSQL_LOG_DIR}/error.log" +MYSQL_LOG_ERROR = f"{MYSQL_LOGS_DIR}/error.log" MYSQL_LOG_FILES = [ MYSQL_LOG_ERROR, - f"{MYSQL_LOG_DIR}/audit.log", - f"{MYSQL_LOG_DIR}/general.log", + f"{MYSQL_LOGS_DIR}/audit.log", + f"{MYSQL_LOGS_DIR}/general.log", ] MYSQL_SYSTEM_USER = "mysql" MYSQL_SYSTEM_GROUP = "mysql" diff --git a/kubernetes/src/mysql_k8s_helpers.py b/kubernetes/src/mysql_k8s_helpers.py index 5722de153..d807ddc6b 100644 --- a/kubernetes/src/mysql_k8s_helpers.py +++ b/kubernetes/src/mysql_k8s_helpers.py @@ -35,12 +35,14 @@ CHARMED_MYSQL_XTRABACKUP_LOCATION, CONTAINER_NAME, LOG_ROTATE_CONFIG_FILE, + MYSQL_ARCHIVE_DIR, MYSQL_BINLOGS_COLLECTOR_SERVICE, MYSQL_DATA_DIR, - MYSQL_LOG_DIR, MYSQL_LOG_ERROR, + MYSQL_LOGS_DIR, MYSQL_SYSTEM_GROUP, MYSQL_SYSTEM_USER, + MYSQL_TEMP_DIR, MYSQLD_DEFAULTS_CONFIG_FILE, MYSQLD_INIT_CONFIG_FILE, MYSQLD_LOCATION, @@ -184,6 +186,14 @@ def initialise_mysqld(self) -> None: "--initialize", "-u", MYSQL_SYSTEM_USER, + "--datadir", + MYSQL_DATA_DIR, + "--innodb-log-group-home-dir", + MYSQL_LOGS_DIR, + "--innodb-undo-directory", + MYSQL_LOGS_DIR, + "--innodb-temp-tablespaces-dir", + MYSQL_TEMP_DIR, ] try: @@ -277,7 +287,8 @@ def setup_logrotate_config( rendered = template.render( system_user=MYSQL_SYSTEM_USER, system_group=MYSQL_SYSTEM_GROUP, - log_dir=MYSQL_LOG_DIR, + log_dir=MYSQL_LOGS_DIR, + archive_dir=MYSQL_ARCHIVE_DIR, logs_retention_period=logs_retention_period, logs_rotations=logs_rotations, logs_compression_enabled=logs_compression, @@ -368,6 +379,7 @@ def empty_data_files(self) -> 
None: MYSQL_DATA_DIR, user=MYSQL_SYSTEM_USER, group=MYSQL_SYSTEM_GROUP, + extra_dirs=[MYSQL_TEMP_DIR, MYSQL_LOGS_DIR], ) def restore_backup(self, backup_location: str) -> tuple[str, str]: @@ -577,8 +589,8 @@ def is_data_dir_initialised(self) -> bool: # minimal expected content for an integral mysqld data-dir expected_content = { - "#innodb_redo", - "#innodb_temp", + # "#innodb_redo", # stored separately + # "#innodb_temp", # stored separately "auto.cnf", "ca-key.pem", "ca.pem", @@ -593,9 +605,10 @@ def is_data_dir_initialised(self) -> bool: "server-cert.pem", "server-key.pem", "sys", - "undo_001", - "undo_002", + # "undo_001", # stored separately + # "undo_002", # stored separately } + logger.debug("mysql data dir contents: %s", content_set) return expected_content <= content_set except (ExecError, APIError): diff --git a/kubernetes/templates/logrotate.j2 b/kubernetes/templates/logrotate.j2 index 101882636..3b8fb166a 100644 --- a/kubernetes/templates/logrotate.j2 +++ b/kubernetes/templates/logrotate.j2 @@ -22,10 +22,13 @@ ifempty missingok nomail nosharedscripts -nocopytruncate +# Needed because rotated logs might be in a different filesystem, +# `nocopytruncate` fails with `failed to rename ...: Invalid cross-device link` +copy +copytruncate {% for log in enabled_log_files %} {{ log_dir }}/{{ log }}.log { - olddir archive_{{ log }} + olddir {{ archive_dir }}/archive_{{ log }} } {% endfor %} diff --git a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py index 0efb68b66..5a31a7015 100644 --- a/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py +++ b/kubernetes/tests/integration/integration/high_availability/test_replication_logs_rotation.py @@ -16,7 +16,7 @@ wait_fixed, ) -from constants import CONTAINER_NAME, MYSQL_LOG_DIR +from constants import CONTAINER_NAME, MYSQL_ARCHIVE_DIR, 
MYSQL_LOGS_DIR from ... import architecture from ...helpers_ha import ( @@ -76,7 +76,7 @@ def test_deploy_highly_available_cluster(juju: Juju, charm: str) -> None: def test_log_rotation(juju: Juju) -> None: """Test the log rotation of text files.""" - log_types = ["error", "audit"] + log_types = ["audit", "error"] mysql_app_leader = get_app_leader(juju, MYSQL_APP_NAME) mysql_app_leader_label = get_mysql_instance_label(mysql_app_leader) @@ -94,12 +94,14 @@ def test_log_rotation(juju: Juju) -> None: stop_log_rotate_dispatcher(juju, mysql_app_leader) for log_type in log_types: + archive_log_dir = f"{MYSQL_ARCHIVE_DIR}/archive_{log_type}" + logging.info("Removing existing archive directories") delete_unit_file( juju=juju, unit_name=mysql_app_leader, container=CONTAINER_NAME, - file_path=f"{MYSQL_LOG_DIR}/archive_{log_type}", + file_path=archive_log_dir, ) logging.info("Writing some data to the text log files") @@ -107,7 +109,7 @@ def test_log_rotation(juju: Juju) -> None: juju=juju, unit_name=mysql_app_leader, container=CONTAINER_NAME, - file_path=f"{MYSQL_LOG_DIR}/{log_type}.log", + file_path=f"{MYSQL_LOGS_DIR}/{log_type}.log", file_data=f"{log_type} content", ) @@ -120,11 +122,11 @@ def test_log_rotation(juju: Juju) -> None: juju=juju, unit_name=mysql_app_leader, container=CONTAINER_NAME, - file_path=f"{MYSQL_LOG_DIR}/{log_type}.log", + file_path=f"{MYSQL_LOGS_DIR}/{log_type}.log", ) assert f"{log_type} content" not in active_log_file_data - archive_log_dir = f"{MYSQL_LOG_DIR}/archive_{log_type}" + archive_log_dir = f"{MYSQL_ARCHIVE_DIR}/archive_{log_type}" archive_log_files_listed = list_unit_files( juju=juju, unit_name=mysql_app_leader, diff --git a/kubernetes/tests/integration/integration/test_backup_ceph.py b/kubernetes/tests/integration/integration/test_backup_ceph.py index 719e8b288..e08160be1 100644 --- a/kubernetes/tests/integration/integration/test_backup_ceph.py +++ b/kubernetes/tests/integration/integration/test_backup_ceph.py @@ -83,8 +83,8 @@ class 
MicrocephConnectionInformation: access_key_id: str secret_access_key: str bucket: str - ca_cert_base64: str - region: str + ca_cert_base64: str | None = None + region: str = "default" @pytest.fixture(scope="session") @@ -111,6 +111,8 @@ def microceph(certs_path, host_ip) -> MicrocephConnectionInformation: os.environ["CEPH_ACCESS_KEY"], os.environ["CEPH_SECRET_KEY"], MICROCEPH_BUCKET, + os.environ.get("CEPH_CA_CERT"), # Optional for HTTP-only local dev + os.environ.get("CEPH_REGION", "default"), ) logger.info("Setting up TLS certificates") subprocess.run(f"openssl genrsa -out {certs_path}/ca.key 2048".split(), check=True) @@ -217,47 +219,49 @@ def microceph(certs_path, host_ip) -> MicrocephConnectionInformation: @pytest.fixture(scope="session") -def cloud_credentials(microceph) -> dict[str, str]: - """Read cloud credentials.""" - return { - "access-key": microceph.access_key_id, - "secret-key": microceph.secret_access_key, - } - - -@pytest.fixture(scope="session") -def cloud_configs(microceph) -> dict[str, str]: - return { +def cloud_configs_ceph(microceph) -> tuple[dict[str, str], dict[str, str]]: + configs = { "endpoint": microceph.endpoint_url, "bucket": microceph.bucket, - "path": "mysql-k8s", - "region": "default", - "tls-ca-chain": microceph.ca_cert_base64, + "path": "mysql", + "region": microceph.region, } + # Only add TLS CA chain if provided (for HTTPS endpoints) + if microceph.ca_cert_base64: + configs["tls-ca-chain"] = microceph.ca_cert_base64 + + credentials = { + "access-key": microceph.access_key_id, + "secret-key": microceph.secret_access_key, + } + return configs, credentials @pytest.fixture(scope="session", autouse=True) -def clean_backups_from_buckets(cloud_credentials, cloud_configs): +def clean_backups_from_buckets(cloud_configs_ceph): """Teardown to clean up created backups from clouds.""" yield + cloud_configs, cloud_credentials = cloud_configs_ceph + logger.info("Cleaning backups from buckets") session = boto3.session.Session( # pyright: 
ignore aws_access_key_id=cloud_credentials["access-key"], aws_secret_access_key=cloud_credentials["secret-key"], region_name=cloud_configs["region"], ) - with tempfile.NamedTemporaryFile() as ca_file: - ca_chain = base64.b64decode(cloud_configs["tls-ca-chain"]) - ca_file.write(ca_chain) - ca_file.flush() - - s3 = session.resource( - "s3", - endpoint_url=cloud_configs["endpoint"], - verify=ca_file.name, - ) - bucket = s3.Bucket(cloud_configs["bucket"]) + + with tempfile.TemporaryDirectory() as tmpdir: + s3_extra_kwargs = {} + if "tls-ca-chain" in cloud_configs: + ca_path = Path(tmpdir) / "ca.crt" + ca_chain = base64.b64decode(cloud_configs["tls-ca-chain"]) + ca_path.write_bytes(ca_chain) + s3_extra_kwargs["verify"] = str(ca_path) + + bucket = session.resource( + "s3", endpoint_url=cloud_configs["endpoint"], **s3_extra_kwargs + ).Bucket(cloud_configs["bucket"]) # GCS doesn't support batch delete operation, so delete the objects one by one backup_path = str(Path(cloud_configs["path"]) / CLOUD) @@ -307,10 +311,12 @@ def test_build_and_deploy(juju: Juju, charm) -> None: ) -def test_backup(juju: Juju, cloud_credentials, cloud_configs) -> None: +def test_backup(juju: Juju, cloud_configs_ceph) -> None: """Test to create a backup and list backups.""" global backup_id, value_before_backup, value_after_backup + cloud_configs, cloud_credentials = cloud_configs_ceph + app_units = get_app_units(juju, DATABASE_APP_NAME) zeroth_unit_name = app_units[0] @@ -360,8 +366,10 @@ def test_backup(juju: Juju, cloud_credentials, cloud_configs) -> None: verify_mysql_test_data(juju, DATABASE_APP_NAME, TABLE_NAME, value_after_backup) -def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) -> None: +def test_restore_on_same_cluster(juju: Juju, cloud_configs_ceph) -> None: """Test to restore a backup to the same mysql cluster.""" + cloud_configs, cloud_credentials = cloud_configs_ceph + logger.info("Scaling mysql application to 1 unit") scale_app_units(juju, 
DATABASE_APP_NAME, 1) @@ -449,8 +457,10 @@ def test_restore_on_same_cluster(juju: Juju, cloud_credentials, cloud_configs) - ), "cluster should migrate to blocked status after restore" -def test_restore_on_new_cluster(juju: Juju, charm, cloud_credentials, cloud_configs) -> None: +def test_restore_on_new_cluster(juju: Juju, charm, cloud_configs_ceph) -> None: """Test to restore a backup on a new mysql cluster.""" + cloud_configs, cloud_credentials = cloud_configs_ceph + logger.info("Deploying a new mysql cluster") new_mysql_application_name = "another-mysql-k8s" diff --git a/kubernetes/tests/integration/integration/test_storage.py b/kubernetes/tests/integration/integration/test_storage.py new file mode 100644 index 000000000..b2daebe62 --- /dev/null +++ b/kubernetes/tests/integration/integration/test_storage.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import re + +import jubilant +from jubilant import Juju + +from constants import ( + CONTAINER_NAME, + MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, + MYSQL_TEMP_DIR, +) + +from ..helpers_ha import ( + CHARM_METADATA, + MINUTE_SECS, + wait_for_apps_status, +) + +logger = logging.getLogger(__name__) + +DATABASE_APP_NAME = "mysql-k8s" +TIMEOUT = 15 * MINUTE_SECS + + +def test_build_and_deploy(juju: Juju, charm) -> None: + logger.info(f"Deploying {DATABASE_APP_NAME} with 1 unit") + juju.deploy( + charm, + DATABASE_APP_NAME, + base="ubuntu@24.04", + config={"profile": "testing"}, + resources={"mysql-image": CHARM_METADATA["resources"]["mysql-image"]["upstream-source"]}, + num_units=1, + trust=True, + ) + + juju.wait( + ready=wait_for_apps_status(jubilant.all_active, DATABASE_APP_NAME), + timeout=TIMEOUT, + ) + + +def test_charm_lists_expected_storage(juju: Juju) -> None: + expected_storages = {"archive", "data", "temp", "logs"} + + assert len(juju.status().storage.storage) == len(expected_storages) + + +def 
test_data_directory_has_expected_contents_after_initialization(juju: Juju) -> None: + expected_content = { + "auto.cnf", + "ca-key.pem", + "ca.pem", + "client-cert.pem", + "client-key.pem", + "ib_buffer_pool", + "mysql", + "mysql.ibd", + "performance_schema", + "private_key.pem", + "public_key.pem", + "server-cert.pem", + "server-key.pem", + "sys", + } + excluded_content = { + "#innodb_temp", + "#innodb_redo", + "undo_001", + "undo_002", + } + + actual_content = set(list_container_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_DATA_DIR)) + + assert expected_content <= actual_content + assert excluded_content.isdisjoint(actual_content) + + +def test_temp_directory_has_only_expected_file_extensions_after_initialization(juju: Juju) -> None: + actual_content = set(list_container_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_TEMP_DIR)) + + assert all(fname.endswith(".ibt") for fname in actual_content) + + +def test_logs_directory_has_only_expected_contents_after_initialization(juju: Juju) -> None: + expected_content = { + "audit.log", + "error.log", + "binlog.index", + "#innodb_redo", + } + + actual_content = set(list_container_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_LOGS_DIR)) + + assert expected_content <= actual_content + remaining_content = actual_content - expected_content + + undolog_pattern = re.compile(r"^undo_\d+$") + assert all( + (undolog_pattern.match(fname) or fname.startswith("binlog") or fname.startswith("audit")) + for fname in remaining_content + ) + + redolog_pattern = re.compile(r"^\#ib_redo\d+") + actual_content = set( + list_container_files(juju, f"{DATABASE_APP_NAME}/0", f"{MYSQL_LOGS_DIR}/#innodb_redo") + ) + + assert all(redolog_pattern.match(fname) for fname in actual_content) + + +def list_container_files( + juju, unit_name: str, path: str, container: str = CONTAINER_NAME +) -> list[str]: + result = juju.ssh(unit_name, "ls", "--literal", path, container=container) + return result.strip().split() diff --git 
a/kubernetes/tests/integration/integration/test_tls.py b/kubernetes/tests/integration/integration/test_tls.py index b244bda1b..a0f0c933e 100644 --- a/kubernetes/tests/integration/integration/test_tls.py +++ b/kubernetes/tests/integration/integration/test_tls.py @@ -8,7 +8,7 @@ import jubilant from jubilant import Juju -from constants import CONTAINER_NAME, REPLICATION_USERNAME, TLS_SSL_CERT_FILE +from constants import CONTAINER_NAME, MYSQL_DATA_DIR, REPLICATION_USERNAME, TLS_SSL_CERT_FILE from ..helpers import is_connection_possible from ..helpers_ha import ( @@ -138,7 +138,7 @@ def test_rotate_tls_key(juju: Juju) -> None: for unit_name in app_units: original_tls[unit_name] = {} original_tls[unit_name]["cert"] = unit_file_md5( - juju, unit_name, f"/var/lib/mysql/{TLS_SSL_CERT_FILE}" + juju, unit_name, f"{MYSQL_DATA_DIR}/{TLS_SSL_CERT_FILE}" ) # set key using auto-generated key for each unit @@ -155,7 +155,7 @@ def test_rotate_tls_key(juju: Juju) -> None: # After updating both the external key and the internal key a new certificate request will be # made; then the certificates should be available and updated. for unit_name in app_units: - new_cert_md5 = unit_file_md5(juju, unit_name, f"/var/lib/mysql/{TLS_SSL_CERT_FILE}") + new_cert_md5 = unit_file_md5(juju, unit_name, f"{MYSQL_DATA_DIR}/{TLS_SSL_CERT_FILE}") assert new_cert_md5 != original_tls[unit_name]["cert"], ( f"cert for {unit_name} was not updated." 
diff --git a/kubernetes/tests/spread/integration/test_storage.py/task.yaml b/kubernetes/tests/spread/integration/test_storage.py/task.yaml new file mode 100644 index 000000000..f46310078 --- /dev/null +++ b/kubernetes/tests/spread/integration/test_storage.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_storage.py +environment: + TEST_MODULE: test_storage.py +execute: | + tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml b/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml index 67280c6cd..b793ddadb 100644 --- a/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml +++ b/kubernetes/tests/spread/integration/test_upgrade.py/task.yaml @@ -2,6 +2,8 @@ summary: test_upgrade.py environment: TEST_MODULE: high_availability/test_upgrade.py execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" + # TODO: Uncomment when separation of storage has been released to the `8.4/edge` channel + # tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" + exit 0 artifacts: - allure-results diff --git a/kubernetes/tests/unit/test_charm.py b/kubernetes/tests/unit/test_charm.py index d5ad58676..7d007c574 100644 --- a/kubernetes/tests/unit/test_charm.py +++ b/kubernetes/tests/unit/test_charm.py @@ -15,6 +15,8 @@ BACKUPS_PASSWORD_KEY, DEFAULT_PASSWORD_LENGTH, MONITORING_PASSWORD_KEY, + MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, MYSQLD_LOCATION, OPERATOR_PASSWORD_KEY, REPLICATION_PASSWORD_KEY, @@ -56,9 +58,9 @@ def layer_dict(self, with_mysqld_exporter: bool = False): mysqld_cmd = [ MYSQLD_LOCATION, "--basedir=/usr", - "--datadir=/var/lib/mysql", + f"--datadir={MYSQL_DATA_DIR}", "--plugin-dir=/usr/lib/mysql/plugin", - "--log-error=/var/log/mysql/error.log", + 
f"--log-error={MYSQL_LOGS_DIR}/error.log", f"--pid-file={self.charm.unit_label}.pid", ] return { @@ -80,7 +82,7 @@ def layer_dict(self, with_mysqld_exporter: bool = False): "mysql": { "override": "replace", "summary": "tail log", - "command": "tail -F /var/log/mysql/error.log", + "command": f"tail -F {MYSQL_LOGS_DIR}/error.log", "startup": "enabled", }, "mysqld_exporter": { @@ -336,7 +338,7 @@ def test_mysql_property(self, _, mock_get_unit_address): @patch("mysql_k8s_helpers.MySQL.remove_instance") @patch("mysql_k8s_helpers.MySQL.get_primary_label") @patch("mysql_k8s_helpers.MySQL.is_instance_in_cluster", return_value=True) - def test_database_storage_detaching( + def test_storage_detaching( self, mock_is_instance_in_cluster, mock_get_primary_label, @@ -352,7 +354,7 @@ def test_database_storage_detaching( ) mock_get_primary_label.return_value = self.charm.unit_label - self.charm._on_database_storage_detaching(None) + self.charm._on_storage_detaching(None) mock_remove_instance.assert_called_once_with(self.charm.unit_label, from_instance=None) self.assertEqual( diff --git a/kubernetes/tests/unit/test_mysql_k8s_helpers.py b/kubernetes/tests/unit/test_mysql_k8s_helpers.py index 21357e086..74e241157 100644 --- a/kubernetes/tests/unit/test_mysql_k8s_helpers.py +++ b/kubernetes/tests/unit/test_mysql_k8s_helpers.py @@ -7,7 +7,7 @@ import tenacity from ops.pebble import ExecError, PathError -from constants import PEER +from constants import MYSQL_LOGS_DIR, PEER from mysql_k8s_helpers import ( MySQL, MySQLInitialiseMySQLDError, @@ -68,7 +68,20 @@ def test_initialise_mysqld(self, _container, _process): self.mysql.initialise_mysqld() _container.exec.assert_called_once_with( - command=["/usr/sbin/mysqld", "--initialize", "-u", "mysql"], + command=[ + "/usr/sbin/mysqld", + "--initialize", + "-u", + "mysql", + "--datadir", + "/var/lib/mysql/data", + "--innodb-log-group-home-dir", + "/var/lib/mysql/logs", + "--innodb-undo-directory", + "/var/lib/mysql/logs", + 
"--innodb-temp-tablespaces-dir", + "/var/lib/mysql/temp", + ], user="mysql", group="mysql", ) @@ -144,18 +157,25 @@ def test_log_rotate_config(self, _container): "missingok\n" "nomail\n" "nosharedscripts\n" - "nocopytruncate\n\n\n" - "/var/log/mysql/error.log {\n" - " olddir archive_error\n" + "# Needed because rotated logs might be in a different filesystem,\n" + "# `nocopytruncate` fails with `failed to rename ...: Invalid cross-device link`\n" + "copy\n" + "copytruncate\n\n\n" + f"{MYSQL_LOGS_DIR}/error.log" + " {\n" + " olddir /var/lib/mysql/archive/archive_error\n" "}\n\n" - "/var/log/mysql/general.log {\n" - " olddir archive_general\n" + f"{MYSQL_LOGS_DIR}/general.log" + " {\n" + " olddir /var/lib/mysql/archive/archive_general\n" "}\n\n" - "/var/log/mysql/slowquery.log {\n" - " olddir archive_slowquery\n" + f"{MYSQL_LOGS_DIR}/slowquery.log" + " {\n" + " olddir /var/lib/mysql/archive/archive_slowquery\n" "}\n\n" - "/var/log/mysql/audit.log {\n" - " olddir archive_audit\n" + f"{MYSQL_LOGS_DIR}/audit.log" + " {\n" + " olddir /var/lib/mysql/archive/archive_audit\n" "}\n" ) diff --git a/kubernetes/tox.ini b/kubernetes/tox.ini index 7609a3df5..df0a6239d 100644 --- a/kubernetes/tox.ini +++ b/kubernetes/tox.ini @@ -78,6 +78,8 @@ pass_env = CEPH_ENDPOINT_URL CEPH_ACCESS_KEY CEPH_SECRET_KEY + CEPH_CA_CERT + CEPH_REGION commands_pre = poetry install --only integration commands = diff --git a/machines/lib/charms/mysql/v0/mysql.py b/machines/lib/charms/mysql/v0/mysql.py index 5172138cd..4d43b9526 100644 --- a/machines/lib/charms/mysql/v0/mysql.py +++ b/machines/lib/charms/mysql/v0/mysql.py @@ -87,6 +87,9 @@ def __init__( MAX_PASSWORD_LENGTH, MONITORING_PASSWORD_KEY, MONITORING_USERNAME, + MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, + MYSQL_TEMP_DIR, OPERATOR_PASSWORD_KEY, OPERATOR_USERNAME, PEER, @@ -1105,7 +1108,6 @@ def render_mysqld_configuration( # noqa: C901 memory_limit: int | None = None, experimental_max_connections: int | None = None, binlog_retention_days: int, - 
snap_common: str = "", ) -> tuple[str, dict]: """Render mysqld ini configuration file.""" max_connections = None @@ -1154,13 +1156,13 @@ def render_mysqld_configuration( # noqa: C901 # disable memory instruments if we have less than 2GiB of RAM performance_schema_instrument = "'memory/%=OFF'" - logging_path = f"{snap_common}/var/log/mysql" binlog_retention_seconds = binlog_retention_days * 24 * 60 * 60 config = configparser.ConfigParser(interpolation=None) # do not enable slow query logs, but specify a log file path in case # the admin enables them manually - config["mysqld"] = { + base_config = { + "datadir": MYSQL_DATA_DIR, # All interfaces bind expected "bind_address": "0.0.0.0", # noqa: S104 "mysqlx_bind_address": "0.0.0.0", # noqa: S104 @@ -1168,12 +1170,17 @@ def render_mysqld_configuration( # noqa: C901 "report_host": self.instance_address, "max_connections": max_connections, "innodb_buffer_pool_size": innodb_buffer_pool_size, + "innodb_log_group_home_dir": MYSQL_LOGS_DIR, + "innodb_temp_tablespaces_dir": MYSQL_TEMP_DIR, + "innodb_undo_directory": MYSQL_LOGS_DIR, + "log_bin": f"{MYSQL_LOGS_DIR}/binlog", + "log_bin_index": f"{MYSQL_LOGS_DIR}/binlog.index", "log_error_services": "log_filter_internal;log_sink_internal", - "log_error": f"{logging_path}/error.log", + "log_error": f"{MYSQL_LOGS_DIR}/error.log", "general_log": "OFF", - "general_log_file": f"{logging_path}/general.log", + "general_log_file": f"{MYSQL_LOGS_DIR}/general.log", "loose-group_replication_paxos_single_leader": "ON", - "slow_query_log_file": f"{logging_path}/slow.log", + "slow_query_log_file": f"{MYSQL_LOGS_DIR}/slow.log", "binlog_expire_logs_seconds": f"{binlog_retention_seconds}", "gtid_mode": "ON", "enforce_gtid_consistency": "ON", @@ -1187,9 +1194,10 @@ def render_mysqld_configuration( # noqa: C901 "loose-validate_password.policy": "MEDIUM", "loose-validate_password.special_char_count": 0, } + config["mysqld"] = base_config # ty:ignore[invalid-assignment] if audit_log_enabled: - 
config["mysqld"]["loose-audit_log_filter.file"] = f"{logging_path}/audit.log" + config["mysqld"]["loose-audit_log_filter.file"] = f"{MYSQL_LOGS_DIR}/audit.log" config["mysqld"]["loose-audit_log_filter.format"] = "JSON" config["mysqld"]["loose-audit_log_filter.policy"] = audit_log_policy.upper() if audit_log_strategy == "async": @@ -2716,6 +2724,7 @@ def empty_data_files( mysql_data_directory: str, user: str | None = None, group: str | None = None, + extra_dirs: list[str] | None = None, ) -> None: """Empty the mysql data directory in preparation of backup restore.""" empty_data_files_command = [ @@ -2737,6 +2746,21 @@ def empty_data_files( user=user, group=group, ) + + for extra_dir in extra_dirs or []: + logger.debug(f"Emptying extra directory {extra_dir}") + self._execute_commands( + [ + "find", + extra_dir, + "-not", + "-path", + extra_dir, + "-delete", + ], + user=user, + group=group, + ) except MySQLExecError as e: logger.error("Failed to empty data directory in prep for backup restore") raise MySQLEmptyDataDirectoryError(e.message) from e diff --git a/machines/metadata.yaml b/machines/metadata.yaml index 7f0ed8599..7d1ed1532 100644 --- a/machines/metadata.yaml +++ b/machines/metadata.yaml @@ -14,7 +14,7 @@ docs: https://canonical-charmed-mysql.readthedocs-hosted.com/ source: https://github.com/canonical/mysql-operators issues: https://github.com/canonical/mysql-operators/issues website: - - https://ubuntu.com/data/mysql + - https://canonical.com/data/mysql - https://charmhub.io/mysql - https://github.com/canonical/mysql-operators maintainers: @@ -59,10 +59,23 @@ requires: optional: true storage: - database: + archive: + type: filesystem + description: Persistent storage for rotated logs and other archival purposes + location: /var/snap/charmed-mysql/common/var/lib/mysql/archive + data: + type: filesystem + description: Persistent storage for MySQL data + location: /var/snap/charmed-mysql/common/var/lib/mysql/data + logs: type: filesystem - description: 
Persistent storage for data - location: /var/snap/charmed-mysql/common + description: Persistent storage for MySQL logs + location: /var/snap/charmed-mysql/common/var/lib/mysql/logs + temp: + type: filesystem + description: Persistent storage for InnoDB temporary tablespaces + location: /var/snap/charmed-mysql/common/var/lib/mysql/temp + assumes: - juju diff --git a/machines/src/charm.py b/machines/src/charm.py index 5090ee494..b38587ed1 100755 --- a/machines/src/charm.py +++ b/machines/src/charm.py @@ -155,9 +155,11 @@ def __init__(self, *args): self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.update_status, self._on_update_status) - self.framework.observe( - self.on.database_storage_detaching, self._on_database_storage_detaching - ) + + self.framework.observe(self.on.archive_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.data_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.logs_storage_detaching, self._on_storage_detaching) + self.framework.observe(self.on.temp_storage_detaching, self._on_storage_detaching) self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed) self.framework.observe(self.on[PEER].relation_departed, self._on_peer_relation_departed) @@ -367,7 +369,7 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None: if not self._mysql.reconcile_binlogs_collection(force_restart=True): logger.error("Failed to reconcile binlogs collection during peer departed event") - def _on_database_storage_detaching(self, _) -> None: + def _on_storage_detaching(self, _) -> None: """Handle the database storage detaching event.""" # Only executes if the unit was initialised if not self.unit_initialized(): diff --git a/machines/src/constants.py b/machines/src/constants.py index c72b3ffad..be15e3cb3 100644 --- a/machines/src/constants.py +++
b/machines/src/constants.py @@ -38,7 +38,18 @@ MYSQLD_DEFAULTS_CONFIG_FILE = f"{CHARMED_MYSQL_DATA_DIRECTORY}/etc/mysql/mysql.cnf" MYSQLD_CUSTOM_CONFIG_FILE = f"{MYSQLD_CONFIG_DIRECTORY}/z-custom-mysqld.cnf" MYSQL_SYSTEM_USER = "snap_daemon" -MYSQL_DATA_DIR = f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql" + +MYSQL_ARCHIVE_DIR = f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/archive" # Corresponds to the archive storage mount +MYSQL_DATA_DIR = ( + f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/data" # Corresponds to the data storage mount +) +MYSQL_LOGS_DIR = ( + f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs" # Corresponds to the logs storage mount +) +MYSQL_TEMP_DIR = ( + f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/temp" # Corresponds to the temp storage mount +) + CHARMED_MYSQL_XTRABACKUP_LOCATION = "/snap/bin/charmed-mysql.xtrabackup" CHARMED_MYSQL_XBCLOUD_LOCATION = "/snap/bin/charmed-mysql.xbcloud" CHARMED_MYSQL_XBSTREAM_LOCATION = "/snap/bin/charmed-mysql.xbstream" diff --git a/machines/src/mysql_vm_helpers.py b/machines/src/mysql_vm_helpers.py index 3eb4fa4d1..ac75fc301 100644 --- a/machines/src/mysql_vm_helpers.py +++ b/machines/src/mysql_vm_helpers.py @@ -32,7 +32,15 @@ from charms.operator_libs_linux.v2 import snap from mysql_shell.executors import LocalExecutor from mysql_shell.executors.errors import ExecutionError -from tenacity import RetryError, Retrying, retry, stop_after_attempt, stop_after_delay, wait_fixed +from tenacity import ( + RetryError, + Retrying, + before_sleep_log, + retry, + stop_after_attempt, + stop_after_delay, + wait_fixed, +) from constants import ( CHARMED_MYSQL_BINLOGS_COLLECTOR_SERVICE, @@ -44,8 +52,11 @@ CHARMED_MYSQLD_EXPORTER_SERVICE, CHARMED_MYSQLD_SERVICE, CHARMED_MYSQLSH, + MYSQL_ARCHIVE_DIR, MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, MYSQL_SYSTEM_USER, + MYSQL_TEMP_DIR, MYSQLD_CONFIG_DIRECTORY, MYSQLD_CUSTOM_CONFIG_FILE, MYSQLD_DEFAULTS_CONFIG_FILE, @@ -249,7 +260,6 @@ def write_mysqld_config(self) -> dict: 
audit_log_enabled=self.charm.config.plugin_audit_enabled, audit_log_strategy=self.charm.config.plugin_audit_strategy, audit_log_policy=self.charm.config.logs_audit_policy, - snap_common=CHARMED_MYSQL_COMMON_DIRECTORY, memory_limit=memory_limit, binlog_retention_days=self.charm.config.binlog_retention_days, experimental_max_connections=self.charm.config.experimental_max_connections, @@ -285,7 +295,6 @@ def setup_logrotate_and_cron( config_path = "/etc/logrotate.d/flush_mysql_logs" script_path = f"{self.charm.charm_dir}/logrotation.sh" cron_path = "/etc/cron.d/flush_mysql_logs" - logs_dir = f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql" # days * minutes/day = amount of rotated files to keep logs_rotations = logs_retention_period * 1440 @@ -295,7 +304,8 @@ def setup_logrotate_and_cron( logrotate_conf_content = template.render( system_user=MYSQL_SYSTEM_USER, - log_dir=logs_dir, + log_dir=MYSQL_LOGS_DIR, + archive_dir=MYSQL_ARCHIVE_DIR, charm_directory=self.charm.charm_dir, unit_name=self.charm.unit.name, enabled_log_files=enabled_log_files, @@ -312,7 +322,7 @@ def setup_logrotate_and_cron( template = jinja2.Template(file.read()) logrotation_script_content = template.render( - log_path=f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql", + log_path=MYSQL_LOGS_DIR, enabled_log_files=enabled_log_files, logrotate_conf=config_path, owner=MYSQL_SYSTEM_USER, @@ -335,6 +345,12 @@ def initialise_mysqld(self) -> None: "/snap/bin/charmed-mysql.mysqld-initialize", "--datadir", MYSQL_DATA_DIR, + "--innodb-log-group-home-dir", + MYSQL_LOGS_DIR, + "--innodb-undo-directory", + MYSQL_LOGS_DIR, + "--innodb-temp-tablespaces-dir", + MYSQL_TEMP_DIR, ] try: @@ -493,6 +509,11 @@ def empty_data_files(self) -> None: user=ROOT_SYSTEM_USER, group=ROOT_SYSTEM_USER, ) + super().empty_data_files( + MYSQL_LOGS_DIR, + user=ROOT_SYSTEM_USER, + group=ROOT_SYSTEM_USER, + ) def restore_backup( self, @@ -512,6 +533,14 @@ def restore_backup( capture_output=True, text=True, ) + # Also change permissions 
for logs directory where undo files and binlogs go + subprocess.run( # noqa: S603 + ["/usr/bin/chmod", "770", MYSQL_LOGS_DIR], + user=ROOT_SYSTEM_USER, + group=ROOT_SYSTEM_USER, + capture_output=True, + text=True, + ) except subprocess.CalledProcessError as e: logger.exception("Failed to change data directory permissions before restoring") raise MySQLRestoreBackupError from e @@ -536,6 +565,14 @@ def restore_backup( capture_output=True, text=True, ) + # Revert permissions for the logs directory + subprocess.run( # noqa: S603 + ["/usr/bin/chmod", "750", MYSQL_LOGS_DIR], + user=ROOT_SYSTEM_USER, + group=ROOT_SYSTEM_USER, + capture_output=True, + text=True, + ) # Change ownership to the snap_daemon user since the restore files # are owned by root @@ -552,6 +589,19 @@ def restore_backup( capture_output=True, text=True, ) + # Also change ownership for the logs directory + subprocess.run( # noqa: S603 + [ + "/usr/bin/chown", + "-R", + f"{MYSQL_SYSTEM_USER}:{ROOT_SYSTEM_USER}", + MYSQL_LOGS_DIR, + ], + user=ROOT_SYSTEM_USER, + group=ROOT_SYSTEM_USER, + capture_output=True, + text=True, + ) except subprocess.CalledProcessError as e: logger.exception( "Failed to change data directory permissions or ownership after restoring" @@ -620,19 +670,23 @@ def _execute_commands( stdout += line return_code = process.wait() + + # Read stdout and stderr before checking return code + if not stdout and process.stdout: + stdout = process.stdout.read() + if not stderr and process.stderr: + stderr = process.stderr.read() + if return_code != 0: message = ( "Failed command: " f"{self.strip_off_passwords(' '.join(commands))};" - f" {user=}; {group=}" + f" {user=}; {group=}; " + f"stdout: {stdout.strip()}; " + f"stderr: {stderr.strip()}" ) logger.error(message) - raise MySQLExecError from None - - if not stdout and process.stdout: - stdout = process.stdout.read() - if not stderr and process.stderr: - stderr = process.stderr.read() + raise MySQLExecError(message) from None return (stdout.strip(), 
stderr.strip()) @@ -872,21 +926,22 @@ def read_file_content(self, path: str) -> str | None: @staticmethod def fetch_error_log() -> str | None: """Fetch the mysqld error log.""" - if os.path.exists(f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql/error.log"): + if os.path.exists(f"{MYSQL_LOGS_DIR}/error.log"): # can be empty if just rotated - with open(f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql/error.log") as fd: + with open(f"{MYSQL_LOGS_DIR}/error.log") as fd: return fd.read() @staticmethod def reset_data_dir() -> None: - """Reset the data directory.""" + """Remove all files from the data directory.""" logger.warning(f"Resetting data directory: {MYSQL_DATA_DIR}") - # Remove the data directory - shutil.rmtree(MYSQL_DATA_DIR, ignore_errors=False) - - # Recreate the data directory - os.makedirs(MYSQL_DATA_DIR) + # Remove the contents of the data directory + for root, dirs, files in pathlib.Path(MYSQL_DATA_DIR).walk(top_down=False): + for name in files: + (root / name).unlink() + for name in dirs: + (root / name).rmdir() # Change ownership of the data directory shutil.chown(MYSQL_DATA_DIR, user=MYSQL_SYSTEM_USER, group="root") @@ -894,17 +949,25 @@ def reset_data_dir() -> None: def is_volume_mounted() -> bool: """Returns if data directory is attached.""" - try: - for attempt in Retrying(stop=stop_after_attempt(10), wait=wait_fixed(12)): - with attempt: - # Parameters are hardcoded by the charm - subprocess.check_call([ # noqa: S603 - "/usr/bin/mountpoint", - "-q", - CHARMED_MYSQL_COMMON_DIRECTORY, - ]) - except RetryError: - return False + for directory in (MYSQL_DATA_DIR, MYSQL_LOGS_DIR, MYSQL_TEMP_DIR): + try: + for attempt in Retrying( + stop=stop_after_attempt(10), + wait=wait_fixed(12), + before_sleep=before_sleep_log(logger, logging.WARNING), + ): + with attempt: + # Parameters are hardcoded by the charm + subprocess.run( # noqa: S603 + [ + "/usr/bin/mountpoint", + "-q", + directory, + ], + check=True, + ) + except RetryError: + return False return True 
diff --git a/machines/templates/logrotate.j2 b/machines/templates/logrotate.j2 index 53934806a..ad616ea43 100644 --- a/machines/templates/logrotate.j2 +++ b/machines/templates/logrotate.j2 @@ -26,11 +26,14 @@ ifempty missingok nomail nosharedscripts -nocopytruncate +# Needed because rotated logs might be in a different filesystem, +# `nocopytruncate` fails with `failed to rename ...: Invalid cross-device link` +copy +copytruncate {% for log in enabled_log_files %} {{ log_dir }}/{{ log }}.log { - olddir archive_{{ log }} + olddir {{ archive_dir }}/archive_{{ log }} postrotate juju_command=/usr/bin/juju-run if command -v /usr/bin/juju-exec; then juju_command=/usr/bin/juju-exec; fi diff --git a/machines/tests/integration/helpers_ha.py b/machines/tests/integration/helpers_ha.py index 7ab945c12..663953ac5 100644 --- a/machines/tests/integration/helpers_ha.py +++ b/machines/tests/integration/helpers_ha.py @@ -3,6 +3,7 @@ # See LICENSE file for licensing details. import json +import logging import subprocess import uuid from collections.abc import Callable, Generator @@ -15,6 +16,7 @@ from jubilant.statustypes import Status from tenacity import ( Retrying, + before_sleep_log, retry, stop_after_attempt, stop_after_delay, @@ -33,6 +35,8 @@ JujuModelStatusFn = Callable[[Status], bool] JujuAppsStatusFn = Callable[[Status, str], bool] +logger = logging.getLogger(__name__) + def check_mysql_instances_online( juju: Juju, @@ -443,7 +447,11 @@ def stop_mysql_process_gracefully(juju: Juju, unit_name: str) -> None: ) # Hold execution until process is stopped - for attempt in Retrying(stop=stop_after_attempt(10), wait=wait_fixed(5)): + for attempt in Retrying( + stop=stop_after_attempt(10), + wait=wait_fixed(5), + before_sleep=before_sleep_log(logger, logging.WARNING), + ): with attempt: if get_unit_process_id(juju, unit_name, "mysqld") is not None: raise Exception("Failed to stop the mysqld process") diff --git 
a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py index 7961ed411..6676e1963 100644 --- a/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py +++ b/machines/tests/integration/integration/high_availability/test_replication_logs_rotation.py @@ -14,7 +14,7 @@ wait_fixed, ) -from constants import CHARMED_MYSQL_COMMON_DIRECTORY +from constants import MYSQL_ARCHIVE_DIR, MYSQL_LOGS_DIR from ...helpers_ha import ( get_app_leader, @@ -70,7 +70,6 @@ def test_log_rotation(juju: Juju) -> None: log_types = ["audit", "error"] mysql_app_leader = get_app_leader(juju, MYSQL_APP_NAME) - mysql_logs_path = f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql" logging.info("Removing the cron file") delete_unit_file(juju, mysql_app_leader, "/etc/cron.d/flush_mysql_logs") @@ -79,7 +78,7 @@ def test_log_rotation(juju: Juju) -> None: stop_unit_flush_logs_job(juju, mysql_app_leader) for log_type in log_types: - archive_log_dir = f"{mysql_logs_path}/archive_{log_type}" + archive_log_dir = f"{MYSQL_ARCHIVE_DIR}/archive_{log_type}" logging.info("Removing existing archive directories") delete_unit_file(juju, mysql_app_leader, archive_log_dir) @@ -88,7 +87,7 @@ def test_log_rotation(juju: Juju) -> None: write_unit_file( juju=juju, unit_name=mysql_app_leader, - file_path=f"{mysql_logs_path}/{log_type}.log", + file_path=f"{MYSQL_LOGS_DIR}/{log_type}.log", file_data=f"{log_type} content", ) @@ -100,11 +99,11 @@ def test_log_rotation(juju: Juju) -> None: active_log_file_data = read_unit_file( juju=juju, unit_name=mysql_app_leader, - file_path=f"{mysql_logs_path}/{log_type}.log", + file_path=f"{MYSQL_LOGS_DIR}/{log_type}.log", ) assert f"{log_type} content" not in active_log_file_data - archive_log_dir = f"{mysql_logs_path}/archive_{log_type}" + archive_log_dir = f"{MYSQL_ARCHIVE_DIR}/archive_{log_type}" archive_log_files_listed = 
list_unit_files(juju, mysql_app_leader, archive_log_dir) assert len(archive_log_files_listed) == 1 diff --git a/machines/tests/integration/integration/test_backup_ceph.py b/machines/tests/integration/integration/test_backup_ceph.py index cf861e93d..2152881be 100644 --- a/machines/tests/integration/integration/test_backup_ceph.py +++ b/machines/tests/integration/integration/test_backup_ceph.py @@ -82,8 +82,8 @@ class MicrocephConnectionInformation: access_key_id: str secret_access_key: str bucket: str - ca_cert_base64: str - region: str + ca_cert_base64: str | None = None + region: str = "default" @pytest.fixture(scope="session") @@ -110,6 +110,8 @@ def microceph(certs_path, host_ip) -> MicrocephConnectionInformation: os.environ["CEPH_ACCESS_KEY"], os.environ["CEPH_SECRET_KEY"], MICROCEPH_BUCKET, + os.environ.get("CEPH_CA_CERT"), # Optional for HTTP-only local dev + os.environ.get("CEPH_REGION", "default"), ) logger.info("Setting up TLS certificates") subprocess.run(f"openssl genrsa -out {certs_path}/ca.key 2048".split(), check=True) @@ -221,9 +223,12 @@ def cloud_configs_ceph(microceph) -> tuple[dict[str, str], dict[str, str]]: "endpoint": microceph.endpoint_url, "bucket": microceph.bucket, "path": "mysql", - "region": "default", - "tls-ca-chain": microceph.ca_cert_base64, + "region": microceph.region, } + # Only add TLS CA chain if provided (for HTTPS endpoints) + if microceph.ca_cert_base64: + configs["tls-ca-chain"] = microceph.ca_cert_base64 + credentials = { "access-key": microceph.access_key_id, "secret-key": microceph.secret_access_key, @@ -245,13 +250,17 @@ def clean_backups_from_buckets(cloud_configs_ceph): region_name=cloud_configs["region"], ) - with tempfile.NamedTemporaryFile() as ca_file: - ca_chain = base64.b64decode(cloud_configs["tls-ca-chain"]) - ca_file.write(ca_chain) - ca_file.flush() - - s3 = session.resource("s3", endpoint_url=cloud_configs["endpoint"], verify=ca_file.name) - bucket = s3.Bucket(cloud_configs["bucket"]) + with 
tempfile.TemporaryDirectory() as tmpdir: + s3_extra_kwargs = {} + if "tls-ca-chain" in cloud_configs: + ca_path = Path(tmpdir) / "ca.crt" + ca_chain = base64.b64decode(cloud_configs["tls-ca-chain"]) + ca_path.write_bytes(ca_chain) + s3_extra_kwargs["verify"] = str(ca_path) + + bucket = session.resource( + "s3", endpoint_url=cloud_configs["endpoint"], **s3_extra_kwargs + ).Bucket(cloud_configs["bucket"]) # GCS doesn't support batch delete operation, so delete the objects one by one backup_path = str(Path(cloud_configs["path"]) / backup_id) diff --git a/machines/tests/integration/integration/test_storage.py b/machines/tests/integration/integration/test_storage.py new file mode 100644 index 000000000..3de24b3d4 --- /dev/null +++ b/machines/tests/integration/integration/test_storage.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import re + +import jubilant +from jubilant import Juju + +from constants import ( + MYSQL_DATA_DIR, + MYSQL_LOGS_DIR, + MYSQL_TEMP_DIR, +) + +from ..helpers_ha import ( + MINUTE_SECS, + wait_for_apps_status, +) + +logger = logging.getLogger(__name__) + +DATABASE_APP_NAME = "mysql" +TIMEOUT = 15 * MINUTE_SECS + + +def test_build_and_deploy(juju: Juju, charm) -> None: + logger.info(f"Deploying {DATABASE_APP_NAME} with 1 unit") + juju.deploy( + charm, + DATABASE_APP_NAME, + base="ubuntu@24.04", + config={"profile": "testing"}, + num_units=1, + trust=True, + ) + + juju.wait( + ready=wait_for_apps_status(jubilant.all_active, DATABASE_APP_NAME), + timeout=TIMEOUT, + ) + + +def test_charm_lists_expected_storage(juju: Juju) -> None: + expected_storages = {"archive", "data", "temp", "logs"} + + assert len(juju.status().storage.storage) == len(expected_storages) + + +def test_data_directory_has_expected_contents_after_initialization(juju: Juju) -> None: + expected_content = { + "auto.cnf", + "ca-key.pem", + "ca.pem", + "client-cert.pem", + "client-key.pem", + 
"ib_buffer_pool", + "mysql", + "mysql.ibd", + "performance_schema", + "private_key.pem", + "public_key.pem", + "server-cert.pem", + "server-key.pem", + "sys", + } + excluded_content = { + "#innodb_temp", + "#innodb_redo", + "undo_001", + "undo_002", + } + + actual_content = set(list_vm_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_DATA_DIR)) + + assert expected_content <= actual_content + assert excluded_content.isdisjoint(actual_content) + + +def test_temp_directory_has_only_expected_file_extensions_after_initialization(juju: Juju) -> None: + actual_content = set(list_vm_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_TEMP_DIR)) + + assert all(fname.endswith(".ibt") for fname in actual_content) + + +def test_logs_directory_has_only_expected_contents_after_initialization(juju: Juju) -> None: + expected_content = { + "audit.log", + "error.log", + "binlog.index", + "#innodb_redo", + } + + actual_content = set(list_vm_files(juju, f"{DATABASE_APP_NAME}/0", MYSQL_LOGS_DIR)) + + assert expected_content <= actual_content + remaining_content = actual_content - expected_content + + undolog_pattern = re.compile(r"^undo_\d+$") + assert all( + (undolog_pattern.match(fname) or fname.startswith("binlog") or fname.startswith("audit")) + for fname in remaining_content + ) + + redolog_pattern = re.compile(r"^#ib_redo\d+") + actual_content = set( + list_vm_files(juju, f"{DATABASE_APP_NAME}/0", f"{MYSQL_LOGS_DIR}/#innodb_redo") + ) + + assert all(redolog_pattern.match(fname) for fname in actual_content) + + +def list_vm_files( + juju: jubilant.Juju, + unit_name: str, + path: str, +) -> list[str]: + task = juju.exec("ls", path, unit=unit_name) + task.raise_on_failure() + + return task.stdout.split() diff --git a/machines/tests/integration/integration/test_tls.py b/machines/tests/integration/integration/test_tls.py index a6ab45a6e..4eea5def1 100644 --- a/machines/tests/integration/integration/test_tls.py +++ b/machines/tests/integration/integration/test_tls.py @@ -8,7 +8,7 @@ import jubilant 
from jubilant import Juju -from constants import REPLICATION_USERNAME, TLS_SSL_CERT_FILE +from constants import MYSQL_DATA_DIR, REPLICATION_USERNAME, TLS_SSL_CERT_FILE from ..helpers import ( is_connection_possible, @@ -141,7 +141,7 @@ def test_rotate_tls_key(juju: Juju) -> None: for unit_name in app_units: original_tls[unit_name] = {} original_tls[unit_name]["cert"] = unit_file_md5( - juju, unit_name, f"/var/snap/charmed-mysql/common/var/lib/mysql/{TLS_SSL_CERT_FILE}" + juju, unit_name, f"{MYSQL_DATA_DIR}/{TLS_SSL_CERT_FILE}" ) # set key using auto-generated key for each unit @@ -161,9 +161,7 @@ def test_rotate_tls_key(juju: Juju) -> None: # After updating both the external key and the internal key a new certificate request will be # made; then the certificates should be available and updated. for unit_name in app_units: - new_cert_md5 = unit_file_md5( - juju, unit_name, f"/var/snap/charmed-mysql/common/var/lib/mysql/{TLS_SSL_CERT_FILE}" - ) + new_cert_md5 = unit_file_md5(juju, unit_name, f"{MYSQL_DATA_DIR}/{TLS_SSL_CERT_FILE}") assert new_cert_md5 != original_tls[unit_name]["cert"], ( f"cert for {unit_name} was not updated." 
diff --git a/machines/tests/spread/integration/test_storage.py/task.yaml b/machines/tests/spread/integration/test_storage.py/task.yaml new file mode 100644 index 000000000..1decbdbbe --- /dev/null +++ b/machines/tests/spread/integration/test_storage.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_storage.py +environment: + TEST_MODULE: test_storage.py +execute: | + tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +backends: + - -lxd-vm # Requires CI secrets diff --git a/machines/tests/spread/integration/test_upgrade.py/task.yaml b/machines/tests/spread/integration/test_upgrade.py/task.yaml index 67280c6cd..b793ddadb 100644 --- a/machines/tests/spread/integration/test_upgrade.py/task.yaml +++ b/machines/tests/spread/integration/test_upgrade.py/task.yaml @@ -2,6 +2,8 @@ summary: test_upgrade.py environment: TEST_MODULE: high_availability/test_upgrade.py execute: | - tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" + # TODO: Uncomment when separation of storage has been released to the `8.4/edge` channel + # tox run -e integration -- "tests/integration/integration/$TEST_MODULE" --alluredir="$SPREAD_TASK/allure-results" + exit 0 artifacts: - allure-results diff --git a/machines/tests/unit/test_charm.py b/machines/tests/unit/test_charm.py index 8c5e70d7b..5b8c37722 100644 --- a/machines/tests/unit/test_charm.py +++ b/machines/tests/unit/test_charm.py @@ -33,7 +33,7 @@ def setUp(self): @patch("socket.getfqdn", return_value="test-hostname") @patch("socket.gethostbyname", return_value="") - @patch("subprocess.check_call") + @patch("subprocess.run") @patch("mysql_vm_helpers.is_volume_mounted", return_value=True) @patch("mysql_vm_helpers.MySQL.install_and_configure_mysql_dependencies") def test_on_install(self, _install_and_configure_mysql_dependencies, ___, __, _, _____): @@ -43,7 +43,7 @@ def test_on_install(self, 
_install_and_configure_mysql_dependencies, ___, __, _, self.assertTrue(isinstance(self.harness.model.unit.status, WaitingStatus)) @patch("charm.Retrying", return_value=Retrying(stop=stop_after_attempt(1))) - @patch("subprocess.check_call") + @patch("subprocess.run") @patch("mysql_vm_helpers.is_volume_mounted", return_value=True) @patch( "mysql_vm_helpers.MySQL.install_and_configure_mysql_dependencies", side_effect=Exception() diff --git a/machines/tests/unit/test_log_rotation_setup.py b/machines/tests/unit/test_log_rotation_setup.py index 99becb56f..b14eae45d 100644 --- a/machines/tests/unit/test_log_rotation_setup.py +++ b/machines/tests/unit/test_log_rotation_setup.py @@ -7,7 +7,7 @@ from ops.testing import Harness from charm import MySQLOperatorCharm -from constants import COS_AGENT_RELATION_NAME, PEER +from constants import COS_AGENT_RELATION_NAME, MYSQL_LOGS_DIR, PEER class TestLogRotationSetup(unittest.TestCase): @@ -44,9 +44,7 @@ def test_log_syncing( ): self.harness.update_config({"logs-retention-period": "auto"}) self.harness.add_relation(COS_AGENT_RELATION_NAME, "grafana-agent") - positions = ( - "positions:\n '/var/snap/charmed-mysql/common/var/log/mysql/error.log': '466'\n" - ) + positions = f"positions:\n '{MYSQL_LOGS_DIR}/error.log': '466'\n" event = MagicMock() mock_setup.assert_called_once() mock_setup.reset_mock() diff --git a/machines/tests/unit/test_mysql.py b/machines/tests/unit/test_mysql.py index ddb86dc9f..73992af4c 100644 --- a/machines/tests/unit/test_mysql.py +++ b/machines/tests/unit/test_mysql.py @@ -68,7 +68,11 @@ InstanceState, ) -from constants import CHARMED_MYSQLSH, MYSQLD_SOCK_FILE +from constants import ( + CHARMED_MYSQL_COMMON_DIRECTORY, + CHARMED_MYSQLSH, + MYSQLD_SOCK_FILE, +) SHORT_CLUSTER_STATUS = { "defaultReplicaSet": { @@ -1714,6 +1718,12 @@ def test_render_mysqld_configuration(self, _get_available_memory): _get_available_memory.return_value = 32341442560 expected_config = { + "datadir": 
f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/data", + "innodb_temp_tablespaces_dir": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/temp", + "log_bin": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/binlog", + "log_bin_index": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/binlog.index", + "innodb_log_group_home_dir": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs", + "innodb_undo_directory": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs", "bind_address": "0.0.0.0", "mysqlx_bind_address": "0.0.0.0", "admin_address": "127.0.0.1", @@ -1721,15 +1731,15 @@ def test_render_mysqld_configuration(self, _get_available_memory): "max_connections": "724", "innodb_buffer_pool_size": "23219666944", "log_error_services": "log_filter_internal;log_sink_internal", - "log_error": "/var/log/mysql/error.log", + "log_error": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/error.log", "general_log": "OFF", - "general_log_file": "/var/log/mysql/general.log", - "slow_query_log_file": "/var/log/mysql/slow.log", + "general_log_file": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/general.log", + "slow_query_log_file": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/slow.log", "binlog_expire_logs_seconds": "604800", "loose-audit_log_filter.format": "JSON", "loose-audit_log_filter.policy": "LOGINS", "loose-audit_log_filter.strategy": "ASYNCHRONOUS", - "loose-audit_log_filter.file": "/var/log/mysql/audit.log", + "loose-audit_log_filter.file": f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/lib/mysql/logs/audit.log", "loose-group_replication_paxos_single_leader": "ON", "innodb_buffer_pool_chunk_size": "2902458368", "gtid_mode": "ON", diff --git a/machines/tests/unit/test_mysqlsh_helpers.py b/machines/tests/unit/test_mysqlsh_helpers.py index a0e2a699b..85e114a6b 100644 --- a/machines/tests/unit/test_mysqlsh_helpers.py +++ b/machines/tests/unit/test_mysqlsh_helpers.py @@ -229,18 +229,24 @@ def test_write_mysqld_config( config = "\n".join(( "[mysqld]", + 
"datadir = /var/snap/charmed-mysql/common/var/lib/mysql/data", "bind_address = 0.0.0.0", "mysqlx_bind_address = 0.0.0.0", "admin_address = 127.0.0.1", "report_host = 127.0.0.1", "max_connections = 111", "innodb_buffer_pool_size = 1234", + "innodb_log_group_home_dir = /var/snap/charmed-mysql/common/var/lib/mysql/logs", + "innodb_temp_tablespaces_dir = /var/snap/charmed-mysql/common/var/lib/mysql/temp", + "innodb_undo_directory = /var/snap/charmed-mysql/common/var/lib/mysql/logs", + "log_bin = /var/snap/charmed-mysql/common/var/lib/mysql/logs/binlog", + "log_bin_index = /var/snap/charmed-mysql/common/var/lib/mysql/logs/binlog.index", "log_error_services = log_filter_internal;log_sink_internal", - "log_error = /var/snap/charmed-mysql/common/var/log/mysql/error.log", + "log_error = /var/snap/charmed-mysql/common/var/lib/mysql/logs/error.log", "general_log = OFF", - "general_log_file = /var/snap/charmed-mysql/common/var/log/mysql/general.log", + "general_log_file = /var/snap/charmed-mysql/common/var/lib/mysql/logs/general.log", "loose-group_replication_paxos_single_leader = ON", - "slow_query_log_file = /var/snap/charmed-mysql/common/var/log/mysql/slow.log", + "slow_query_log_file = /var/snap/charmed-mysql/common/var/lib/mysql/logs/slow.log", "binlog_expire_logs_seconds = 604800", "gtid_mode = ON", "enforce_gtid_consistency = ON", @@ -252,7 +258,7 @@ def test_write_mysqld_config( "loose-validate_password.number_count = 1", "loose-validate_password.policy = MEDIUM", "loose-validate_password.special_char_count = 0", - "loose-audit_log_filter.file = /var/snap/charmed-mysql/common/var/log/mysql/audit.log", + "loose-audit_log_filter.file = /var/snap/charmed-mysql/common/var/lib/mysql/logs/audit.log", "loose-audit_log_filter.format = JSON", "loose-audit_log_filter.policy = LOGINS", "loose-audit_log_filter.strategy = ASYNCHRONOUS", @@ -465,14 +471,12 @@ def test_get_available_memory(self): ): self.mysql.get_available_memory() - @patch("shutil.rmtree") - @patch("os.makedirs") 
@patch("shutil.chown") - def test_reset_data_dir(self, _chown, _makedirs, _rmtree): + @patch("pathlib.Path.walk", return_value=iter([])) + def test_reset_data_dir(self, _walk, _chown): self.mysql.reset_data_dir() + _walk.assert_called_once() _chown.assert_called_once() - _makedirs.assert_called_once() - _rmtree.assert_called_once() @patch("mysql_vm_helpers.MySQL.reset_data_dir") @patch("subprocess.run") @@ -486,7 +490,13 @@ def test_initialise_mysqld(self, _subprocess_run, _reset_data_dir): "/usr/bin/sudo", "/snap/bin/charmed-mysql.mysqld-initialize", "--datadir", - "/var/snap/charmed-mysql/common/var/lib/mysql", + "/var/snap/charmed-mysql/common/var/lib/mysql/data", + "--innodb-log-group-home-dir", + "/var/snap/charmed-mysql/common/var/lib/mysql/logs", + "--innodb-undo-directory", + "/var/snap/charmed-mysql/common/var/lib/mysql/logs", + "--innodb-temp-tablespaces-dir", + "/var/snap/charmed-mysql/common/var/lib/mysql/temp", ], check=True, ) diff --git a/machines/tox.ini b/machines/tox.ini index 7c6578064..578b2992b 100644 --- a/machines/tox.ini +++ b/machines/tox.ini @@ -84,6 +84,8 @@ pass_env = CEPH_ENDPOINT_URL CEPH_ACCESS_KEY CEPH_SECRET_KEY + CEPH_CA_CERT + CEPH_REGION commands_pre = poetry install --only integration commands =