diff --git a/.github/workflows/release_tests.yaml b/.github/workflows/release_tests.yaml
new file mode 100644
index 0000000000..c15df3102f
--- /dev/null
+++ b/.github/workflows/release_tests.yaml
@@ -0,0 +1,197 @@
+on:
+  workflow_call:
+    inputs:
+      mongodb-revisions:
+        description: |
+          MongoDB revisions to target (ex: '{"amd64": "161", "arm64": "162"}')
+        required: true
+        type: string
+      mongos-revisions:
+        description: |
+          Mongos revisions to target (ex: '{"amd64": "161", "arm64": "162"}')
+        required: true
+        type: string
+      backend:
+        description: |
+          Backend to target.
+
+          Either lxd or microk8s.
+        required: true
+        type: string
+  workflow_dispatch:
+    inputs:
+      mongodb-revisions:
+        description: |
+          MongoDB revisions to target (ex: '{"amd64": "161", "arm64": "162"}')
+        required: true
+        type: string
+      mongos-revisions:
+        description: |
+          Mongos revisions to target (ex: '{"amd64": "161", "arm64": "162"}')
+        required: true
+        type: string
+      backend:
+        description: |
+          Backend to target.
+
+          Either lxd or microk8s.
+        required: true
+        type: string
+
+name: Release testing
+jobs:
+  collect-release-tests:
+    name: Collect release test spread jobs
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v6
+      - name: Set up environment
+        run: |
+          sudo snap install go --classic
+          go install github.com/canonical/spread/cmd/spread@latest
+          pipx install tox poetry
+      - name: Install deps
+        run: |
+          sudo apt update
+          sudo apt install -y build-essential python3-dev libldap-dev libsasl2-dev
+      - name: Collect spread jobs
+        id: collect-jobs
+        shell: python
+        run: |
+          import json
+          import pathlib
+          import os
+          import subprocess
+          backend = "${{ inputs.backend }}"
+          pattern = f"github-ci:...:tests/spread/release/{backend}/"
+          mongodb_revisions = json.loads("${{ inputs.mongodb-revisions }}")
+          mongos_revisions = json.loads("${{ inputs.mongos-revisions }}")
+          spread_jobs = (
+              subprocess.run(
+                  [pathlib.Path.home() / "go/bin/spread", "-list", pattern],
+                  capture_output=True,
+                  check=True,
+                  text=True,
+              )
+              .stdout.strip()
+              .split("\n")
+          )
+          jobs = []
+          for job in spread_jobs:
+              # Example `job`: "github-ci:ubuntu-24.04:tests/spread/release/microk8s/test_release.py"
+              _, runner, task = job.split(":")
+              # Remove arm jobs in regular testing.
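For orientation, each matrix entry that this script appends to `jobs` ends up shaped roughly as follows once the fields below are filled in (an illustrative sketch only; the revision values are taken from the input example above):

    # One entry of the "jobs" JSON consumed by the release-test matrix (sketch):
    example_matrix_entry = {
        "spread_job": "github-ci:ubuntu-24.04:tests/spread/release/microk8s/test_release.py",
        "name": "release/microk8s/test_release.py | amd64",
        "name_in_artifact": "release-microk8s-test_release.py-amd64-mongodb-161-mongos-161",
        "runner": "ubuntu-24.04",
        "mongodb_revision": "161",
        "mongos_revision": "161",
    }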
+ # Example: "test_charm.py" + task = task.removeprefix("tests/spread/") + if "arm64" in runner: + architecture = "arm64" + else: + architecture = "amd64" + # Example: "test_charm.py | amd64" + name = f"{task} | {architecture}" + + mongodb_revision = mongodb_revisions[architecture] + mongos_revision = mongos_revisions[architecture] + # ":" character not valid in GitHub Actions artifact + name_in_artifact = f"{task.replace('/', '-')}-{architecture}-mongodb-{mongodb_revision}-mongos-{mongos_revision}" + jobs.append({ + "spread_job": job, + "name": name, + "name_in_artifact": name_in_artifact, + "runner": runner, + "mongodb_revision": mongodb_revision, + "mongos_revision": mongos_revision, + }) + output = f"jobs={json.dumps(jobs)}" + print(output) + with open(os.environ["GITHUB_OUTPUT"], "a") as file: + file.write(output) + outputs: + jobs: ${{ steps.collect-jobs.outputs.jobs }} + + release-test: + strategy: + fail-fast: false + matrix: + job: ${{ fromJSON(needs.collect-release-tests.outputs.jobs) }} + name: ${{ matrix.job.name }} + needs: + - collect-release-tests + runs-on: ${{ matrix.job.runner }} + timeout-minutes: 230 # Sum of steps `timeout-minutes` + 5 + steps: + - name: (IS hosted) Disk usage + timeout-minutes: 1 + if: ${{ contains(matrix.job.runner, 'self-hosted') }} + run: df --human-readable + - name: Checkout + timeout-minutes: 3 + uses: actions/checkout@v6 + - name: Setup python 3.12 + uses: actions/setup-python@v6 + with: + python-version: "3.12" + - name: Set up environment + timeout-minutes: 5 + run: sudo snap install charmcraft --classic + # TODO: remove when https://github.com/canonical/charmcraft/issues/2105 and + # https://github.com/canonical/charmcraft/issues/2130 fixed + - run: | + sudo snap install go --classic + go install github.com/canonical/spread/cmd/spread@latest + - name: Run spread job + timeout-minutes: 180 + id: spread + # TODO: replace with `charmcraft test` when + # https://github.com/canonical/charmcraft/issues/2105 and + # https://github.com/canonical/charmcraft/issues/2130 fixed + run: ~/go/bin/spread -vv -artifacts=artifacts '${{ matrix.job.spread_job }}' + env: + MONGODB_REVISION: ${{ matrix.job.mongodb_revision }} + MONGOS_REVISION: ${{ matrix.job.mongos_revision }} + AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} + AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }} + GCP_ACCESS_KEY: ${{ secrets.GCP_ACCESS_KEY }} + GCP_SECRET_KEY: ${{ secrets.GCP_SECRET_KEY }} + GCS_SERVICE_ACCOUNT: ${{ secrets.GCS_SERVICE_ACCOUNT }} + - timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: snap list + - name: Select model + timeout-minutes: 1 + if: ${{ (success() || (failure() && steps.spread.outcome == 'failure')) }} + id: juju-switch + run: | + # sudo needed since spread runs scripts as root + # "testing" is default model created by concierge + sudo juju switch testing + mkdir ~/logs/ + - name: juju status + timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: sudo juju status --color --relations | tee ~/logs/juju-status.txt + - name: juju status (YAML) + timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: sudo juju status --format yaml --color --relations --storage | tee ~/logs/juju-status-yaml.txt + - name: juju debug-log + timeout-minutes: 3 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: sudo juju debug-log --color --replay --no-tail | tee ~/logs/juju-debug-log.txt + - name: jhack tail + 
timeout-minutes: 3 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: sudo jhack tail --printer raw --replay --no-watch | tee ~/logs/jhack-tail.txt + - name: Upload logs + timeout-minutes: 5 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + uses: actions/upload-artifact@v4 + with: + name: logs-release-test-${{ matrix.job.name_in_artifact }} + path: ~/logs/ + if-no-files-found: error + - name: Disk usage + timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: df --human-readable diff --git a/spread.yaml b/spread.yaml index acbc238404..86e92bbfe5 100644 --- a/spread.yaml +++ b/spread.yaml @@ -124,9 +124,9 @@ backends: username: ubuntu - self-hosted-linux-arm64-noble-medium: username: ubuntu - - self-hosted-linux-amd64-noble-large: + - self-hosted-linux-amd64-noble-xlarge: username: ubuntu - - self-hosted-linux-arm64-noble-large: + - self-hosted-linux-arm64-noble-xlarge: username: ubuntu suites: @@ -138,6 +138,18 @@ suites: summary: Spread tests for Mongos VM tests/spread/mongos/microk8s/: summary: Spread tests for Mongos Kubernetes + tests/spread/release/lxd/: + summary: Spread mongodb release tests for VM + manual: True + environment: + MONGODB_REVISION: "$(HOST: echo $MONGODB_REVISION)" + MONGOS_REVISION: "$(HOST: echo $MONGOS_REVISION)" + tests/spread/release/microk8s/: + summary: Spread mongodb release tests for Kubernetes + manual: True + environment: + MONGODB_REVISION: "$(HOST: echo $MONGODB_REVISION)" + MONGOS_REVISION: "$(HOST: echo $MONGOS_REVISION)" path: /root/spread_project diff --git a/tests/conftest.py b/tests/conftest.py index 720e5dbd75..788c362d35 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -17,6 +17,20 @@ def pytest_addoption(parser: Parser): choices=("lxd", "microk8s"), default="lxd", ) + parser.addoption( + "--mongodb-revision", + action="store", + help="MongoDB revision", + default=1, + type=int, + ) + parser.addoption( + "--mongos-revision", + action="store", + help="MongoDB revision", + default=1, + type=int, + ) def pytest_configure(config): diff --git a/tests/integration/applications/continuous_write_charm/actions.yaml b/tests/integration/applications/continuous_write_charm/actions.yaml index e7484c8692..692b79b715 100644 --- a/tests/integration/applications/continuous_write_charm/actions.yaml +++ b/tests/integration/applications/continuous_write_charm/actions.yaml @@ -36,3 +36,27 @@ stop-continuous-writes: type: string description: name of the collection to write to default: continuous_writes_collection + +start-continuous-reads: + description: Start continuous reads. + params: + db-name: + type: string + description: name of the database to read from + default: continuous_writes_database + collection-name: + type: string + description: name of the collection to read from + default: continuous_writes_collection + +stop-continuous-reads: + description: Stop continuous reads. 
+ params: + db-name: + type: string + description: name of the database to read from + default: continuous_writes_database + collection-name: + type: string + description: name of the collection to read from + default: continuous_writes_collection diff --git a/tests/integration/applications/continuous_write_charm/src/charm.py b/tests/integration/applications/continuous_write_charm/src/charm.py index a1837a9c45..a54b98b276 100755 --- a/tests/integration/applications/continuous_write_charm/src/charm.py +++ b/tests/integration/applications/continuous_write_charm/src/charm.py @@ -8,6 +8,7 @@ high availability of the MongoDB charm. """ +import json import logging import os import signal @@ -32,6 +33,7 @@ PEER = "application-peers" PROC_PID_KEY = "proc-pid" LAST_WRITTEN_FILE = "last_written_value" +N_READ_FILE = "n_read_value" CA_PATH = Path("/tmp/ca.crt") @@ -55,6 +57,13 @@ def __init__(self, *args): self.on.stop_continuous_writes_action, self._on_stop_continuous_writes_action ) + self.framework.observe( + self.on.start_continuous_reads_action, self._on_start_continuous_reads_action + ) + self.framework.observe( + self.on.stop_continuous_reads_action, self._on_stop_continuous_reads_action + ) + # Database related events self.database = DatabaseRequires(self, "mongodb", self.database_name) # Database related events @@ -218,14 +227,86 @@ def _stop_continuous_writes(self, db_name: str, collection_name: str) -> int | N logger.exception("Unable to query the database", exc_info=e) return -1 + def _start_continuous_reads( + self, db_name: str, collection_name: str + ) -> None: + """Start continuous writes to the MongoDB cluster.""" + if not self._database_config: + logger.warning("No database configured.") + return + + logger.info(f"Running start continuous reads with {db_name=} and {collection_name=}") + self._stop_continuous_reads(db_name, collection_name) + + uris: str = self._database_config.get("uris", "") + # Run continuous writes in the background + proc = subprocess.Popen( + [ + sys.executable, + "src/continuous_reads.py", + uris, + db_name, + collection_name, + ] + ) + + # Store the continuous writes process id in stored state to be able to stop it later + self.app_peer_data[self.read_proc_id_key(db_name, collection_name)] = str(proc.pid) + + def _stop_continuous_reads(self, db_name: str, collection_name: str) -> tuple[int | None, list[str]]: + """Stop continuous reads to the MongoDB cluster and return the number of successful reads.""" + if not self._database_config: + logger.warning("No database configured.") + return None, [] + + if not self.app_peer_data.get(self.read_proc_id_key(db_name, collection_name)): + logger.warning("Missing read proc id.") + return None, [] + + # Send a SIGTERM to the process and wait for the process to exit + try: + os.kill( + int(self.app_peer_data[self.read_proc_id_key(db_name, collection_name)]), signal.SIGTERM + ) + except ProcessLookupError: + logger.info( + f"Process {self.app_peer_data[self.read_proc_id_key(db_name, collection_name)]} was killed already (or never existed)" + ) + return (None, []) + finally: + del self.app_peer_data[self.read_proc_id_key(db_name, collection_name)] + + + # read the last written_value + try: + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(5)): + with attempt, open(self.n_read_filename(db_name, collection_name)) as fd: + data = json.load(fd) + number_of_reads = int(data.get("reads", -1)) + failed_reads = data.get("failed_reads", []) + os.remove(self.n_read_filename(db_name, collection_name)) + 
logger.info(f"Read {number_of_reads} times // Failed {len(failed_reads)}.") + return number_of_reads, failed_reads + except RetryError as e: + logger.exception("Unable to query the database", exc_info=e) + return -1, [] + def proc_id_key(self, db_name: str, collection_name: str) -> str: """Returns a process id key for the continuous writes process to a given db and coll.""" return f"{PROC_PID_KEY}-{db_name}-{collection_name}" + def read_proc_id_key(self, db_name: str, collection_name: str) -> str: + """Returns a process id key for the continuous reads process to a given db and coll.""" + return f"read-{PROC_PID_KEY}-{db_name}-{collection_name}" + def last_written_filename(self, db_name: str, collection_name: str) -> str: """Returns the filename for the written data for a given db and coll.""" return f"{LAST_WRITTEN_FILE}-{db_name}-{collection_name}" + def n_read_filename(self, db_name: str, collection_name: str) -> str: + """Returns the filename for the read data for a given db and coll.""" + return f"{N_READ_FILE}-{db_name}-{collection_name}.json" + # ============== # Handlers # ============== @@ -278,6 +359,26 @@ def _on_stop_continuous_writes_action(self, event: ActionEvent) -> None: event.set_results({"writes": writes or -1}) return None + def _on_start_continuous_reads_action(self, event) -> None: + """Handle the start continuous reads action event.""" + if not self._database_config: + return + + db_name = event.params.get("db-name") or self.database_name + collection_name = event.params.get("collection-name") or COLLECTION_NAME + self._start_continuous_reads(db_name, collection_name) + + def _on_stop_continuous_reads_action(self, event: ActionEvent) -> None: + """Handle the stop continuous reads action event.""" + if not self._database_config: + return event.set_results({"reads": -1, "failed-reads": []}) + + db_name = event.params.get("db-name") or self.database_name + collection_name = event.params.get("collection-name") or COLLECTION_NAME + reads, failed_reads = self._stop_continuous_reads(db_name, collection_name) + event.set_results({"reads": reads or -1, "failed-reads": failed_reads}) + return None + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: """Handle the database created event.""" if event.tls == "True": diff --git a/tests/integration/applications/continuous_write_charm/src/continuous_reads.py b/tests/integration/applications/continuous_write_charm/src/continuous_reads.py new file mode 100644 index 0000000000..7a859b4700 --- /dev/null +++ b/tests/integration/applications/continuous_write_charm/src/continuous_reads.py @@ -0,0 +1,78 @@ +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""This file is meant to run in the background continuously reading data from MongoDB.""" + +import json +import random +import signal +import sys + +import math +from pymongo import MongoClient +from pymongo.errors import PyMongoError +from pymongo.write_concern import WriteConcern + +DEFAULT_DB_NAME = "continuous_writes_database" +DEFAULT_COLL_NAME = "continuous_writes_collection" + +run = True + + +def sigterm_handler(_signo, _stack_frame): + global run + run = False + +def n_read_filename(db_name: str, coll_name: str) -> str: + return f"n_read_value-{db_name}-{coll_name}.json" + + +def continous_reads( + connection_string: str, + db_name: str, + coll_name: str, +): + failed_reads = [] + reads = 0 + while run: + client = MongoClient( + connection_string, + socketTimeoutMS=5000, + ) + db = client[db_name] + test_collection = db[coll_name] + try: + if (rand:= random.random()) < 0.3: + # run some basic sampling + test_collection.aggregate([{"$sample": {"size": 10}}, {"$sort": {"number": 1}}]) + elif rand < 0.6: + n_docs = test_collection.count_documents({}) + # get one single sample + test_collection.aggregate([{"$skip": math.floor(n_docs * random.random())}, {"$limit": 1}]) + else: + n_docs = test_collection.count_documents({}) + test_collection.find({"number": {"$lte": math.floor(n_docs /2)}}) + except Exception as err: + failed_reads.append(str(err)) + with open("error.log", mode="a") as fd: + fd.write(f"{err}\n") + continue + finally: + client.close() + + reads += 1 + + with open(n_read_filename(db_name, coll_name), "w") as fd: + json.dump({"reads": reads, "failed_reads": failed_reads}, fd) + + +def main(): + connection_string = sys.argv[1] + db_name = DEFAULT_DB_NAME if len(sys.argv) < 3 else sys.argv[2] + coll_name = DEFAULT_COLL_NAME if len(sys.argv) < 4 else sys.argv[3] + continous_reads(connection_string, db_name, coll_name) + + +if __name__ == "__main__": + signal.signal(signal.SIGTERM, sigterm_handler) + main() diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index cf8094c9a4..41390f0c85 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -9,7 +9,9 @@ import shutil import subprocess import time +import uuid import zipfile +from collections.abc import Generator from logging import getLogger from pathlib import Path from platform import machine @@ -24,6 +26,7 @@ from yaml import safe_load from .helpers.architecture import architecture as _architecture +from .helpers.backups import CloudConfigs, CloudConfiguration from .helpers.common import ( CONTINUOUS_WRITE_APPLICATION, MONGOS_PORT, @@ -57,6 +60,18 @@ def architecture() -> str: return _architecture +@pytest.fixture(scope="session") +def mongodb_revision(request: pytest.FixtureRequest) -> int: + """Revision for the correct arch.""" + return int(request.config.option.mongodb_revision) + + +@pytest.fixture(scope="session") +def mongos_revision(request: pytest.FixtureRequest): + """Revision for the correct arch.""" + return int(request.config.option.mongos_revision) + + @pytest.fixture def application_path(architecture: str) -> str: """The test application path.""" @@ -121,6 +136,16 @@ def mongos_resource(mongos_metadata, substrate) -> dict[str, Any]: return {} +@pytest.fixture +def mongodb_charm_name(substrate: Substrate) -> str: + return "mongodb" if substrate == "lxd" else "mongodb-k8s" + + +@pytest.fixture +def mongos_charm_name(substrate: Substrate) -> str: + return "mongos" if substrate == "lxd" else "mongos-k8s" + + @pytest.fixture async def 
continuous_writes_to_db(ops_test: OpsTest, application_path: str): """Continuously writget_app_name the duration of the test.""" @@ -474,3 +499,57 @@ def s3_bucket(storage_credentials, storage_config) -> None: s3 = session.resource("s3", endpoint_url=storage_config["endpoint"], verify="cert.pem") bucket = s3.Bucket(storage_config["bucket"]) yield bucket + + +@pytest.fixture(scope="session") +def cloud_configs_aws(substrate: Substrate) -> CloudConfiguration: + path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" + configs: dict[str, str] = { + "endpoint": "https://s3.amazonaws.com", + "bucket": "data-charms-testing", + "path": f"{path}/{uuid.uuid4()}", + "region": "us-east-1", + } + credentials: dict[str, str] = { + "access-key": os.environ["AWS_ACCESS_KEY"], + "secret-key": os.environ["AWS_SECRET_KEY"], + } + return configs, credentials + + +@pytest.fixture(scope="session") +def cloud_configs_gcp(substrate: Substrate) -> CloudConfiguration: + path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" + configs: dict[str, str] = { + "bucket": "data-charms-testing", + "endpoint": "https://storage.googleapis.com", + "region": "", + "path": f"{path}/{uuid.uuid4()}", + } + credentials: dict[str, str] = { + "access-key": os.environ["GCP_ACCESS_KEY"], + "secret-key": os.environ["GCP_SECRET_KEY"], + } + return configs, credentials + + +@pytest.fixture(scope="session") +def cloud_configs_gcs(substrate: Substrate) -> CloudConfiguration: + path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" + configs: dict[str, str] = { + "bucket": "data-charms-testing", + "path": f"{path}/{uuid.uuid4()}", + } + credentials: dict[str, str] = { + "secret-key": os.environ["GCS_SERVICE_ACCOUNT"], + } + return configs, credentials + + +@pytest.fixture(scope="session") +def cloud_configs( + cloud_configs_gcp: CloudConfiguration, + cloud_configs_aws: CloudConfiguration, + cloud_configs_gcs: CloudConfiguration, +) -> Generator[CloudConfigs]: + yield {"AWS": cloud_configs_aws, "GCP": cloud_configs_gcp, "GCS": cloud_configs_gcs} diff --git a/tests/integration/helpers/common.py b/tests/integration/helpers/common.py index 5b068b7882..c14feb3965 100644 --- a/tests/integration/helpers/common.py +++ b/tests/integration/helpers/common.py @@ -8,6 +8,7 @@ from base64 import b64decode from dataclasses import dataclass from datetime import datetime +from pathlib import Path from random import choices from string import ascii_lowercase, digits from typing import Any @@ -35,6 +36,9 @@ from tests.integration.helpers.types import Substrate +MONGODB_SNAP_CONF_DIR = "/var/snap/charmed-mongodb/current/etc/mongod" +MONGODB_ROCK_CONF_DIR = "/etc/mongod" + MONGO_SHELL = "charmed-mongodb.mongosh" MONGOD_PORT = 27017 MONGOS_PORT = 27018 @@ -50,6 +54,8 @@ CONTINUOUS_WRITE_APPLICATION = "continuous-write" +CONTINUOUS_WRITE_APPLICATION_BIS = "continuous-write-bis" +READER_APPLICATION = "reader-application" # Keep in sync with tests/integration/applications/continuous_write_charm/src/charm.py DEFAULT_DATABASE_NAME = "continuous_writes_database" DEFAULT_COLLECTION_NAME = "continuous_writes_collection" @@ -89,6 +95,24 @@ def mongosh(substrate: Substrate) -> str: return "mongosh" +def external_cert_path(substrate: Substrate): + if substrate == "lxd": + return f"{MONGODB_SNAP_CONF_DIR}/external-ca.crt" + return f"{MONGODB_ROCK_CONF_DIR}/external-ca.crt" + + +def external_pem_path(substrate: Substrate): + if substrate == "lxd": + return f"{MONGODB_SNAP_CONF_DIR}/external-cert.pem" + return f"{MONGODB_ROCK_CONF_DIR}/external-cert.pem" + + 
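These substrate-aware certificate paths are what the new tls handling in execute_on_mongod (further down in this file) splices into the mongosh invocation. A minimal sketch of that usage, assuming the two helpers above are in scope; the function name here is illustrative only:

    # Build the extra mongosh flags used when tls=True (mirrors the
    # tls_string construction added to execute_on_mongod below).
    def mongosh_tls_args(substrate: str) -> str:
        return (
            f"--tls --tlsCAFile {external_cert_path(substrate)}"
            f" --tlsCertificateKeyFile {external_pem_path(substrate)}"
        )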
+def internal_cert_path(substrate: Substrate): + if substrate == "lxd": + return f"{MONGODB_SNAP_CONF_DIR}/internal-ca.crt" + return f"{MONGODB_ROCK_CONF_DIR}/internal-ca.crt" + + class ProcessError(Exception): """Raised when a process fails.""" @@ -101,23 +125,27 @@ async def deploy_charm( ops_test: OpsTest, charm: str, substrate: Substrate, - mongod_resource: dict[str, str], app_name: str, num_units: int = 3, + mongod_resource: dict[str, str] | None = None, channel: str | None = None, - config: dict[str, str | bool] | None = None, + revision: int | None = None, + config: dict[str, str] | None = None, subordinate: bool = False, storage: dict[str, str] | None = None, series: str | None = None, constraints: dict[str, list[str]] | None = None, bind: dict[str, str] | None = None, ): + if revision is not None: + channel = "8/beta" if substrate == "microk8s": series = series or "noble" await ops_test.model.deploy( charm, - resources=(mongod_resource if not channel else None), application_name=app_name, + revision=revision, + resources=(mongod_resource if not channel else None), num_units=0 if subordinate else num_units, series=series, trust=True, @@ -128,8 +156,9 @@ async def deploy_charm( else: await ops_test.model.deploy( charm, - num_units=0 if subordinate else num_units, application_name=app_name, + num_units=0 if subordinate else num_units, + revision=revision, config=config, channel=channel, storage=storage, @@ -178,13 +207,23 @@ async def relate_mongodb_and_application( mongodb_application_name: The mongodb charm application name application_name: The continuous writes test charm application name """ - if is_relation_joined(ops_test, "mongodb", "database"): + if is_relation_joined( + ops_test, "mongodb", "database", app_one=application_name, app_two=mongodb_application_name + ): return await ops_test.model.integrate( f"{application_name}:mongodb", f"{mongodb_application_name}:database" ) - await ops_test.model.block_until(lambda: is_relation_joined(ops_test, "mongodb", "database")) + await ops_test.model.block_until( + lambda: is_relation_joined( + ops_test, + "mongodb", + "database", + app_one=application_name, + app_two=mongodb_application_name, + ) + ) await ops_test.model.wait_for_idle( apps=[mongodb_application_name, application_name], @@ -462,7 +501,7 @@ async def find_unit(ops_test: OpsTest, leader: bool, app_name: str | None = None return ret_unit -async def get_leader_id(ops_test: OpsTest, app_name=None) -> int: +async def get_leader_id(ops_test: OpsTest, app_name: str | None = None) -> int: """Returns the unit number of the juju leader unit.""" app_name = app_name or await get_app_name(ops_test) for unit in ops_test.model.applications[app_name].units: @@ -668,7 +707,7 @@ async def remove_units( async def get_app_name( ops_test: OpsTest, charm_name: str = "mongodb", test_deployments: list[str] = [] -) -> str: +) -> str | None: """Returns the name of the cluster running MongoDB. This is important since not all deployments of the MongoDB charm have the application name @@ -943,17 +982,38 @@ async def check_app_status( assert app.status_message == message -def is_relation_joined(ops_test: OpsTest, endpoint_one: str, endpoint_two: str) -> bool: +def is_relation_joined( + ops_test: OpsTest, + endpoint_one: str, + endpoint_two: str, + app_one: str | None = None, + app_two: str | None = None, +) -> bool: """Check if a relation is joined. 
Args: ops_test: The ops test object passed into every test case endpoint_one: The first endpoint of the relation endpoint_two: The second endpoint of the relation + app_one: Application name for the first endpoint of the relation + app_two: Application name for the second endpoint of the relation """ for rel in ops_test.model.relations: - endpoints = [endpoint.name for endpoint in rel.endpoints] - if endpoint_one in endpoints and endpoint_two in endpoints: + endpoints = rel.endpoints + endpoint_names = [endpoint.name for endpoint in endpoints] + invalid = False + if endpoint_one not in endpoint_names or endpoint_two not in endpoint_names: + continue + if not app_one and not app_two: + return True + for endpoint in endpoints: + if endpoint.name == endpoint_one: + if app_one and endpoint.application.name != app_one: + invalid = True + if endpoint.name == endpoint_two: + if app_two and endpoint.application.name != app_two: + invalid = True + if not invalid: return True return False @@ -981,17 +1041,24 @@ async def execute_on_mongod( uri: str, command: str, container_name: str = "mongod", + tls: bool = False, stringify: bool = True, expecting_output: bool = True, ) -> CommandResult: """Executes the command with mongosh.""" leader_id = await get_leader_id(ops_test, app_name) ssh_command = ["ssh", "--container", container_name] if substrate == "microk8s" else ["ssh"] + tls_string = "" + if tls: + tls_string = ( + f"--tls --tlsCAFile {external_cert_path(substrate)}" + f" --tlsCertificateKeyFile {external_pem_path(substrate)}" + ) if stringify: - formatted_string = f'"{uri}" --quiet --eval "EJSON.stringify({command})"' + formatted_string = f'"{uri}" --quiet --eval "EJSON.stringify({command})" {tls_string}' else: - formatted_string = f'"{uri}" --quiet --eval "{command}"' + formatted_string = f'"{uri}" --quiet --eval "{command}" {tls_string}' cmd = [f"{app_name}/{leader_id}", mongosh(substrate), formatted_string] @@ -1034,6 +1101,20 @@ async def start_continous_writes( await start_writes_action.wait() +async def start_continuous_reads( + ops_test: OpsTest, + client_app_name: str, + db_name: str = DEFAULT_DATABASE_NAME, + coll_name: str = DEFAULT_COLLECTION_NAME, +): + """Helper function to run the `start-continuous-reads` action on the continuous write app.""" + application_unit = ops_test.model.applications[client_app_name].units[0] + start_reads_action = await application_unit.run_action( + "start-continuous-reads", **{"db-name": db_name, "collection-name": coll_name} + ) + await start_reads_action.wait() + + async def stop_continous_writes( ops_test: OpsTest, client_app_name: str, @@ -1052,6 +1133,22 @@ async def stop_continous_writes( return int(stop_writes_action.results["writes"]) +async def stop_continuous_reads( + ops_test: OpsTest, + client_app_name: str, + db_name: str = DEFAULT_DATABASE_NAME, + coll_name: str = DEFAULT_COLLECTION_NAME, +) -> tuple[int, list[str]]: + """Helper function to run the `stop-continuous-reads` action on the continuous write app.""" + application_unit = ops_test.model.applications[client_app_name].units[0] + stop_writes_action = await application_unit.run_action( + "stop-continuous-reads", **{"db-name": db_name, "collection-name": coll_name} + ) + await stop_writes_action.wait() + logger.warning(f"Failed reads: {stop_writes_action.results['failed-reads']}") + return int(stop_writes_action.results["reads"]), stop_writes_action.results["failed-reads"] + + async def clear_continous_writes( ops_test: OpsTest, client_app_name: str, @@ -1075,6 +1172,7 @@ async def 
count_writes( username: str = CHARMED_OPERATOR_USERNAME, db_name: str = DEFAULT_DATABASE_NAME, coll_name: str = DEFAULT_COLLECTION_NAME, + tls: bool = False, ) -> int: """New versions of pymongo no longer support the count operation, instead find is used.""" host = await get_address_of_unit(ops_test, substrate, get_unit_id(unit.name), app_name=app_name) @@ -1086,12 +1184,22 @@ async def count_writes( hosts=[host], username=username, ) + container = "mongod" + if tls: + ca_file = await scp_file_preserve_ctime( + ops_test, substrate, unit.name, external_cert_path(substrate), container + ) + else: + ca_file = None - client = MongoClient(uri, directConnection=True) + client = MongoClient(uri, directConnection=True, tlsCaFile=ca_file, tls=tls) db = client[db_name] test_collection = db[coll_name] count = test_collection.count_documents({}) client.close() + + if ca_file: + Path(ca_file).unlink() return count @@ -1366,6 +1474,33 @@ async def execute_on_server( ) +async def scp_file_preserve_ctime( + ops_test: OpsTest, substrate: Substrate, unit_name: str, path: str, container: str = "mongod" +) -> str: + """Returns the unix timestamp of when a file was created on a specified unit.""" + # Retrieving the file + filename = path.split("/")[-1] + if substrate == "lxd": + complete_command = f"exec --unit {unit_name} -- sudo cat {path}" + return_code, stdout, stderr = await ops_test.juju(*complete_command.split(), check=True) + with open(filename, mode="w") as fd: + fd.write(stdout.strip()) + else: + complete_command = f"scp --container {container} {unit_name}:{path} {filename}" + return_code, _, stderr = await ops_test.juju(*complete_command.split()) + + if return_code != 0: + logger.error(stderr) + raise ProcessError( + "Expected command %s to succeed instead it failed: %s; %s", + complete_command, + return_code, + stderr, + ) + + return f"{filename}" + + def mongodb_base_path(substrate: Substrate) -> str: if substrate == "lxd": return "/var/snap/charmed-mongodb/current/etc/mongod/" diff --git a/tests/integration/helpers/ldap.py b/tests/integration/helpers/ldap.py index 3216aedcd8..6ff9fecf3c 100644 --- a/tests/integration/helpers/ldap.py +++ b/tests/integration/helpers/ldap.py @@ -153,7 +153,13 @@ async def teardown_offers(ops_test: OpsTest, kubernetes_model: Model) -> None: async def create_mongodb_user_roles( - ops_test: OpsTest, substrate: Substrate, app_name: str, role_name: str, mongos: bool = False + ops_test: OpsTest, + substrate: Substrate, + app_name: str, + role_name: str, + mongos: bool = False, + db: str = "superdb", + tls: bool = False, ) -> None: """Creates the roles for mongodb with the provided role_name.""" uri = await generate_mongodb_client(ops_test, substrate, app_name, mongos=mongos) @@ -166,8 +172,9 @@ async def create_mongodb_user_roles( "db.createRole({" f" role: '{role_name}'," " privileges: []," - " roles: [{'db': 'superdb', 'role': 'readWrite'}, {'db': 'superdb', 'role': 'enableSharding'}]" + f" roles: [{{'db': '{db}', 'role': 'readWrite'}}, {{'db': '{db}', 'role': 'enableSharding'}}]" "})", + tls=tls, ) assert result.succeeded, "Failed to create role" diff --git a/tests/integration/helpers/mongos.py b/tests/integration/helpers/mongos.py index f926754c42..b3526a00df 100644 --- a/tests/integration/helpers/mongos.py +++ b/tests/integration/helpers/mongos.py @@ -20,6 +20,7 @@ TIMEOUT, clear_continous_writes, deploy_charm, + external_cert_path, find_unit, get_address_of_unit, get_application_relation_data, @@ -28,6 +29,7 @@ get_secret_data, get_unit_hostnames, get_unit_id, + 
internal_cert_path, mongosh, start_continous_writes, stop_continous_writes, @@ -39,10 +41,8 @@ cannot_connect_without_tls, check_certs_correctly_distributed, check_tls, - external_cert_path, get_file_content, integrate_apps_with_tls, - internal_cert_path, remove_tls_integrations, set_private_keys, time_file_created, diff --git a/tests/integration/helpers/sharding.py b/tests/integration/helpers/sharding.py index f1482b214e..113fe6b163 100644 --- a/tests/integration/helpers/sharding.py +++ b/tests/integration/helpers/sharding.py @@ -14,8 +14,10 @@ DEPLOYMENT_TIMEOUT, MONGOS_PORT, deploy_charm, + external_cert_path, get_direct_mongo_client, get_leader_id, + internal_cert_path, mongodb_uri, ) from tests.integration.helpers.tls import ( @@ -24,9 +26,7 @@ cannot_connect_without_tls, check_certs_correctly_distributed, check_tls, - external_cert_path, get_file_content, - internal_cert_path, set_private_keys, time_file_created, time_process_started, diff --git a/tests/integration/helpers/tls.py b/tests/integration/helpers/tls.py index 2c89ec1c64..77c12f8770 100644 --- a/tests/integration/helpers/tls.py +++ b/tests/integration/helpers/tls.py @@ -19,12 +19,16 @@ MONGOS_APP_NAME, MONGOS_PORT, ProcessError, + external_cert_path, + external_pem_path, get_address_of_unit, get_application_relation_data, get_password, get_secret_content, get_secret_id, + internal_cert_path, mongosh, + scp_file_preserve_ctime, ) from tests.integration.helpers.types import Substrate @@ -38,31 +42,11 @@ DIFFERENT_CERTIFICATES_APP_NAME = "self-signed-certificates-separate" -MONGODB_SNAP_CONF_DIR = "/var/snap/charmed-mongodb/current/etc/mongod" -MONGODB_ROCK_CONF_DIR = "/etc/mongod" SNAP_MONGOD_SERVICE = "snap.charmed-mongodb.mongod.service" SNAP_MONGOS_SERVICE = "snap.charmed-mongodb.mongos.service" -def external_cert_path(substrate: Substrate): - if substrate == "lxd": - return f"{MONGODB_SNAP_CONF_DIR}/external-ca.crt" - return f"{MONGODB_ROCK_CONF_DIR}/external-ca.crt" - - -def external_pem_path(substrate: Substrate): - if substrate == "lxd": - return f"{MONGODB_SNAP_CONF_DIR}/external-cert.pem" - return f"{MONGODB_ROCK_CONF_DIR}/external-cert.pem" - - -def internal_cert_path(substrate: Substrate): - if substrate == "lxd": - return f"{MONGODB_SNAP_CONF_DIR}/internal-ca.crt" - return f"{MONGODB_ROCK_CONF_DIR}/internal-ca.crt" - - async def integrate_apps_with_tls( ops_test: OpsTest, applications: list[str], @@ -358,33 +342,6 @@ def process_systemctl_time(systemctl_output) -> datetime: return datetime.strptime(time_as_str, "%Y-%m-%dT%H:%M:%S") -async def scp_file_preserve_ctime( - ops_test: OpsTest, substrate: Substrate, unit_name: str, path: str, container: str = "mongod" -) -> str: - """Returns the unix timestamp of when a file was created on a specified unit.""" - # Retrieving the file - filename = path.split("/")[-1] - if substrate == "lxd": - complete_command = f"exec --unit {unit_name} -- sudo cat {path}" - return_code, stdout, stderr = await ops_test.juju(*complete_command.split(), check=True) - with open(filename, mode="w") as fd: - fd.write(stdout.strip()) - else: - complete_command = f"scp --container {container} {unit_name}:{path} {filename}" - return_code, _, stderr = await ops_test.juju(*complete_command.split()) - - if return_code != 0: - logger.error(stderr) - raise ProcessError( - "Expected command %s to succeed instead it failed: %s; %s", - complete_command, - return_code, - stderr, - ) - - return f"{filename}" - - async def check_certs_correctly_distributed( ops_test: OpsTest, substrate: Substrate, diff 
--git a/tests/integration/mongodb/conftest.py b/tests/integration/mongodb/conftest.py index 902f642630..6260da32cb 100644 --- a/tests/integration/mongodb/conftest.py +++ b/tests/integration/mongodb/conftest.py @@ -1,7 +1,5 @@ # Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. -import os -import uuid from collections.abc import Generator from pathlib import Path from typing import Any @@ -9,7 +7,6 @@ import pytest from pytest_operator.plugin import OpsTest -from ..helpers.backups import CloudConfigs, CloudConfiguration from ..helpers.common import get_app_name from ..helpers.ha import deploy_chaos_mesh, destroy_chaos_mesh, update_restart_delay from ..helpers.types import Substrate @@ -33,60 +30,6 @@ def chaos_mesh(ops_test: OpsTest, substrate: Substrate) -> Generator[None, Any, yield -@pytest.fixture(scope="session") -def cloud_configs_aws(substrate: Substrate) -> CloudConfiguration: - path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" - configs: dict[str, str] = { - "endpoint": "https://s3.amazonaws.com", - "bucket": "data-charms-testing", - "path": f"{path}/{uuid.uuid4()}", - "region": "us-east-1", - } - credentials: dict[str, str] = { - "access-key": os.environ["AWS_ACCESS_KEY"], - "secret-key": os.environ["AWS_SECRET_KEY"], - } - return configs, credentials - - -@pytest.fixture(scope="session") -def cloud_configs_gcp(substrate: Substrate) -> CloudConfiguration: - path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" - configs: dict[str, str] = { - "bucket": "data-charms-testing", - "endpoint": "https://storage.googleapis.com", - "region": "", - "path": f"{path}/{uuid.uuid4()}", - } - credentials: dict[str, str] = { - "access-key": os.environ["GCP_ACCESS_KEY"], - "secret-key": os.environ["GCP_SECRET_KEY"], - } - return configs, credentials - - -@pytest.fixture(scope="session") -def cloud_configs_gcs(substrate: Substrate) -> CloudConfiguration: - path = "mongodb-vm" if substrate == "lxd" else "mongodb-k8s" - configs: dict[str, str] = { - "bucket": "data-charms-testing", - "path": f"{path}/{uuid.uuid4()}", - } - credentials: dict[str, str] = { - "secret-key": os.environ["GCS_SERVICE_ACCOUNT"], - } - return configs, credentials - - -@pytest.fixture(scope="session") -def cloud_configs( - cloud_configs_gcp: CloudConfiguration, - cloud_configs_aws: CloudConfiguration, - cloud_configs_gcs: CloudConfiguration, -) -> Generator[CloudConfigs]: - yield {"AWS": cloud_configs_aws, "GCP": cloud_configs_gcp, "GCS": cloud_configs_gcs} - - @pytest.fixture() async def reset_restart_delay(ops_test: OpsTest, substrate: Substrate, tmp_path: Path): """Resets service file delay on all units.""" diff --git a/tests/integration/mongodb/tls/test_tls.py b/tests/integration/mongodb/tls/test_tls.py index 633d98dfd3..daf4e70529 100644 --- a/tests/integration/mongodb/tls/test_tls.py +++ b/tests/integration/mongodb/tls/test_tls.py @@ -17,8 +17,10 @@ clear_continous_writes, deploy_application, deploy_charm, + external_cert_path, get_app_name, get_secret_by_label, + internal_cert_path, start_continous_writes, stop_continous_writes, wait_for_mongodb_units_blocked, @@ -31,9 +33,7 @@ cannot_connect_without_tls, check_certs_correctly_distributed, check_tls, - external_cert_path, integrate_apps_with_tls, - internal_cert_path, remove_tls_integrations, set_invalid_private_key, set_private_key, diff --git a/tests/integration/mongos/release/test_release.py b/tests/integration/mongos/release/test_release.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/tests/integration/release/__init__.py b/tests/integration/release/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/release/conftest.py b/tests/integration/release/conftest.py new file mode 100644 index 0000000000..cd85c4e570 --- /dev/null +++ b/tests/integration/release/conftest.py @@ -0,0 +1,45 @@ +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. + +from collections.abc import AsyncGenerator +from logging import getLogger +from typing import Any + +import pytest +from juju.model import Model +from kubernetes.config.config_exception import ConfigException +from pytest_operator.plugin import OpsTest + +TIMEOUT = 15 * 60 + +logger = getLogger(__name__) + + +@pytest.fixture +def mongodb_base_app_name(mongod_metadata: dict[str, Any]) -> str: + """Default application name for testing.""" + return mongod_metadata["name"] + + +@pytest.fixture +def mongos_base_app_name(mongos_metadata: dict[str, Any]) -> str: + """Default application name for testing.""" + return mongos_metadata["name"] + + +@pytest.fixture(scope="module") +async def kubernetes_model(ops_test: OpsTest) -> AsyncGenerator[Model]: + try: + k8s_cloud = await ops_test.add_k8s(skip_storage=False) + logger.warning(f"created cloud {k8s_cloud}") + except (ConfigException, TypeError): + pytest.fail("No Kubernetes config found to add-k8s") + # deploy the glauth-k8s charm + kubernetes_model = await ops_test.track_model( + "secondary", cloud_name=k8s_cloud, keep=ops_test.ModelKeep.NEVER + ) + logger.warning(f"Created model {kubernetes_model.name}") + + yield kubernetes_model + + await ops_test.forget_model(alias="secondary", timeout=TIMEOUT, allow_failure=True) diff --git a/tests/integration/release/test_release.py b/tests/integration/release/test_release.py new file mode 100644 index 0000000000..69e6b6d55e --- /dev/null +++ b/tests/integration/release/test_release.py @@ -0,0 +1,390 @@ +#!/usr/bin/env python3 +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. 
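The release suite that follows is normally driven by spread, which presumably forwards the MONGODB_REVISION/MONGOS_REVISION environment variables from spread.yaml into the pytest options added in tests/conftest.py. A hedged sketch of an equivalent direct invocation (revision values are placeholders, and extra options such as the substrate flag and cloud credentials may also be required):

    import pytest

    # Run the release suite against pinned charm revisions; the two options
    # below are the ones added to tests/conftest.py in this change.
    pytest.main([
        "tests/integration/release/test_release.py",
        "--mongodb-revision", "161",
        "--mongos-revision", "161",
    ])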
+ +import asyncio +from logging import getLogger + +import pytest +from juju.model import Model +from pytest_operator.plugin import OpsTest +from tenacity import RetryError, Retrying +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from tests.integration.helpers.backups import S3_APP_NAME, count_logical_backups +from tests.integration.helpers.common import ( + CONTINUOUS_WRITE_APPLICATION, + CONTINUOUS_WRITE_APPLICATION_BIS, + DATA_INTEGRATOR_APP_NAME, + DEFAULT_COLLECTION_NAME, + DEFAULT_DATABASE_NAME, + DEPLOYMENT_TIMEOUT, + READER_APPLICATION, + TIMEOUT, + UNIT_IDS, + count_writes, + deploy_application, + deploy_charm, + execute_on_mongod, + find_unit, + get_app_name, + relate_mongodb_and_application, + start_continous_writes, + start_continuous_reads, + stop_continous_writes, + stop_continuous_reads, +) +from tests.integration.helpers.ldap import ( + LDAP_CERT_OFFER, + LDAP_OFFER, + apply_ldif, + consume_glauth_offers, + create_mongodb_user_roles, + deploy_glauth, + generate_mongodb_ldap_client, + teardown_offers, +) +from tests.integration.helpers.tls import ( + TLS_CERTIFICATES_APP_NAME, + TLS_CERTIFICATES_BASE, + TLS_CERTIFICATES_CHANNEL, + integrate_apps_with_tls, +) +from tests.integration.helpers.types import Substrate + +logger = getLogger(__name__) + +SECOND_DB_NAME = f"{DEFAULT_DATABASE_NAME}_bis" +SECOND_COLL_NAME = f"{DEFAULT_COLLECTION_NAME}_bis" + + +@pytest.mark.abort_on_fail +async def test_deploy_apps( + ops_test: OpsTest, + mongodb_charm_name: str, + application_path: str, + substrate: Substrate, + mongodb_revision: int, + mongodb_base_app_name: str, + kubernetes_model: Model, +): + """Deploy MongoDB with the right revision. + + This also deploys a data integrator, alongside a continuous write application, + a self-signed-certificates application, and LDAP with all it needs. + """ + tls_config = {"ca-common-name": "MongoDB release CA"} + + assert ops_test.model + # it is possible for users to provide their own cluster for testing. Hence check if there + # is a pre-existing cluster. 
+ await asyncio.gather( + deploy_charm( + ops_test=ops_test, + revision=mongodb_revision, + charm=mongodb_charm_name, + substrate=substrate, + app_name=mongodb_base_app_name, + num_units=len(UNIT_IDS), + ), + deploy_application( + ops_test, application_path=application_path, app_name=CONTINUOUS_WRITE_APPLICATION + ), + ops_test.model.deploy( + TLS_CERTIFICATES_APP_NAME, + channel=TLS_CERTIFICATES_CHANNEL, + config=tls_config, + base=TLS_CERTIFICATES_BASE, + ), + ops_test.model.deploy( + DATA_INTEGRATOR_APP_NAME, + channel="latest/stable", + series="noble", + config={"database-name": "test-database"}, + ), + ) + + await deploy_glauth(ops_test, kubernetes_model) + + # Consume the offers exposed by glauth + await consume_glauth_offers(ops_test, kubernetes_model) + + # Apply the LDIF file on glauth-utils to create users and groups + await apply_ldif(ops_test, kubernetes_model, "ldap_entries.ldif") + + await ops_test.model.wait_for_idle( + apps=[mongodb_base_app_name, TLS_CERTIFICATES_APP_NAME], + timeout=DEPLOYMENT_TIMEOUT, + status="active", + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_with_tls( + ops_test: OpsTest, +): + """Tests that we can integrate with TLS, and then add a writer and start writing.""" + assert ops_test.model + app_name = await get_app_name(ops_test) + assert app_name + await integrate_apps_with_tls(ops_test, applications=[app_name]) + + await ops_test.model.wait_for_idle( + apps=[app_name, TLS_CERTIFICATES_APP_NAME], status="active", timeout=1000, idle_period=60 + ) + + await relate_mongodb_and_application(ops_test, app_name, CONTINUOUS_WRITE_APPLICATION) + await start_continous_writes(ops_test, CONTINUOUS_WRITE_APPLICATION) + + +@pytest.mark.abort_on_fail +async def test_integrate_with_ldap(ops_test: OpsTest, substrate: Substrate): + """Tests that we can integrate with LDAP without losing data.""" + assert ops_test.model + app_name = await get_app_name(ops_test) + assert app_name + + await ops_test.model.integrate(f"{LDAP_OFFER}:ldap", f"{app_name}:ldap") + await ops_test.model.integrate( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{app_name}:ldap-certificate-transfer" + ) + # Create the roles on MongoDB + await create_mongodb_user_roles( + ops_test, + substrate, + app_name=app_name, + role_name="ou=superheroes,ou=users,dc=glauth,dc=com", + db=DEFAULT_DATABASE_NAME, + tls=True, + ) + + await ops_test.model.wait_for_idle(apps=[app_name], status="active", timeout=TIMEOUT) + + +@pytest.mark.abort_on_fail +async def test_integrate_second_client(ops_test: OpsTest, application_path: str): + """Tests that we can integrate with a second client, and we also start writing on that client. + + The client is a continuous write application. + """ + app_name = await get_app_name(ops_test) + + await deploy_application( + ops_test, + application_path=application_path, + app_name=CONTINUOUS_WRITE_APPLICATION_BIS, + database_name=SECOND_DB_NAME, + ) + await relate_mongodb_and_application(ops_test, app_name, CONTINUOUS_WRITE_APPLICATION_BIS) + await start_continous_writes( + ops_test, + CONTINUOUS_WRITE_APPLICATION_BIS, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_third_client(ops_test: OpsTest, application_path: str): + """Tests that we can integrate with a third client, which will only read data. + + The client is a continuous write application. 
+ """ + app_name = await get_app_name(ops_test) + + await deploy_application( + ops_test, + application_path=application_path, + app_name=READER_APPLICATION, + database_name=DEFAULT_DATABASE_NAME, + ) + await relate_mongodb_and_application(ops_test, app_name, READER_APPLICATION) + + await start_continuous_reads( + ops_test, + READER_APPLICATION, + db_name=DEFAULT_DATABASE_NAME, + coll_name=DEFAULT_COLLECTION_NAME, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_with_s3( + ops_test: OpsTest, + storage_credentials: dict[str, str], + storage_config: dict[str, str], +): + """Tests that we can integrate with S3 and create a backup. + + This test ensures that the backup is created and finished. + """ + assert ops_test.model + app_name = await get_app_name(ops_test) + assert app_name + + # deploy the s3 integrator charm + await ops_test.model.deploy(S3_APP_NAME, channel="1/edge") + await ops_test.model.wait_for_idle(apps=[S3_APP_NAME], timeout=DEPLOYMENT_TIMEOUT) + + s3_integrator_unit = ops_test.model.applications[S3_APP_NAME].units[0] + + # apply new configuration options + await ops_test.model.applications[S3_APP_NAME].set_config(storage_config) + action = await s3_integrator_unit.run_action( + action_name="sync-s3-credentials", **storage_credentials + ) + await action.wait() + + await ops_test.model.integrate(S3_APP_NAME, app_name) + + await ops_test.model.wait_for_idle( + apps=[S3_APP_NAME, app_name], status="active", timeout=TIMEOUT + ) + + leader_unit = await find_unit(ops_test, leader=True, app_name=app_name) + action = await leader_unit.run_action(action_name="create-backup") + backup_result = await action.wait() + + logger.info(f"Create backup result {backup_result.results=}") + assert "backup started" in backup_result.results["backup-status"], "backup didn't start" + try: + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(5)): + with attempt: + backups = await count_logical_backups(leader_unit) + assert backups == 1 + except RetryError: + assert backups == 1, "Backup not created." + + # Wait for status to go back to idle. + await ops_test.model.wait_for_idle(apps=[app_name], status="active", timeout=TIMEOUT) + + +@pytest.mark.abort_on_fail +async def tests_restore_backup(ops_test: OpsTest, substrate: Substrate): + """Tests that we can restore a backup. + + This test starts by stopping the writes applications, and counting the number of writes + ensuring that we have never lost any write until now. + Then it restores the backup, counts the number of writes, + and checks that it is lower than what we had, proving that the backup was restored successfully. 
+ """ + assert ops_test.model + app_name = await get_app_name(ops_test) + assert app_name + + first_reported_writes = await stop_continous_writes(ops_test, CONTINUOUS_WRITE_APPLICATION) + second_reported_writes = await stop_continous_writes( + ops_test, + CONTINUOUS_WRITE_APPLICATION_BIS, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + leader_unit = await find_unit(ops_test, leader=True, app_name=app_name) + # count total writes + first_number_writes = await count_writes(ops_test, substrate, app_name, leader_unit, tls=True) + second_number_writes = await count_writes( + ops_test, + substrate, + app_name, + leader_unit, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + tls=True, + ) + assert first_number_writes == first_reported_writes + assert second_number_writes == second_reported_writes + + # find most recent backup id and restore + action = await leader_unit.run_action(action_name="list-backups") + list_result = await action.wait() + list_result = list_result.results["backups"] + most_recent_backup = list_result.split("\n")[-1] + + backup_id = most_recent_backup.split()[0] + + action = await leader_unit.run_action(action_name="restore", **{"backup-id": backup_id}) + restore = await action.wait() + logger.info(f"Restore backup result {restore.results=}") + assert restore.results["restore-status"] == "restore started", "restore not successful" + + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle(apps=[app_name], status="active", idle_period=15) + + first_number_writes_after_restore = await count_writes( + ops_test, substrate, app_name, leader_unit, tls=True + ) + second_number_writes_after_restore = await count_writes( + ops_test, + substrate, + app_name, + leader_unit, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + tls=True, + ) + + assert first_number_writes_after_restore < first_number_writes + assert second_number_writes_after_restore < second_number_writes + + +@pytest.mark.abort_on_fail +async def test_ldap_user_can_write(ops_test: OpsTest, substrate: Substrate): + """Checks that the LDAP user can write to the DB. + + This checks both authentication and authorisation. 
+ """ + app_name = await get_app_name(ops_test) + assert app_name + + # We create a client which should be able to write + uri = await generate_mongodb_ldap_client( + ops_test, + substrate, + app_name, + database=DEFAULT_DATABASE_NAME, + username="cn=johndoe,ou=superheroes,ou=users,dc=glauth,dc=com", + password="dogood", + ) + + result = await execute_on_mongod( + ops_test, app_name, substrate, uri, "db.test.insertOne({number: 1})", tls=True + ) + assert result.succeeded, "Failed to insert value with LDAP client" + + result = await execute_on_mongod( + ops_test, app_name, substrate, uri, "db.test.findOne({number: 1})", tls=True + ) + assert result.succeeded, "Failed to read value with LDAP client" + + +@pytest.mark.abort_on_fail +async def test_valid_reads(ops_test: OpsTest): + """Checks the reads at the end of the tests.""" + reads, failed_reads = await stop_continuous_reads( + ops_test, + READER_APPLICATION, + db_name=DEFAULT_DATABASE_NAME, + coll_name=DEFAULT_COLLECTION_NAME, + ) + assert reads > 1000 + # We can allow for a few errors during restore for example + assert len(failed_reads) < 50 + + +@pytest.mark.abort_on_fail +async def test_teardown(ops_test: OpsTest, kubernetes_model: Model): + """Teardown of the whole offers and relations.""" + app_name = await get_app_name(ops_test) + assert app_name + + await ops_test.model.applications[app_name].remove_relation( + f"{LDAP_OFFER}:ldap", f"{app_name}:ldap" + ) + await ops_test.model.applications[app_name].remove_relation( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{app_name}:ldap-certificate-transfer" + ) + await ops_test.model.wait_for_idle(apps=[app_name], status="active", timeout=TIMEOUT) + + # Remove the offers and tear down deployment + await teardown_offers(ops_test, kubernetes_model) diff --git a/tests/integration/release/test_sharding_release.py b/tests/integration/release/test_sharding_release.py new file mode 100644 index 0000000000..c5a0cea670 --- /dev/null +++ b/tests/integration/release/test_sharding_release.py @@ -0,0 +1,618 @@ +#!/usr/bin/env python3 +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import asyncio +from logging import getLogger + +import pytest +from juju.model import Model +from pytest_operator.plugin import OpsTest +from tenacity import RetryError, Retrying +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from tests.integration.helpers.backups import S3_APP_NAME, count_logical_backups +from tests.integration.helpers.common import ( + CONTINUOUS_WRITE_APPLICATION, + CONTINUOUS_WRITE_APPLICATION_BIS, + DEFAULT_COLLECTION_NAME, + DEFAULT_DATABASE_NAME, + DEPLOYMENT_TIMEOUT, + MONGOS_APP_NAME, + READER_APPLICATION, + TIMEOUT, + UNIT_IDS, + count_writes, + deploy_application, + deploy_charm, + execute_on_mongod, + find_unit, + start_continous_writes, + start_continuous_reads, + stop_continous_writes, + stop_continuous_reads, + wait_for_mongodb_units_blocked, +) +from tests.integration.helpers.ldap import ( + LDAP_CERT_OFFER, + LDAP_OFFER, + apply_ldif, + consume_glauth_offers, + create_mongodb_user_roles, + deploy_glauth, + generate_mongodb_ldap_client, + teardown_offers, +) +from tests.integration.helpers.sharding import ( + CONFIG_SERVER_APP_NAME, + CONFIG_SERVER_REL_NAME, + SHARD_ONE_APP_NAME, + SHARD_REL_NAME, + SHARD_THREE_APP_NAME, + SHARD_TWO_APP_NAME, +) +from tests.integration.helpers.tls import ( + TLS_CERTIFICATES_APP_NAME, + TLS_CERTIFICATES_BASE, + TLS_CERTIFICATES_CHANNEL, + integrate_apps_with_tls, +) +from tests.integration.helpers.types import Substrate + +logger = getLogger(__name__) + +SECOND_DB_NAME = f"{DEFAULT_DATABASE_NAME}_bis" +SECOND_COLL_NAME = f"{DEFAULT_COLLECTION_NAME}_bis" + +MONGOS_BIS_APP_NAME = f"{MONGOS_APP_NAME}-bis" +MONGOS_TER_APP_NAME = f"{MONGOS_APP_NAME}-ter" + + +@pytest.mark.abort_on_fail +async def test_deploy_apps( + ops_test: OpsTest, + mongodb_charm_name: str, + mongos_charm_name: str, + application_path: str, + substrate: Substrate, + mongodb_revision: int, + mongos_revision: int, + kubernetes_model: Model, +): + """Deploys and integrate a cluster with the right revisions. + + This also deploys a data integrator, alongside a continuous write application, + a self-signed-certificates application, and LDAP with all it needs. + """ + tls_config = {"ca-common-name": "MongoDB release CA"} + # it is possible for users to provide their own cluster for testing. Hence check if there + # is a pre-existing cluster. 
+ await asyncio.gather( + deploy_charm( + ops_test=ops_test, + revision=mongodb_revision, + charm=mongodb_charm_name, + substrate=substrate, + app_name=CONFIG_SERVER_APP_NAME, + num_units=len(UNIT_IDS), + config={"role": "config-server"}, + ), + deploy_charm( + ops_test=ops_test, + revision=mongodb_revision, + charm=mongodb_charm_name, + substrate=substrate, + app_name=SHARD_ONE_APP_NAME, + num_units=len(UNIT_IDS), + config={"role": "shard"}, + ), + deploy_charm( + ops_test=ops_test, + revision=mongodb_revision, + charm=mongodb_charm_name, + substrate=substrate, + app_name=SHARD_TWO_APP_NAME, + num_units=len(UNIT_IDS), + config={"role": "shard"}, + ), + deploy_charm( + ops_test=ops_test, + revision=mongos_revision, + charm=mongos_charm_name, + substrate=substrate, + app_name=MONGOS_APP_NAME, + num_units=(1 if substrate == "microk8s" else 0), + ), + deploy_charm( + ops_test=ops_test, + revision=mongos_revision, + charm=mongos_charm_name, + substrate=substrate, + app_name=MONGOS_BIS_APP_NAME, + num_units=(1 if substrate == "microk8s" else 0), + ), + deploy_charm( + ops_test=ops_test, + revision=mongos_revision, + charm=mongos_charm_name, + substrate=substrate, + app_name=MONGOS_TER_APP_NAME, + num_units=(1 if substrate == "microk8s" else 0), + ), + ops_test.model.deploy( + TLS_CERTIFICATES_APP_NAME, + channel=TLS_CERTIFICATES_CHANNEL, + config=tls_config, + base=TLS_CERTIFICATES_BASE, + ), + deploy_application( + ops_test, application_path=application_path, app_name=CONTINUOUS_WRITE_APPLICATION + ), + ) + + await deploy_glauth(ops_test, kubernetes_model) + + # Consume the offers exposed by glauth + await consume_glauth_offers(ops_test, kubernetes_model) + + # Apply the LDIF file on glauth-utils to create users and groups + await apply_ldif(ops_test, kubernetes_model, "ldap_entries.ldif") + + await ops_test.model.wait_for_idle( + apps=[TLS_CERTIFICATES_APP_NAME], timeout=DEPLOYMENT_TIMEOUT, status="active" + ) + # verify that Charmed MongoDB is blocked and reports incorrect credentials + await wait_for_mongodb_units_blocked(ops_test, substrate, CONFIG_SERVER_APP_NAME, timeout=300) + await wait_for_mongodb_units_blocked(ops_test, substrate, SHARD_ONE_APP_NAME, timeout=300) + await wait_for_mongodb_units_blocked(ops_test, substrate, SHARD_TWO_APP_NAME, timeout=300) + + await ops_test.model.integrate( + f"{SHARD_ONE_APP_NAME}:{SHARD_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + ) + await ops_test.model.integrate( + f"{SHARD_TWO_APP_NAME}:{SHARD_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[ + CONFIG_SERVER_APP_NAME, + SHARD_ONE_APP_NAME, + SHARD_TWO_APP_NAME, + ], + idle_period=15, + status="active", + timeout=TIMEOUT, + raise_on_error=False, + ) + + await ops_test.model.integrate( + f"{MONGOS_APP_NAME}", + f"{CONTINUOUS_WRITE_APPLICATION}", + ) + await ops_test.model.wait_for_idle( + apps=[ + MONGOS_APP_NAME, + CONTINUOUS_WRITE_APPLICATION, + ], + idle_period=15, + timeout=TIMEOUT, + raise_on_blocked=False, + ) + await ops_test.model.integrate( + f"{MONGOS_APP_NAME}", + f"{CONFIG_SERVER_APP_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[ + CONTINUOUS_WRITE_APPLICATION, + MONGOS_APP_NAME, + CONFIG_SERVER_APP_NAME, + ], + status="active", + idle_period=20, + timeout=TIMEOUT, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_with_tls( + ops_test: OpsTest, +): + """Tests that we can integrate with TLS, and then add a writer and start writing.""" + assert ops_test.model + + await 
integrate_apps_with_tls( + ops_test, + applications=[ + CONFIG_SERVER_APP_NAME, + SHARD_ONE_APP_NAME, + SHARD_TWO_APP_NAME, + MONGOS_APP_NAME, + ], + ) + await ops_test.model.wait_for_idle( + apps=[ + CONFIG_SERVER_APP_NAME, + SHARD_ONE_APP_NAME, + SHARD_TWO_APP_NAME, + TLS_CERTIFICATES_APP_NAME, + ], + status="active", + timeout=1000, + idle_period=60, + ) + + await start_continous_writes(ops_test, CONTINUOUS_WRITE_APPLICATION) + + +async def test_integrate_with_ldap(ops_test: OpsTest, substrate: Substrate): + """Tests that we can integrate with LDAP without losing data.""" + assert ops_test.model + await ops_test.model.integrate(f"{LDAP_OFFER}:ldap", f"{CONFIG_SERVER_APP_NAME}:ldap") + await ops_test.model.integrate( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{CONFIG_SERVER_APP_NAME}:ldap-certificate-transfer" + ) + await ops_test.model.integrate(f"{LDAP_OFFER}:ldap", f"{MONGOS_APP_NAME}:ldap") + await ops_test.model.integrate( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{MONGOS_APP_NAME}:ldap-certificate-transfer" + ) + # Create the roles on MongoDB + await create_mongodb_user_roles( + ops_test, + substrate, + CONFIG_SERVER_APP_NAME, + role_name="ou=superheroes,ou=users,dc=glauth,dc=com", + db=DEFAULT_DATABASE_NAME, + tls=True, + ) + + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME], status="active", timeout=TIMEOUT + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_second_client(ops_test: OpsTest, application_path: str): + """Tests that we can integrate with a second client, and we also start writing on that client. + + The client is a continuous write application. + """ + assert ops_test.model + await deploy_application( + ops_test, + application_path=application_path, + app_name=CONTINUOUS_WRITE_APPLICATION_BIS, + database_name=SECOND_DB_NAME, + ) + await ops_test.model.integrate( + f"{MONGOS_BIS_APP_NAME}", + f"{CONTINUOUS_WRITE_APPLICATION_BIS}", + ) + await integrate_apps_with_tls( + ops_test, + applications=[ + MONGOS_BIS_APP_NAME, + ], + ) + + await ops_test.model.integrate(f"{LDAP_OFFER}:ldap", f"{MONGOS_BIS_APP_NAME}:ldap") + await ops_test.model.integrate( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{MONGOS_BIS_APP_NAME}:ldap-certificate-transfer" + ) + await ops_test.model.wait_for_idle( + apps=[ + MONGOS_BIS_APP_NAME, + CONTINUOUS_WRITE_APPLICATION_BIS, + ], + idle_period=15, + timeout=TIMEOUT, + raise_on_blocked=False, + ) + + await ops_test.model.integrate( + f"{MONGOS_BIS_APP_NAME}", + f"{CONFIG_SERVER_APP_NAME}", + ) + + await ops_test.model.wait_for_idle( + apps=[MONGOS_BIS_APP_NAME, CONTINUOUS_WRITE_APPLICATION_BIS, CONFIG_SERVER_APP_NAME], + timeout=DEPLOYMENT_TIMEOUT, + status="active", + ) + + await start_continous_writes( + ops_test, + CONTINUOUS_WRITE_APPLICATION_BIS, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_third_shard( + ops_test: OpsTest, substrate: Substrate, mongodb_charm_name: str, mongodb_revision: int | None +) -> None: + """Tests that we can integrate a new shard to the cluster.""" + await deploy_charm( + ops_test=ops_test, + revision=mongodb_revision, + charm=mongodb_charm_name, + substrate=substrate, + app_name=SHARD_THREE_APP_NAME, + num_units=len(UNIT_IDS), + config={"role": "shard"}, + ) + await wait_for_mongodb_units_blocked(ops_test, substrate, SHARD_THREE_APP_NAME, timeout=300) + await integrate_apps_with_tls( + ops_test, + applications=[SHARD_THREE_APP_NAME], + ) + + await ops_test.model.integrate( + f"{SHARD_THREE_APP_NAME}:{SHARD_REL_NAME}", + 
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME, SHARD_THREE_APP_NAME], + status="active", + timeout=TIMEOUT, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_third_client(ops_test: OpsTest, application_path: str): + """Tests that we can integrate with a third client, which will only read data. + + The client is a continuous write application. + """ + assert ops_test.model + await deploy_application( + ops_test, + application_path=application_path, + app_name=READER_APPLICATION, + database_name=SECOND_DB_NAME, + ) + await ops_test.model.integrate( + f"{MONGOS_TER_APP_NAME}", + f"{READER_APPLICATION}", + ) + await integrate_apps_with_tls( + ops_test, + applications=[ + MONGOS_TER_APP_NAME, + ], + ) + + await ops_test.model.integrate(f"{LDAP_OFFER}:ldap", f"{MONGOS_TER_APP_NAME}:ldap") + await ops_test.model.integrate( + f"{LDAP_CERT_OFFER}:send-ca-cert", f"{MONGOS_TER_APP_NAME}:ldap-certificate-transfer" + ) + await ops_test.model.wait_for_idle( + apps=[ + MONGOS_TER_APP_NAME, + READER_APPLICATION, + ], + idle_period=15, + timeout=TIMEOUT, + raise_on_blocked=False, + ) + await ops_test.model.integrate( + f"{MONGOS_TER_APP_NAME}", + f"{CONFIG_SERVER_APP_NAME}", + ) + + await ops_test.model.wait_for_idle( + apps=[MONGOS_TER_APP_NAME, READER_APPLICATION, CONFIG_SERVER_APP_NAME], + timeout=DEPLOYMENT_TIMEOUT, + status="active", + ) + + await start_continuous_reads( + ops_test, + READER_APPLICATION, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + + +@pytest.mark.abort_on_fail +async def test_integrate_with_s3( + ops_test: OpsTest, + storage_credentials: dict[str, str], + storage_config: dict[str, str], +): + """Tests that we can integrate with S3 and create a backup. + + This test ensures that the backup is created and finished. + """ + assert ops_test.model + + # deploy the s3 integrator charm + await ops_test.model.deploy(S3_APP_NAME, channel="1/edge") + await ops_test.model.wait_for_idle(apps=[S3_APP_NAME], timeout=DEPLOYMENT_TIMEOUT) + + s3_integrator_unit = ops_test.model.applications[S3_APP_NAME].units[0] + + # apply new configuration options + await ops_test.model.applications[S3_APP_NAME].set_config(storage_config) + action = await s3_integrator_unit.run_action( + action_name="sync-s3-credentials", **storage_credentials + ) + await action.wait() + + await ops_test.model.integrate(S3_APP_NAME, CONFIG_SERVER_APP_NAME) + + await ops_test.model.wait_for_idle( + apps=[S3_APP_NAME, CONFIG_SERVER_APP_NAME], status="active", timeout=TIMEOUT + ) + + leader_unit = await find_unit(ops_test, leader=True, app_name=CONFIG_SERVER_APP_NAME) + action = await leader_unit.run_action(action_name="create-backup") + backup_result = await action.wait() + + logger.info(f"Create backup result {backup_result.results=}") + assert "backup started" in backup_result.results["backup-status"], "backup didn't start" + try: + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(5)): + with attempt: + backups = await count_logical_backups(leader_unit) + assert backups == 1 + except RetryError: + assert backups == 1, "Backup not created." + + +@pytest.mark.abort_on_fail +async def tests_restore_backup(ops_test: OpsTest, substrate: Substrate): + """Tests that we can restore a backup. + + This test starts by stopping the writes applications, and counting the number of writes + ensuring that we have never lost any write until now. 
+ Then it restores the backup, counts the number of writes, + and checks that it is lower than what we had, proving that the backup was restored successfully. + """ + first_reported_writes = await stop_continous_writes(ops_test, CONTINUOUS_WRITE_APPLICATION) + second_reported_writes = await stop_continous_writes( + ops_test, + CONTINUOUS_WRITE_APPLICATION_BIS, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + leader_unit = await find_unit(ops_test, leader=True, app_name=CONFIG_SERVER_APP_NAME) + # count total writes + first_number_writes = await count_writes( + ops_test, substrate, CONFIG_SERVER_APP_NAME, leader_unit, tls=True, mongos=True + ) + second_number_writes = await count_writes( + ops_test, + substrate, + CONFIG_SERVER_APP_NAME, + leader_unit, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + mongos=True, + tls=True, + ) + assert first_number_writes == first_reported_writes + assert second_number_writes == second_reported_writes + + # find most recent backup id and restore + action = await leader_unit.run_action(action_name="list-backups") + list_result = await action.wait() + list_result = list_result.results["backups"] + most_recent_backup = list_result.split("\n")[-1] + + backup_id = most_recent_backup.split()[0] + + action = await leader_unit.run_action(action_name="restore", **{"backup-id": backup_id}) + restore = await action.wait() + logger.info(f"Restore backup result {restore.results=}") + assert restore.results["restore-status"] == "restore started", "restore not successful" + + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME], status="active", idle_period=15 + ) + + first_number_writes_after_restore = await count_writes( + ops_test, substrate, CONFIG_SERVER_APP_NAME, leader_unit, tls=True, mongos=True + ) + second_number_writes_after_restore = await count_writes( + ops_test, + substrate, + CONFIG_SERVER_APP_NAME, + leader_unit, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + tls=True, + mongos=True, + ) + + assert first_number_writes_after_restore < first_number_writes + assert second_number_writes_after_restore < second_number_writes + + +@pytest.mark.abort_on_fail +async def test_ldap_user_can_write(ops_test: OpsTest, substrate: Substrate): + """Checks that the LDAP user can write to the DB. + + This checks both authentication and authorisation. + """ + # We create a client which should be able to write + uri = await generate_mongodb_ldap_client( + ops_test, + substrate, + MONGOS_APP_NAME, + database=DEFAULT_DATABASE_NAME, + username="cn=johndoe,ou=superheroes,ou=users,dc=glauth,dc=com", + password="dogood", + mongos=True, + ) + + result = await execute_on_mongod( + ops_test, + MONGOS_APP_NAME, + substrate, + uri, + "db.test.insertOne({number: 1})", + tls=True, + container_name="mongos", + ) + assert result.succeeded, "Failed to insert value with LDAP client" + + result = await execute_on_mongod( + ops_test, + MONGOS_APP_NAME, + substrate, + uri, + "db.test.findOne({number: 1})", + tls=True, + container_name="mongos", + ) + assert result.succeeded, "Failed to read value with LDAP client" + + +@pytest.mark.abort_on_fail +async def test_valid_reads(ops_test: OpsTest): + """Checks the reads at the end of the tests.""" + reads, failed_reads = await stop_continuous_reads( + ops_test, + READER_APPLICATION, + db_name=SECOND_DB_NAME, + coll_name=SECOND_COLL_NAME, + ) + assert reads > 1000 + # We can allow for a few errors during restore. 
+
+    assert len(failed_reads) < 10
+
+
+@pytest.mark.abort_on_fail
+async def test_teardown(ops_test: OpsTest, kubernetes_model: Model):
+    for app_name in (MONGOS_APP_NAME, MONGOS_BIS_APP_NAME, MONGOS_TER_APP_NAME):
+        await ops_test.model.applications[app_name].remove_relation(
+            f"{LDAP_OFFER}:ldap", f"{app_name}:ldap"
+        )
+        await ops_test.model.applications[app_name].remove_relation(
+            f"{LDAP_CERT_OFFER}:send-ca-cert", f"{app_name}:ldap-certificate-transfer"
+        )
+
+    # The config-server LDAP integrations only exist once, so remove them outside the loop.
+    await ops_test.model.applications[CONFIG_SERVER_APP_NAME].remove_relation(
+        f"{LDAP_OFFER}:ldap", f"{CONFIG_SERVER_APP_NAME}:ldap"
+    )
+    await ops_test.model.applications[CONFIG_SERVER_APP_NAME].remove_relation(
+        f"{LDAP_CERT_OFFER}:send-ca-cert", f"{CONFIG_SERVER_APP_NAME}:ldap-certificate-transfer"
+    )
+
+    await ops_test.model.wait_for_idle(
+        apps=[
+            CONFIG_SERVER_APP_NAME,
+            SHARD_ONE_APP_NAME,
+            SHARD_TWO_APP_NAME,
+            MONGOS_APP_NAME,
+            MONGOS_BIS_APP_NAME,
+            MONGOS_TER_APP_NAME,
+        ],
+        status="active",
+        timeout=TIMEOUT,
+    )
+    await teardown_offers(ops_test, kubernetes_model)
diff --git a/tests/spread/mongodb/lxd/test_ldap.py/task.yaml b/tests/spread/mongodb/lxd/test_ldap.py/task.yaml
index 93322172a0..8605851e5e 100644
--- a/tests/spread/mongodb/lxd/test_ldap.py/task.yaml
+++ b/tests/spread/mongodb/lxd/test_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - ubuntu-22.04
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/mongodb/lxd/test_sharding_ldap.py/task.yaml b/tests/spread/mongodb/lxd/test_sharding_ldap.py/task.yaml
index cf7332f5dc..c331d295f4 100644
--- a/tests/spread/mongodb/lxd/test_sharding_ldap.py/task.yaml
+++ b/tests/spread/mongodb/lxd/test_sharding_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - self-hosted-linux-amd64-noble-medium
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/mongodb/microk8s/test_ldap.py/task.yaml b/tests/spread/mongodb/microk8s/test_ldap.py/task.yaml
index 917e218c8d..9f139741ad 100644
--- a/tests/spread/mongodb/microk8s/test_ldap.py/task.yaml
+++ b/tests/spread/mongodb/microk8s/test_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - ubuntu-22.04
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/mongodb/microk8s/test_sharding_ldap.py/task.yaml b/tests/spread/mongodb/microk8s/test_sharding_ldap.py/task.yaml
index 127a5647a1..792c4950f1 100644
--- a/tests/spread/mongodb/microk8s/test_sharding_ldap.py/task.yaml
+++ b/tests/spread/mongodb/microk8s/test_sharding_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - self-hosted-linux-amd64-noble-medium
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/mongos/lxd/test_ldap.py/task.yaml b/tests/spread/mongos/lxd/test_ldap.py/task.yaml
index cbbbc0479f..c45cc8ae3d 100644
--- a/tests/spread/mongos/lxd/test_ldap.py/task.yaml
+++ b/tests/spread/mongos/lxd/test_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - ubuntu-22.04
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/mongos/microk8s/test_ldap.py/task.yaml b/tests/spread/mongos/microk8s/test_ldap.py/task.yaml
index c5c663ed23..120263acaf 100644
--- a/tests/spread/mongos/microk8s/test_ldap.py/task.yaml
+++ b/tests/spread/mongos/microk8s/test_ldap.py/task.yaml
@@ -6,6 +6,6 @@ execute: |
 artifacts:
   - allure-results
 systems:
-  - ubuntu-22.04
+  - self-hosted-linux-amd64-noble-large
   # TODO: Re-enable this when glauth charm supports arm64
-  #- self-hosted-linux-arm64-noble-medium
+  #- self-hosted-linux-arm64-noble-large
diff --git a/tests/spread/release/lxd/test_release.py/task.yaml b/tests/spread/release/lxd/test_release.py/task.yaml
new file mode 100644
index 0000000000..dbfec19be4
--- /dev/null
+++ b/tests/spread/release/lxd/test_release.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_release.py
+environment:
+  TEST_MODULE: test_release.py
+execute: |
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --substrate lxd --model testing --mongodb-revision $MONGODB_REVISION --mongos-revision $MONGOS_REVISION
+systems:
+  - self-hosted-linux-amd64-noble-xlarge
diff --git a/tests/spread/release/lxd/test_sharding_release.py/task.yaml b/tests/spread/release/lxd/test_sharding_release.py/task.yaml
new file mode 100644
index 0000000000..b5aeea6fa4
--- /dev/null
+++ b/tests/spread/release/lxd/test_sharding_release.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_sharding_release.py
+environment:
+  TEST_MODULE: test_sharding_release.py
+execute: |
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --substrate lxd --model testing --mongodb-revision $MONGODB_REVISION --mongos-revision $MONGOS_REVISION
+systems:
+  - self-hosted-linux-amd64-noble-xlarge
diff --git a/tests/spread/release/microk8s/test_release.py/task.yaml b/tests/spread/release/microk8s/test_release.py/task.yaml
new file mode 100644
index 0000000000..50ad924309
--- /dev/null
+++ b/tests/spread/release/microk8s/test_release.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_release.py
+environment:
+  TEST_MODULE: test_release.py
+execute: |
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --substrate microk8s --model testing --mongodb-revision $MONGODB_REVISION --mongos-revision $MONGOS_REVISION
+systems:
+  - self-hosted-linux-amd64-noble-xlarge
diff --git a/tests/spread/release/microk8s/test_sharding_release.py/task.yaml b/tests/spread/release/microk8s/test_sharding_release.py/task.yaml
new file mode 100644
index 0000000000..989b56594c
--- /dev/null
+++ b/tests/spread/release/microk8s/test_sharding_release.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_sharding_release.py
+environment:
+  TEST_MODULE: test_sharding_release.py
+execute: |
+  tox run -e integration -- "tests/integration/release/$TEST_MODULE" --substrate microk8s --model testing --mongodb-revision $MONGODB_REVISION --mongos-revision $MONGOS_REVISION
+systems:
+  - self-hosted-linux-amd64-noble-xlarge