From 07b748c93aa4e3f5988bb1fff3bcba6501a20956 Mon Sep 17 00:00:00 2001 From: Nurlan Moldomurov Date: Tue, 1 Jul 2025 01:52:04 +0300 Subject: [PATCH 1/8] PMM-7 Update PMM testing documentation and workflows to support version v3 - Revised README.md to enhance clarity on test architecture and repository structure. - Updated GitHub Actions workflows to default to branch 'v3' for various testing scenarios. - Added comprehensive documentation for E2E tests, integration & CLI tests, package tests, infrastructure tests, and upgrade tests. - Introduced troubleshooting guide and test parameters reference for better user support. - Enhanced directory structure in documentation for easier navigation and understanding of testing processes. --- .../e2e-upgrade-tests-matrix-full.yml | 4 +- .../workflows/e2e-upgrade-tests-matrix.yml | 4 +- .github/workflows/e2e-upgrade-tests.yml | 4 +- .../workflows/package-test-matrix-full.yml | 22 +- .github/workflows/package-test-matrix.yml | 6 +- .../workflows/runner-e2e-upgrade-tests.yml | 4 +- .github/workflows/runner-package-test.yml | 2 +- README.md | 117 +++- docs/README.md | 255 ++++++++ docs/e2e-codeceptjs-tests.md | 213 ++++++ docs/e2e-tests.md | 229 +++++++ docs/feature-build-tests.md | 140 ++++ docs/infrastructure-tests.md | 165 +++++ docs/integration-cli-tests.md | 210 ++++++ docs/package-tests.md | 223 +++++++ docs/test-parameters.md | 389 +++++++++++ docs/troubleshooting.md | 612 ++++++++++++++++++ docs/upgrade-tests.md | 198 ++++++ 18 files changed, 2743 insertions(+), 54 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/e2e-codeceptjs-tests.md create mode 100644 docs/e2e-tests.md create mode 100644 docs/feature-build-tests.md create mode 100644 docs/infrastructure-tests.md create mode 100644 docs/integration-cli-tests.md create mode 100644 docs/package-tests.md create mode 100644 docs/test-parameters.md create mode 100644 docs/troubleshooting.md create mode 100644 docs/upgrade-tests.md diff --git a/.github/workflows/e2e-upgrade-tests-matrix-full.yml b/.github/workflows/e2e-upgrade-tests-matrix-full.yml index f1e26e77..22e56ce2 100644 --- a/.github/workflows/e2e-upgrade-tests-matrix-full.yml +++ b/.github/workflows/e2e-upgrade-tests-matrix-full.yml @@ -7,11 +7,11 @@ on: inputs: pmm_ui_tests_branch: description: 'pmm-ui-tests repository branch' - default: 'main' + default: 'v3' required: true pmm_qa_branch: description: 'pmm-qa repository branch(for setup)' - default: 'main' + default: 'v3' required: true repository: description: 'Upgrade to:' diff --git a/.github/workflows/e2e-upgrade-tests-matrix.yml b/.github/workflows/e2e-upgrade-tests-matrix.yml index 584b1b8b..ef0d23ad 100644 --- a/.github/workflows/e2e-upgrade-tests-matrix.yml +++ b/.github/workflows/e2e-upgrade-tests-matrix.yml @@ -7,11 +7,11 @@ on: inputs: pmm_ui_tests_branch: description: 'pmm-ui-tests repository branch' - default: 'main' + default: 'v3' required: true pmm_qa_branch: description: 'pmm-qa repository branch(for setup)' - default: 'main' + default: 'v3' required: true upgrade_type: description: 'Upgrade way:' diff --git a/.github/workflows/e2e-upgrade-tests.yml b/.github/workflows/e2e-upgrade-tests.yml index 04c82f04..baddcd8c 100644 --- a/.github/workflows/e2e-upgrade-tests.yml +++ b/.github/workflows/e2e-upgrade-tests.yml @@ -5,11 +5,11 @@ on: inputs: pmm_ui_tests_branch: description: 'pmm-ui-tests repository branch' - default: 'main' + default: 'v3' required: true pmm_qa_branch: description: 'pmm-qa repository branch(for setup)' - default: 'main' + default: 
'v3' required: true pmm_server_start_version: description: 'PMM Server version to upgrade (latest|dev-latest|x.xx.x|x.xx.x-rc)' diff --git a/.github/workflows/package-test-matrix-full.yml b/.github/workflows/package-test-matrix-full.yml index 38ec6018..4a570ed6 100644 --- a/.github/workflows/package-test-matrix-full.yml +++ b/.github/workflows/package-test-matrix-full.yml @@ -57,7 +57,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -68,7 +68,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -79,7 +79,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -90,7 +90,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -101,7 +101,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -112,7 +112,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -123,7 +123,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -134,7 +134,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -145,7 
+145,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -156,7 +156,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -167,7 +167,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} diff --git a/.github/workflows/package-test-matrix.yml b/.github/workflows/package-test-matrix.yml index 87dda1bd..07bec4f2 100644 --- a/.github/workflows/package-test-matrix.yml +++ b/.github/workflows/package-test-matrix.yml @@ -54,7 +54,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -67,7 +67,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} @@ -80,7 +80,7 @@ jobs: uses: ./.github/workflows/package-test-single.yml secrets: inherit with: - package_testing_branch: ${{ inputs.package_testing_branch || 'master' }} + package_testing_branch: ${{ inputs.package_testing_branch || 'v3' }} package: ${{ inputs.package || 'pmm3-client' }} repository: ${{ inputs.repository || 'dev-latest' }} metrics_mode: ${{ inputs.metrics_mode || 'auto' }} diff --git a/.github/workflows/runner-e2e-upgrade-tests.yml b/.github/workflows/runner-e2e-upgrade-tests.yml index 55ef219e..a776582a 100644 --- a/.github/workflows/runner-e2e-upgrade-tests.yml +++ b/.github/workflows/runner-e2e-upgrade-tests.yml @@ -6,7 +6,7 @@ on: inputs: pmm_ui_tests_branch: description: 'pmm-ui-tests repository branch' - default: 'main' + default: 'v3' type: string required: true pre_upgrade_tests: @@ -34,7 +34,7 @@ on: type: string pmm_qa_branch: description: 'pmm-qa repository branch(for setup)' - default: 'main' + default: 'v3' type: string required: true services_list: diff --git a/.github/workflows/runner-package-test.yml b/.github/workflows/runner-package-test.yml index b56d8d18..1ca2f747 100644 --- a/.github/workflows/runner-package-test.yml +++ b/.github/workflows/runner-package-test.yml @@ -123,7 +123,7 @@ jobs: timeout-minutes: 60 env: SHA: ${{ inputs.sha || 'null' }} - PACKAGE_TESTING_BRANCH: ${{ 
inputs.package_testing_branch || 'master' }} + PACKAGE_TESTING_BRANCH: ${{ inputs.package_testing_branch || 'v3' }} PMM_SERVER_IMAGE: ${{ inputs.pmm_server_image }} TARBALL: ${{ inputs.pmm_client_tarball || 'null' }} EXPECTED_VERSION: ${{ inputs.expected_version }} diff --git a/README.md b/README.md index 35323b63..fa6005c8 100644 --- a/README.md +++ b/README.md @@ -1,45 +1,100 @@ # PMM-QA Automated tests for Percona Monitoring and Management -GUI tests are created for testing frontend of PMM. They include tests for Query Analytics and for Grafana dashboards -## Using Selenoid for running tests in Local -1. Install Node.js and atleast npm 8.x on your system -2. Selenoid and Selenoid UI use port 4444 and 8080 respectively, -make sure they are not being used, otherwise update docker-compose.yml file -3. run npm install in project root. -4. run prepare_ui_test.sh script in the root directory. -`bash -x ./prepare_ui_test.sh` -5. This should start running UI tests in 4 parallel browser sessions inside chrome containers with help of selenoid -6. Check live execution by launching http://localhost:8080 in your browser. +## Test Architecture Overview -## If you'd like to have more control over the UI test framework parameters, please check out next sections +This project employs a comprehensive testing strategy, utilizing various frameworks and methodologies to ensure the quality and stability of Percona Monitoring and Management (PMM). The tests are broadly categorized by their focus and the tools they use: -### Installation (UI tests version 2.0) -1. Install Node.js and atleast npm 8.x on your system -2. Checkout `main` branch for pmm-qa Repo -3. To run tests on your local systems, delete `codecept.json` and rename `local.codecept.json` to `codecept.json` -4. Make sure to update URL of the application in the `webdriver` helper in the configuration file (codecept.json) -5. Install latest version of JDK on your system +- **End-to-End (E2E) UI Tests**: These tests validate the PMM user interface and user workflows. They are primarily written using Playwright and CodeceptJS. +- **CLI/Integration Tests**: These tests focus on the functionality of the `pmm-admin` command-line interface and the integration between PMM components and monitored services. They are typically written using Playwright for CLI interactions and Python for service setup. +- **Package Tests**: These tests verify the installation and functionality of PMM client packages across various operating systems. They leverage Vagrant for virtualized environments and Ansible for automation. +- **Infrastructure Tests**: These tests validate PMM deployments in different environments, including Kubernetes/Helm and using the Easy Install script. They utilize Bats for testing Helm deployments. -> Follow any one of these: +Each test type has its own dedicated documentation, detailing how to run and write tests, along with their specific directory structures and conventions. -6. Install Selenium Standalone server via npm globally using `npm install selenium-standalone -g` -7. Run the following `selenium-standalone start` -> OR -6. Install Selenium Standalone server locally via npm `npm install selenium-standalone --save-dev` -7. Run the following `./node_modules/.bin/selenium-standalone install && ./node_modules/.bin/selenium-standalone start` -8. 
Inside the root folder for `pmm-qa` run `npm install` this will install all required packages -### How to use -Run all Tests: +### Repository Directory Structures + +Understanding the layout of the key repositories involved in PMM QA is essential for navigating the codebase and contributing to tests. + +#### `pmm-qa` (This Repository) + ``` -./node_modules/.bin/codeceptjs run --steps +. +├── .github/ # GitHub Actions workflows +├── docs/ # Project documentation +├── k8s/ # Kubernetes/Helm test scripts (Bats) +├── pmm-integration/ # PMM integration setup scripts (TypeScript) +├── pmm-tests/ # PMM test scripts (Python, Bash) +├── tests/ # General test utilities +├── .gitignore +├── docker-compose.yml +├── LICENSE +├── package-lock.json +├── README.md # This file +└── TEST_EXECUTION_GUIDE.md ``` -Run individual Tests: + +#### `pmm-ui-tests` + +This repository contains the UI End-to-End tests for PMM. + ``` -./node_modules/.bin/codeceptjs run --steps tests/verifyMysqlDashboards_test.js +pmm-ui-tests/ +├── playwright-tests/ # Playwright E2E tests +│ ├── pages/ # Page Object Model definitions +│ │ ├── LoginPage.ts +│ │ └── DashboardPage.ts +│ ├── tests/ # Actual Playwright test files (.spec.ts) +│ └── playwright.config.ts # Playwright configuration +├── tests/ # CodeceptJS E2E tests +│ ├── pages/ # Page Object Model definitions +│ │ ├── LoginPage.js +│ │ └── DashboardPage.js +│ ├── login_test.js +│ └── ... +├── cli/ # Playwright tests for CLI interactions +│ ├── tests/ # CLI test files (.spec.ts) +│ └── ... +├── helpers/ # CodeceptJS custom helpers +├── config/ # CodeceptJS configuration files +├── pr.codecept.js # Main CodeceptJS configuration +├── docker-compose.yml # Docker Compose for PMM server setup +└── ... ``` -We have implemented the tests to run in parallel chunks of 3, which will basically launch 3 browsers and execute different tests, -to make any change to that, modify the configuration file `codecept.json` +#### `qa-integration` + +This repository provides Python-based scripts for setting up and managing PMM test environments and services. + +``` +qa-integration/ +├── pmm_qa/ # Core Python setup scripts +│ ├── pmm-framework.py # Main script for setting up services +│ ├── helpers/ # Helper modules for pmm-framework.py +│ ├── mysql/ +│ ├── mongoDb/ +│ ├── postgres/ +│ └── ... +├── pmm-tests/ # Additional Python/Bash test scripts +├── requirements.txt # Python dependencies +└── ... +``` + +#### `package-testing` + +This repository contains Ansible playbooks for testing PMM client package installations across various operating systems. + +``` +package-testing/ +├── playbooks/ # Ansible playbooks for different test scenarios +│ ├── pmm3-client_integration.yml +│ └── ... +├── roles/ # Reusable Ansible roles (e.g., pmm-client) +├── inventory.ini # Ansible inventory file +├── Vagrantfile # Vagrant configuration for test VMs +└── ... +``` + + diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..91ce6ab9 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,255 @@ +# PMM-QA Testing Documentation + +Welcome to the PMM-QA comprehensive testing documentation. This directory contains detailed guides for running various types of tests in the PMM (Percona Monitoring and Management) QA repository. 
+ +## 📚 **Documentation Overview** + +This documentation is organized by test type to provide focused guidance for different testing scenarios: + +### **Core Testing Guides** + +| Document | Description | Use Case | +|----------|-------------|----------| +| [Integration & CLI Tests](integration-cli-tests.md) | PMM CLI functionality testing | Daily development validation | +| [End-to-End Tests](e2e-tests.md) | UI testing with Playwright | Feature validation | +| [Upgrade Tests](upgrade-tests.md) | PMM upgrade scenarios | Release validation | +| [Package Tests](package-tests.md) | Package installation testing | Distribution validation | +| [Infrastructure Tests](infrastructure-tests.md) | Kubernetes and platform testing | Infrastructure validation | +| [Feature Build Tests](feature-build-tests.md) | Docker images with new features testing | Feature validation | + +### **Reference Guides** + +| Document | Description | +|----------|-------------| +| [Test Parameters Reference](test-parameters.md) | Complete parameter documentation | +| [Troubleshooting Guide](troubleshooting.md) | Common issues and solutions | + +--- + +## 🚀 **Quick Start Guide** + +### Prerequisites +- Access to the `percona/pmm-qa` repository +- Permissions to trigger GitHub Actions workflows +- Understanding of PMM architecture and components + +### Most Common Testing Scenarios + +#### 🔄 **Daily Development Testing** +```yaml +Workflow: PMM Integration Tests +Purpose: Validate CLI functionality +Duration: ~2 hours (all jobs) +Frequency: Daily/Per commit +``` +**[→ Go to Integration & CLI Tests Guide](integration-cli-tests.md)** + +#### 🎭 **Feature Validation** +```yaml +Workflow: PMM e2e Tests(Playwright) +Purpose: Validate UI functionality +Duration: ~1 hour +Frequency: Per feature +``` +**[→ Go to End-to-End Tests Guide](e2e-tests.md)** + +#### ⬆️ **Release Validation** +```yaml +Workflow: PMM Upgrade Tests +Purpose: Validate upgrade scenarios +Duration: ~1 hour +Frequency: Pre-release +``` +**[→ Go to Upgrade Tests Guide](upgrade-tests.md)** + +#### 📦 **Distribution Validation** +```yaml +Workflow: Package Test Matrix +Purpose: Validate package installation +Duration: ~1 hour +Frequency: Per package release +``` +**[→ Go to Package Tests Guide](package-tests.md)** + +--- + +## 🏗️ **Test Infrastructure Overview** + +### **Supported Platforms** +- **Operating Systems**: Ubuntu (Noble, Jammy), Oracle Linux (8, 9), Rocky Linux 9 +- **Container Runtimes**: Docker, Podman +- **Orchestration**: Kubernetes (via Helm), Docker Compose +- **Cloud**: GitHub Actions runners + +### **Database Coverage** +- **MySQL Family**: Percona Server (5.7, 8.0), MySQL (8.0) +- **PostgreSQL Family**: Percona Distribution for PostgreSQL (14, 15) +- **MongoDB Family**: Percona Server for MongoDB +- **Proxy/Load Balancers**: ProxySQL, HAProxy + +### **Testing Frameworks** +- **CLI Testing**: Playwright (TypeScript) +- **UI Testing**: Playwright, CodeceptJS +- **Infrastructure Testing**: BATS (Bash Automated Testing System) +- **Package Testing**: Ansible playbooks + +--- + +## 📊 **Workflow Architecture** + +### **Workflow Categories** + +```mermaid +graph TB + A[PMM-QA Workflows] --> B[Integration Tests] + A --> C[E2E Tests] + A --> D[Upgrade Tests] + A --> E[Package Tests] + A --> F[Infrastructure Tests] + A --> G[Feature Build Tests] + + B --> B1[CLI Functionality] + B --> B2[Database Integration] + B --> B3[Container Testing] + + C --> C1[Portal Tests] + C --> C2[Inventory Tests] + C --> C3[Component Tests] + + D --> D1[UI Upgrade] + D --> 
D2[Docker Upgrade] + D --> D3[Podman Upgrade] + + E --> E1[Standard Install] + E --> E2[Custom Path] + E --> E3[Custom Port] + + F --> F1[Helm/K8s] + F --> F2[Easy Install] + + G --> G1[Feature Build Testing] + G --> G2[Docker Image Validation] + G --> G3[Feature-Specific Tests] +``` + +### **Reusable Workflow Pattern** + +Most workflows follow a reusable pattern: +1. **Main Workflow** - Defines parameters and orchestrates jobs +2. **Runner Workflow** - Reusable component that executes tests +3. **Matrix Strategy** - Tests across multiple versions/platforms + +--- + +## ⚡ **Emergency Testing Commands** + +### **Quick Smoke Tests** +```yaml +# 5-minute validation +Test: help-tests only +Purpose: Verify basic CLI functionality + +# 15-minute validation +Test: generic-tests only +Purpose: Verify database connectivity + +# 30-minute validation +Test: @portal only +Purpose: Verify core UI functionality +``` + +### **Critical Path Testing** +```yaml +# Core functionality +Workflows: PMM Integration Tests (help, generic) +Duration: ~20 minutes + +# UI critical path +Workflows: E2E Tests (@portal) +Duration: ~30 minutes + +# Upgrade critical path +Workflows: Upgrade Tests (configuration only) +Duration: ~30 minutes +``` + +--- + +## 🛠️ **Development Workflow Integration** + +### **Pre-Commit Testing** +1. Run local CLI tests for changed components +2. Validate specific database integration if DB-related changes +3. Test UI components if frontend changes + +### **Pull Request Testing** +1. Full integration test suite +2. Relevant E2E test categories +3. Upgrade tests if core changes +4. Package tests if packaging changes + +### **Release Testing** +1. Complete test matrix across all platforms +2. All upgrade scenarios +3. Full feature build test suite +4. Infrastructure deployment tests + +--- + +## 📋 **Test Execution Checklist** + +### **Before Running Tests** +- [ ] Verify repository access and permissions +- [ ] Check if required versions/images are available +- [ ] Review resource availability (avoid concurrent large tests) +- [ ] Confirm external service availability (if applicable) + +### **During Test Execution** +- [ ] Monitor test progress for early failure detection +- [ ] Check logs for setup issues +- [ ] Verify resource utilization +- [ ] Track test duration vs. 
expectations + +### **After Test Completion** +- [ ] Review all test results and reports +- [ ] Download and analyze failure artifacts +- [ ] Document any new issues discovered +- [ ] Update test configurations if needed +- [ ] Share results with relevant stakeholders + +--- + +## 🔗 **Additional Resources** + +### **Related Repositories** +- [pmm-ui-tests](https://github.com/percona/pmm-ui-tests) - UI test suite +- [qa-integration](https://github.com/Percona-Lab/qa-integration) - Integration setup +- [pmm-server](https://github.com/percona/pmm) - PMM Server codebase +- [pmm-client](https://github.com/percona/pmm-client) - PMM Client codebase + +### **External Documentation** +- [PMM Documentation](https://docs.percona.com/percona-monitoring-and-management/) +- [Playwright Documentation](https://playwright.dev/) +- [GitHub Actions Documentation](https://docs.github.com/en/actions) + +### **Support Channels** +- **Issues**: [PMM-QA GitHub Issues](https://github.com/percona/pmm-qa/issues) +- **Discussions**: PMM team internal channels +- **Documentation**: This documentation set + +--- + +## 🏷️ **Version Information** + +| Component | Version | Notes | +|-----------|---------|-------| +| PMM Server | 3-dev-latest | Default development version | +| PMM Client | 3-dev-latest | Default development version | +| Testing Framework | v3 | Current major version | +| Documentation | v1.0 | This documentation version | + +--- + +**Last Updated**: December 2024 +**Maintained By**: PMM QA Team +**Repository**: [percona/pmm-qa](https://github.com/percona/pmm-qa) \ No newline at end of file diff --git a/docs/e2e-codeceptjs-tests.md b/docs/e2e-codeceptjs-tests.md new file mode 100644 index 00000000..d5ab7e59 --- /dev/null +++ b/docs/e2e-codeceptjs-tests.md @@ -0,0 +1,213 @@ +# E2E CodeceptJS Tests + +This guide provides instructions for running the PMM E2E tests that use the CodeceptJS framework. These tests cover a wide range of scenarios, including SSL, experimental features, and more. + +## 💡 **What are E2E CodeceptJS Tests?** + +These tests are designed to validate specific and advanced PMM functionalities. They ensure that: + +- **SSL connections are secure**: Verifying that PMM can connect to databases over SSL. +- **Experimental features are stable**: Testing features that are not yet released to the general public. +- **Core functionality is robust**: Covering scenarios like disconnecting and reconnecting services. + +## 🤖 **How to Run E2E CodeceptJS Tests Locally** + +The following steps will guide you through setting up the environment and running the CodeceptJS tests locally, based on the `e2e-codeceptjs-matrix.yml` CI workflow. + +### **Prerequisites** + +- **Git**: To clone the required repositories. +- **Docker** and **Docker Compose**: To run the PMM server and other services. +- **Node.js (v18+)** and **npm**: For running the test frameworks. +- **Python 3** and **pip**: For running setup scripts. +- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, and others. + +### **Step 1: Clone Repositories** + +First, clone the `pmm-ui-tests` and `qa-integration` repositories. These contain the test code and setup scripts. + +```bash +git clone --branch v3 https://github.com/percona/pmm-ui-tests.git +git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git +``` + +### **Step 2: Install System Dependencies** + +Install the required system packages. The command below is for Debian/Ubuntu-based systems. 
+
+```bash
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client
+curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin
+```
+
+### **Step 3: Set Up PMM Server**
+
+Next, set up and start the PMM server using Docker Compose.
+
+```bash
+cd pmm-ui-tests
+
+# Create a docker network for PMM
+docker network create pmm-qa || true
+
+# Start PMM Server
+PMM_SERVER_IMAGE=perconalab/pmm-server:3-dev-latest docker compose -f docker-compose.yml up -d
+
+# Wait for the server to be ready and change the admin password
+sleep 60
+docker exec pmm-server change-admin-password admin-password
+docker network connect pmm-qa pmm-server || true
+
+cd ..
+```
+
+### **Step 4: Set Up PMM Client and Services**
+
+Now, set up the PMM client and the database services you want to monitor.
+
+```bash
+cd qa-integration/pmm_qa
+
+# Install the PMM client
+sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no
+
+# Set up the test environment and services (e.g., a single Percona Server instance)
+python3 -m venv virtenv
+source virtenv/bin/activate
+pip install --upgrade pip
+pip install -r requirements.txt
+python pmm-framework.py --pmm-server-password=admin-password --database ps
+
+cd ../..
+```
+**Note:** You can customize the services by changing the arguments passed to `pmm-framework.py`. For example, to set up multiple databases for inventory tests, use `--database ps --database psmdb --database pdpgsql`.
+
+### **Step 5: Install Test Dependencies**
+
+Install the Node.js dependencies required for the UI tests.
+
+```bash
+cd pmm-ui-tests
+npm ci
+npx playwright install --with-deps
+```
+
+### **Step 6: Run the Tests**
+
+Run the CodeceptJS tests using the appropriate tags. The setup for the services will vary depending on the test.
+
+#### **SSL Tests**
+
+```bash
+# Set up the environment for MySQL SSL tests
+python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database ssl_mysql
+
+# Run the MySQL SSL tests
+./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@ssl-mysql"
+```
+
+#### **Experimental Tests**
+
+```bash
+# Set up the environment for experimental tests
+python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database pdpgsql
+
+# Run the experimental tests
+./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@experimental"
+```
+
+## 📋 **Available Test Suites**
+
+| Test Suite | Test Tag(s) | Description |
+|---|---|---|
+| Settings and CLI | `@settings\|@cli` | General settings and CLI tests. |
+| SSL Tests | `@ssl-mysql`, `@ssl-mongo`, `@ssl-postgres` | Tests for SSL connections to different databases. |
+| Experimental | `@experimental` | Tests for experimental features. |
+| Disconnect | `@disconnect` | Tests for disconnecting and reconnecting services. |
+
+## 📝 **How to Write CodeceptJS Tests**
+
+All paths mentioned in this section are relative to the root of the `pmm-ui-tests` repository, which can be found [here](https://github.com/percona/pmm-ui-tests/tree/v3).
+
+CodeceptJS tests are written in JavaScript and provide a high-level, readable syntax for UI interactions. They are built on top of WebDriver or Playwright and use a BDD-style syntax.
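+
+Before writing or running suites, it can help to preview which scenarios a tag actually matches. A minimal sketch, assuming a CodeceptJS version that supports the `dry-run` command (the `@disconnect` tag is just an illustration):
+
+```bash
+cd pmm-ui-tests
+# Print the matching scenarios without launching browsers or services
+npx codeceptjs dry-run -c pr.codecept.js --grep "@disconnect"
+```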
+ +### **Test Structure and Directory Layout** + +CodeceptJS tests for PMM UI are primarily located in the `pmm-ui-tests/tests` directory. Tests are organized by feature or functional area. + +``` +pmm-ui-tests/ +├── tests/ # Actual test files +│ ├── pages/ # Page Object Model definitions +│ │ ├── LoginPage.js +│ │ └── DashboardPage.js +│ ├── login_test.js +│ ├── inventory_test.js +├── helpers/ # Custom helpers for common actions +├── config/ # Configuration files +└── pr.codecept.js # Main CodeceptJS configuration +``` + +- **`tests/`**: This directory contains the main test files (`_test.js`). Each file typically covers a specific feature or a logical group of functionalities. +- **`pages/`**: Similar to Playwright, CodeceptJS also supports the Page Object Model. This directory holds page object definitions, abstracting UI interactions. +- **`helpers/`**: Custom helpers can be created to encapsulate common actions or assertions, promoting reusability. +- **`pr.codecept.js`**: This is the primary configuration file for CodeceptJS, defining helpers, plugins, and test paths. + +### **Writing Conventions** + +- **BDD Style**: Tests are written using `Scenario` and `I` (the actor) to describe user interactions in a readable way. +- **Page Objects**: Utilize Page Objects for interacting with UI elements to improve maintainability. +- **Tags**: Use `@` tags in `Scenario` or `Feature` blocks to categorize tests (e.g., `@bm-mongo`, `@exporters`). These tags are used for selective test execution. +- **Comments**: Add comments for complex logic or to explain the *why* behind certain steps. + +### **Basic Test Example** + +A typical CodeceptJS test file (`_test.js`) will look like this: + +```javascript +Feature('Login'); + +Scenario('should display login form', ({ I }) => { + I.amOnPage('http://localhost/'); + I.seeElement('input[name="username"]'); + I.seeElement('input[name="password"]'); + I.seeElement('button[type="submit"]'); +}); + +Scenario('should allow user to login', ({ I }) => { + I.amOnPage('http://localhost/'); + I.fillField('input[name="username"]', 'admin'); + I.fillField('input[name="password"]', 'admin'); + I.click('button[type="submit"]'); + I.see('Dashboard'); +}); +``` + +### **Key Concepts** + +- **`Feature`**: Defines a test suite. +- **`Scenario`**: Represents an individual test case. +- **`I` (the actor)**: The global object for performing UI actions (e.g., `I.amOnPage()`, `I.click()`). +- **Helpers**: Provide methods for `I` to interact with the browser. +- **Tags**: Used for categorizing and selectively running tests. + +### **Running New Tests** + +After creating a new test file, you can run it using the `codeceptjs run` command, specifying the path to your test file or using a `grep` pattern for its title or tags. 
+ +```bash +cd pmm-ui-tests +./node_modules/.bin/codeceptjs run -c pr.codecept.js tests/my_new_feature_test.js +# Or with a grep pattern +./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep="@my-new-feature" +``` + +--- + +**Related Documentation**: +- [E2E Tests](e2e-tests.md) +- [Feature Build Tests](feature-build-tests.md) +- [Integration & CLI Tests](integration-cli-tests.md) +- [Test Parameters Reference](test-parameters.md) +- [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/e2e-tests.md b/docs/e2e-tests.md new file mode 100644 index 00000000..1dcd9d7d --- /dev/null +++ b/docs/e2e-tests.md @@ -0,0 +1,229 @@ +# End-to-End (E2E) Tests + +This guide provides instructions for running the PMM End-to-End (E2E) tests locally. These tests validate the PMM UI functionality and user workflows using Playwright and CodeceptJS. + +## 💡 **What are E2E Tests?** + +E2E tests simulate real user scenarios from start to finish, ensuring all components of the PMM UI work together correctly. They are crucial for: + +- **Validating new features**: Ensuring new UI functionality works as expected. +- **Preventing regressions**: Making sure existing features are not broken by new changes. +- **Ensuring stability**: Testing the integration between the PMM server and the UI. + +## 🤖 **How to Run E2E Tests Locally** + +The following steps will guide you through setting up the necessary environment and running the E2E tests on your local machine. These instructions are based on the steps performed by the CI runners (`runner-e2e-tests-playwright.yml` and `runner-e2e-tests-codeceptjs.yml`). + +### **Prerequisites** + +- **Git**: To clone the required repositories. +- **Docker** and **Docker Compose**: To run the PMM server and other services. +- **Node.js (v18+)** and **npm**: For running the test frameworks. +- **Python 3** and **pip**: For running setup scripts. +- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, and others. + +### **Step 1: Clone Repositories** + +First, clone the `pmm-ui-tests` and `qa-integration` repositories. These contain the test code and setup scripts. + +```bash +git clone --branch v3 https://github.com/percona/pmm-ui-tests.git +git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git +``` + +### **Step 2: Install System Dependencies** + +Install the required system packages. The command below is for Debian/Ubuntu-based systems. + +```bash +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client +curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin +``` + +### **Step 3: Set Up PMM Server** + +Next, set up and start the PMM server using Docker Compose. + +```bash +cd pmm-ui-tests + +# Create a docker network for PMM +docker network create pmm-qa || true + +# Start PMM Server +PMM_SERVER_IMAGE=perconalab/pmm-server:3-dev-latest docker compose -f docker-compose.yml up -d + +# Wait for the server to be ready and change the admin password +sleep 60 +docker exec pmm-server change-admin-password admin-password +docker network connect pmm-qa pmm-server || true + +cd .. +``` + +### **Step 4: Set Up PMM Client and Services** + +Now, set up the PMM client and the database services you want to monitor. 
+ +```bash +cd qa-integration/pmm_qa + +# Install the PMM client +sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no + +# Set up the test environment and services (e.g., a single Percona Server instance) +python3 -m venv virtenv +source virtenv/bin/activate +pip install --upgrade pip +pip install -r requirements.txt +python pmm-framework.py --pmm-server-password=admin-password --database ps + +cd ../.. +``` +**Note:** You can customize the services by changing the arguments passed to `pmm-framework.py`. For example, to set up multiple databases for inventory tests, use `--database ps --database psmdb --database pdpgsql`. + +### **Step 5: Install Test Dependencies** + +Install the Node.js dependencies required for the UI tests. + +```bash +cd pmm-ui-tests +npm ci +npx playwright install --with-deps +``` + +### **Step 6: Run the Tests** + +Finally, run the E2E tests. You can run specific test suites by using tags. + +#### **Running Playwright Tests** + +```bash +# Run the Portal test suite +npx playwright test --project="Portal" --grep="@portal" + +# Run the Inventory test suite +npx playwright test --project="Chromium" --grep="@inventory" +``` + +#### **Running CodeceptJS Tests** + +```bash +# First, generate the environment file +envsubst < env.list > env.generated.list + +# Run the Backup Management test suite for MongoDB +./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep="@bm-mongo" +``` + +## 📋 **Available Test Suites** + +Here are some of the main test suites you can run: + +| Test Suite | Tag | Framework | Description | +|---|---|---|---| +| Portal | `@portal` | Playwright | Tests the main PMM Portal functionality. | +| Inventory | `@inventory` | Playwright | Tests the service inventory management pages. | +| Backup Management (Mongo) | `@bm-mongo` | CodeceptJS | Tests backup and restore for MongoDB. | +| Exporters | `@exporters` | CodeceptJS | Validates various exporters. | +| Settings | `@settings` | CodeceptJS | Tests the PMM settings and configuration pages. | + +## 📝 **How to Write Playwright Tests** + +All paths mentioned in this section are relative to the root of the `pmm-ui-tests` repository, which can be found [here](https://github.com/percona/pmm-ui-tests/tree/v3). + +Playwright tests are written in TypeScript and use a clear, readable syntax. Tests are typically organized into `describe` blocks for test suites and `test` blocks for individual test cases. + +### **Test Structure and Directory Layout** + +Playwright tests for PMM UI are located in the `pmm-ui-tests/playwright-tests` directory. Within this directory, tests are organized by feature or functional area. For example: + +``` +pmm-ui-tests/ +├── playwright-tests/ +│ ├── pages/ # Page Object Model definitions +│ │ ├── LoginPage.ts +│ │ └── DashboardPage.ts +│ │ └── ServicesPage.ts +│ ├── tests/ # Actual test files +│ │ ├── login.spec.ts +│ │ └── inventory.spec.ts +│ ├── fixtures/ # Test data or reusable components +│ └── playwright.config.ts # Playwright configuration +``` + +- **`pages/`**: This directory typically contains Page Object Model (POM) files. POM is a design pattern that helps create an object repository for UI elements within the application. Each page in the web application has a corresponding Page Object class, which contains methods that perform interactions on that web page. +- **`tests/`**: This is where the actual test files (`.spec.ts`) reside. 
Each file usually contains tests for a specific feature or a logical group of functionalities. +- **`fixtures/`**: This directory can be used for test data, custom fixtures, or reusable test components. +- **`playwright.config.ts`**: This file configures Playwright, including projects, reporters, and global setup/teardown. + +### **Writing Conventions** + +- **Descriptive Naming**: Test files and test blocks should have clear, descriptive names that indicate their purpose (e.g., `login.spec.ts`, `test.describe('Login Page')`). +- **Page Object Model (POM)**: Utilize the Page Object Model for interacting with UI elements. This improves test readability, maintainability, and reduces code duplication. +- **Assertions**: Use `expect` assertions to verify the state of the UI. Be specific with your assertions. +- **Tags**: Use `@` tags in `test.describe` or `test` blocks to categorize tests (e.g., `@portal`, `@inventory`). These tags are used to run specific subsets of tests. +- **Comments**: Add comments to explain complex logic or the *why* behind certain actions, rather than just *what* is being done. + +### **Basic Test Example** + +Here's an example demonstrating how to navigate to the Inventory page and verify a service: + +```typescript +import { test, expect } from '@playwright/test'; +import { ServicesPage } from './pages/ServicesPage'; // Assuming ServicesPage is defined + +test.describe('PMM Inventory', () => { + let servicesPage: ServicesPage; + + test.beforeEach(async ({ page }) => { + servicesPage = new ServicesPage(page); + await page.goto(servicesPage.url); // Navigate to the Inventory page URL + await servicesPage.verifyPageLoaded(); // Custom method to wait for page to load + }); + + test('should verify local MongoDB service presence', async () => { + const serviceName = 'mo-integration-'; // Example service name + await servicesPage.servicesTable.verifyService({ serviceName }); // Custom method to verify service in a table + }); + + test('should verify kebab menu options for MongoDB service', async () => { + const serviceName = 'mo-integration-'; + await servicesPage.servicesTable.buttons.options(serviceName).click(); + await expect(servicesPage.servicesTable.buttons.deleteService).toBeVisible(); + await expect(servicesPage.servicesTable.buttons.serviceDashboard).toBeVisible(); + await expect(servicesPage.servicesTable.buttons.qan).toBeVisible(); + }); +}); +``` + +### **Key Concepts** + +- **`test` object**: Used for defining tests, test suites, and hooks. +- **`page` object**: Represents a browser tab, used for navigation and interaction. +- **Locators**: Methods to find elements on the page (e.g., `page.locator('input[name="username"]')`). +- **`expect` object**: Used for making assertions about the UI state. +- **`await` keyword**: Essential for asynchronous Playwright operations. +- **Page Object Model (POM)**: A design pattern where web pages are represented as classes, abstracting UI elements and interactions. This improves test readability and maintainability. + +### **Running New Tests** + +After creating a new test file, you can run it using the `npx playwright test` command, specifying the path to your test file or using a `grep` pattern for its title or tags. 
+ +```bash +cd pmm-ui-tests +npx playwright test playwright-tests/my-new-test.spec.ts +# Or with a grep pattern +npx playwright test --grep="@my-new-feature" +``` + +--- + +**Related Documentation**: +- [Integration & CLI Tests](integration-cli-tests.md) +- [Infrastructure Tests](infrastructure-tests.md) +- [Package Tests](package-tests.md) +- [Upgrade Tests](upgrade-tests.md) +- [Test Parameters Reference](test-parameters.md) +- [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/feature-build-tests.md b/docs/feature-build-tests.md new file mode 100644 index 00000000..29ecce17 --- /dev/null +++ b/docs/feature-build-tests.md @@ -0,0 +1,140 @@ +# Feature Build Tests + +This guide provides instructions for running the PMM Feature Build (FB) tests locally. These tests are designed to validate Docker images built with new features before they are merged into the main codebase. + +## 💡 **What are Feature Build Tests?** + +Feature Build tests are comprehensive UI testing suites that validate new features in PMM. They ensure that: + +- **New features work correctly**: Verifying that the new functionality behaves as expected. +- **There are no regressions**: Ensuring that existing functionality is not broken by the new feature. +- **The UI remains consistent**: Checking that the new feature integrates well with the existing UI. + +## 🤖 **How to Run Feature Build E2E Tests Locally** + +You can reproduce the CI runner workflow for Feature Build E2E tests on your local machine. This is useful for debugging, development, or validating changes before pushing to CI. The steps below mirror what happens in the CI runner, with local commands and explanations. + +### **Prerequisites** +- **Docker** and **Docker Compose** installed +- **Node.js** (v18+) and **npm** +- **Python 3** and **pip** +- **Ansible**, **Clickhouse client**, and other system dependencies (see below) +- Sufficient disk space and permissions to run containers + +#### **Step-by-Step Local Execution (CI Runner Steps)** + +1. **Clone the Required Repositories** + + Clone both the UI tests and QA integration repositories at the correct branch: + ```bash + git clone --branch v3 https://github.com/percona/pmm-ui-tests.git + git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git + ``` + +2. **Install System Dependencies** + + Install all required system packages and tools (Ansible, Clickhouse client, dbdeployer, etc): + ```bash + sudo apt-get update + sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client + curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin + ``` + +3. **Clean Up Disk Space (Optional, but recommended)** + + Free up space on your system to avoid issues with large Docker images: + ```bash + sudo rm -rf /usr/share/dotnet /opt/ghc "/usr/local/share/boost" + ``` + +4. **Start PMM Server with Docker Compose** + + This step sets up the PMM Server container, changes the admin password, and runs initial DB setup scripts: + ```bash + cd pmm-ui-tests + docker network create pmm-qa || true + PMM_SERVER_IMAGE=perconalab/pmm-server-fb:feature-xyz docker compose -f docker-compose.yml up -d + sleep 60 + docker exec pmm-server change-admin-password admin-password + bash -x testdata/db_setup.sh + docker network connect pmm-qa pmm-server || true + cd .. + ``` + +5. 
**Set Up PMM Client**
+
+   This step configures the PMM Client to connect to your local PMM Server:
+   ```bash
+   cd qa-integration/pmm_qa
+   sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no
+   cd ../..
+   ```
+
+6. **Prepare Python Environment and Run Setup**
+
+   This step prepares the test environment and configures databases/services as needed for the test suite. Replace `[SETUP_ARGS]` with the appropriate setup string, e.g. `--database psmdb,SETUP_TYPE=pss`:
+   ```bash
+   cd qa-integration/pmm_qa
+   mkdir -m 777 -p /tmp/backup_data
+   python3 -m venv virtenv
+   source virtenv/bin/activate
+   pip install --upgrade pip
+   pip install -r requirements.txt
+   pip install setuptools
+   python pmm-framework.py --pmm-server-password=admin-password --verbose [SETUP_ARGS]
+   cd ../..
+   ```
+
+7. **Install Node.js Dependencies for UI Tests**
+
+   Installs all required Node.js modules and Playwright browser dependencies for UI testing:
+   ```bash
+   cd pmm-ui-tests
+   npm ci
+   npx playwright install --with-deps
+   envsubst < env.list > env.generated.list
+   ```
+
+### **Step 8: Run the Tests**
+
+Finally, run the E2E tests for the specific feature. Use the appropriate tag for the test suite you want to run.
+
+```bash
+# Example for MongoDB backup management tests:
+./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep "@bm-mongo"
+
+# Example for exporter tests:
+./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep "@exporters"
+```
+
+## 📋 **Available Test Suites**
+
+| Test Suite | Test Tag(s) | Description |
+|---|---|---|
+| Backup Management | `@bm-mongo`, `@bm-locations` | Tests for backup and restore functionality. |
+| Exporters | `@exporters`, `@mongodb-exporter` | Tests for various exporters. |
+| UI Components | `@fb-instances`, `@fb-alerting` | Tests for different UI components. |
+| PostgreSQL Monitoring | `@pgsm-pmm-integration` | Tests for pg_stat_monitor integration. |
+
+## 📝 **How to Write Feature Build Tests**
+
+Feature Build tests are essentially End-to-End (E2E) UI tests that focus on validating new features. Therefore, the principles and practices for writing these tests are the same as for general E2E UI tests.
+
+- For writing **Playwright** tests, refer to the [How to Write Playwright Tests](e2e-tests.md#how-to-write-playwright-tests) section in the E2E Tests documentation.
+- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-codeceptjs-tests.md#how-to-write-codeceptjs-tests) section in the E2E CodeceptJS Tests documentation.
+
+When writing Feature Build tests, pay special attention to:
+
+- **Targeting new features**: Ensure your tests specifically cover the new functionality.
+- **Regression prevention**: Include checks for existing features that might be affected by the new changes.
+- **Using appropriate tags**: Tag your tests with relevant `@fb-` tags (e.g., `@fb-instances`, `@fb-alerting`) to categorize them as feature build tests; a tag-scoped run is sketched below.
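+
+For example, with tags in place, a tag-scoped run validates just the feature area. This reuses the CodeceptJS invocation from Step 8 above; the `@fb-` tags shown are illustrative:
+
+```bash
+cd pmm-ui-tests
+# Run only the scenarios tagged for one feature area
+./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep "@fb-instances"
+
+# A regex alternation covers several related feature areas in one pass
+./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep "@fb-instances|@fb-alerting"
+```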
+ +--- + +**Related Documentation**: +- [E2E Tests](e2e-tests.md) +- [Integration & CLI Tests](integration-cli-tests.md) +- [Package Tests](package-tests.md) +- [Upgrade Tests](upgrade-tests.md) +- [Test Parameters Reference](test-parameters.md) +- [Troubleshooting Guide](troubleshooting.md) diff --git a/docs/infrastructure-tests.md b/docs/infrastructure-tests.md new file mode 100644 index 00000000..d5f3912b --- /dev/null +++ b/docs/infrastructure-tests.md @@ -0,0 +1,165 @@ +# Infrastructure Tests + +This guide provides instructions for running the PMM infrastructure tests locally. These tests validate PMM deployments in various environments, including Kubernetes/Helm and simplified installations using the Easy Install script. + +## 💡 **What are Infrastructure Tests?** + +Infrastructure tests are designed to ensure that PMM can be deployed and configured correctly in different environments. They cover: + +- **Kubernetes/Helm**: Validating PMM deployment using Helm charts on a Kubernetes cluster. +- **Easy Install**: Testing the simplified installation script on various supported operating systems. + +## 🤖 **How to Run Infrastructure Tests Locally** + +### **Helm Tests (Kubernetes)** + +These steps will guide you through setting up a local Kubernetes cluster using Minikube and deploying PMM with Helm. + +#### **Prerequisites** + +- **Minikube**: For running a local Kubernetes cluster. +- **kubectl**: The Kubernetes command-line tool. +- **Helm**: The package manager for Kubernetes. + +#### **Step 1: Start Minikube** + +Start a Minikube cluster. This will create a local single-node Kubernetes cluster. + +```bash +minikube start +``` + +#### **Step 2: Set Up Storage** + +Disable the default storage provisioner and enable the CSI hostpath driver for persistent storage. + +```bash +minikube addons disable storage-provisioner +minikube addons enable csi-hostpath-driver +kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' +``` + +#### **Step 3: Run Helm Tests** + +Clone the `pmm-qa` repository and run the Helm tests using `bats`. + +```bash +git clone https://github.com/percona/pmm-qa.git +cd pmm-qa/k8s + +# Set up bats (BASH Automated Testing System) +sudo ./setup_bats_libs.sh + +# Run the tests +SERVER_IMAGE=perconalab/pmm-server:3-dev-latest bats --tap helm-test.bats +``` + +### **Easy Install Tests** + +These steps will show you how to test the Easy Install script on a supported operating system. + +#### **Prerequisites** + +- A clean installation of a supported OS (e.g., Ubuntu 24.04, Oracle Linux 9, Rocky Linux 9). +- `curl` or `wget` to download the script. + +#### **Step 1: Download the Script** + +Download the Easy Install script from the Percona website. + +```bash +curl -fsSL https://www.percona.com/get/pmm > pmm-installer.sh +``` + +#### **Step 2: Run the Script** + +Execute the script with `bash`. The script will automatically detect the OS and install PMM. + +```bash +sudo bash pmm-installer.sh +``` + +#### **Step 3: Validate the Installation** + +After the script finishes, you can check the status of the PMM server and other components. + +```bash +docker ps -a +``` + +You should see the `pmm-server` and `watchtower` containers running. + +## 📝 **How to Write Helm Tests** + +All paths mentioned in this section are relative to the root of the `pmm-qa` repository, which can be found [here](https://github.com/percona/pmm-qa/tree/v3). 
+ +Helm tests in this project are written using Bats (Bash Automated Testing System). Bats provides a simple way to test shell scripts and command-line tools. Helm tests typically involve deploying a Helm chart and then asserting on the state of the Kubernetes resources or the behavior of the deployed application. + +### **Test Structure and Directory Layout** + +Helm tests are located in the `pmm-qa/k8s` directory. + +``` +pmm-qa/ +├── k8s/ +│ ├── helm-test.bats # Main Bats test file for Helm +│ ├── k8s_helper.sh # Helper functions for Kubernetes interactions +│ ├── pmm_helper.sh # Helper functions for PMM-specific actions +│ └── setup_bats_libs.sh # Script to set up Bats libraries +``` + +- **`helm-test.bats`**: This is the main Bats test file. It contains the test cases for deploying PMM using Helm and verifying its functionality. +- **`k8s_helper.sh`**: This script contains reusable Bash functions for interacting with Kubernetes, such as checking pod status, deploying resources, and running `kubectl` commands. +- **`pmm_helper.sh`**: This script provides helper functions specific to PMM, such as checking PMM server status or client registration. + +### **Writing Conventions** + +- **Bats Syntax**: Tests are written using Bats syntax, which is essentially Bash scripting with special Bats commands for defining tests (`@test`), assertions (`run`, `assert_success`, `assert_output`), and setup/teardown (`setup`, `teardown`). +- **Helper Functions**: Utilize helper functions in `k8s_helper.sh` and `pmm_helper.sh` to abstract complex Kubernetes and PMM interactions. This promotes reusability and readability. +- **Clear Assertions**: Assertions should clearly define the expected outcome of a command or the state of a resource. +- **Test Isolation**: Each test should aim to be as isolated as possible, cleaning up resources after execution to prevent interference. + +### **Basic Helm Test Example** + +A typical Bats test in `helm-test.bats` might look like this: + +```bash +#!/usr/bin/env bats + +load 'test_helper/bats-support/load' +load 'test_helper/bats-assert/load' + +@test "PMM Helm chart deploys successfully" { + run helm install my-pmm ./pmm-helm-chart + assert_success + assert_output --partial "STATUS: deployed" + + run kubectl get pods -l app.kubernetes.io/instance=my-pmm + assert_success + assert_output --partial "pmm-server" +} + +@test "PMM server is reachable after deployment" { + run kubectl get service my-pmm-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' + assert_success + PMM_IP="$output" + + # Assuming a simple ping endpoint for demonstration + run curl -s "http://$PMM_IP/ping" + assert_success + assert_output "PMM Server is running" +} +``` + +**Note**: The actual `helm-test.bats` file in the project will be more complex, involving detailed setup, deployment, and validation steps specific to PMM. The example above is simplified to illustrate the basic structure. 
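+
+When iterating on a single Bats case, running the whole file is unnecessarily slow. A short sketch, assuming a bats-core version that supports `--filter` (the test-name regex is illustrative):
+
+```bash
+cd pmm-qa/k8s
+# Run only the @test blocks whose description matches the regex
+SERVER_IMAGE=perconalab/pmm-server:3-dev-latest bats --tap --filter "deploys successfully" helm-test.bats
+```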
+ +--- + +**Related Documentation**: +- [E2E Tests](e2e-tests.md) +- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md) +- [Integration & CLI Tests](integration-cli-tests.md) +- [Package Tests](package-tests.md) +- [Upgrade Tests](upgrade-tests.md) +- [Test Parameters Reference](test-parameters.md) +- [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/integration-cli-tests.md b/docs/integration-cli-tests.md new file mode 100644 index 00000000..eda526e0 --- /dev/null +++ b/docs/integration-cli-tests.md @@ -0,0 +1,210 @@ +# Integration & CLI Tests + +This guide provides instructions for running the PMM Integration and Command-Line Interface (CLI) tests locally. These tests validate the interaction between the PMM server and client, as well as the functionality of the `pmm-admin` CLI tool. + +## 💡 **What are Integration & CLI Tests?** + +These tests are designed to: + +- **Validate client-server communication**: Ensuring that the PMM client can successfully register with and send data to the PMM server. +- **Test database integration**: Verifying that PMM can monitor various database technologies (MySQL, MongoDB, PostgreSQL, etc.). +- **Ensure CLI functionality**: Testing the different commands, flags, and options of the `pmm-admin` CLI. + +## 🤖 **How to Run Integration & CLI Tests Locally** + +The following steps will guide you through setting up the necessary environment and running the integration and CLI tests on your local machine. These instructions are based on the `runner-integration-cli-tests.yml` CI workflow. + +### **Prerequisites** + +- **Git**: To clone the required repositories. +- **Docker**: To run the PMM server and other services. +- **Node.js (v18+)** and **npm**: For running the test framework. +- **Python 3** and **pip**: For running setup scripts. +- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, etc. + +### **Step 1: Clone Repositories** + +Clone the `pmm-ui-tests` and `qa-integration` repositories. + +```bash +git clone --branch v3 https://github.com/percona/pmm-ui-tests.git +git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git +``` + +### **Step 2: Install System Dependencies** + +Install the required system packages. The command below is for Debian/Ubuntu-based systems. + +```bash +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client +curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin +``` + +### **Step 3: Set Up PMM Server** + +Set up and start the PMM server using Docker. + +```bash +docker create -v /srv --name pmm-server-data perconalab/pmm-server:3-dev-latest +docker run -d -p 80:80 -p 443:8443 --volumes-from pmm-server-data --name pmm-server --restart always perconalab/pmm-server:3-dev-latest +timeout 240 bash -c 'while [[ "$(curl -k -s -o /dev/null -w ''%{http_code}'' https://127.0.0.1:443/v1/readyz)" != "200" ]]; do sleep 2; done' || false +``` + +### **Step 4: Set Up PMM Client and Services** + +Set up the PMM client and the database services you want to monitor. 
+
+```bash
+cd qa-integration/pmm_qa
+
+# Install the PMM client
+sudo bash -x pmm3-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 3-dev-latest --admin_password admin --use_metrics_mode no
+
+# Set up the test environment and services (e.g., a single Percona Server instance)
+python3 -m venv virtenv
+source virtenv/bin/activate
+pip install --upgrade pip
+pip install -r requirements.txt
+python pmm-framework.py --database ps
+
+cd ../..
+```
+
+### **Step 5: Install Test Dependencies**
+
+Install the Node.js dependencies for the CLI tests.
+
+```bash
+cd pmm-ui-tests/cli
+npm ci
+```
+
+### **Step 6: Run the Tests**
+
+Finally, run the CLI tests using Playwright. You can run specific test files or all of them.
+
+```bash
+cd pmm-ui-tests/cli
+
+# Run the help tests
+npx playwright test tests/help.spec.ts
+
+# Run the Percona Server tests
+npx playwright test tests/perconaMySqlServer.spec.ts
+
+# Run all tests
+npx playwright test
+```
+
+## 🚀 **Feature Build Integration Suite**
+
+The Feature Build Integration Suite (`fb-integration-suite.yml`) is used to test feature builds of the PMM server and client. It runs the same set of integration and CLI tests against a specified feature build image.
+
+To run these tests locally, follow the same steps as above, but in Step 3, use the feature build Docker image for the PMM server:
+
+```bash
+docker create -v /srv --name pmm-server-data perconalab/pmm-server-fb:feature-xyz
+docker run -d -p 80:80 -p 443:8443 --volumes-from pmm-server-data --name pmm-server --restart always perconalab/pmm-server-fb:feature-xyz
+```
+
+Replace `perconalab/pmm-server-fb:feature-xyz` with the actual tag of the feature build image.
+
+## 📝 **How to Write CLI/Integration Tests**
+
+All paths mentioned in this section are relative to the root of the `pmm-ui-tests` repository ([here](https://github.com/percona/pmm-ui-tests/tree/v3)) or the `qa-integration` repository ([here](https://github.com/Percona-Lab/qa-integration/tree/v3)).
+
+CLI/Integration tests in this project are primarily written using Playwright for interacting with the command line and asserting outputs, combined with Python scripts (`pmm-framework.py`) for setting up and managing the test environment and services.
+
+### **Test Structure and Directory Layout**
+
+CLI tests are located in the `pmm-ui-tests/cli/tests` directory. Each test file (`.spec.ts`) typically focuses on a specific `pmm-admin` command or a set of related commands.
+
+```
+pmm-ui-tests/
+├── cli/
+│   ├── tests/                    # Playwright test files for CLI
+│   │   ├── help.spec.ts
+│   │   ├── inventory.spec.ts
+│   │   └── mysql.spec.ts
+│   ├── playwright.config.ts      # Playwright configuration for CLI tests
+│   └── package.json              # Node.js dependencies for CLI tests
+qa-integration/
+├── pmm_qa/                       # Python scripts for environment setup
+│   ├── pmm-framework.py          # Main script for setting up services
+│   ├── helpers/                  # Helper modules for pmm-framework.py
+│   └── requirements.txt          # Python dependencies
+```
+
+- **`pmm-ui-tests/cli/tests/`**: Contains the Playwright test files written in TypeScript. These files use Playwright's `expect` assertions to validate CLI output and behavior.
+- **`qa-integration/pmm_qa/pmm-framework.py`**: This is the main Python script responsible for setting up the PMM server, PMM clients, and various database services required for testing. It abstracts away the complexities of environment provisioning.
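+
+For richer environments, `pmm-framework.py` accepts repeated `--database` flags with per-service options. A hedged example (the flag values below come from the [Test Parameters Reference](test-parameters.md); adjust them to your scenario):
+
+```bash
+cd qa-integration/pmm_qa
+source virtenv/bin/activate   # virtualenv created in Step 4 above
+
+# Percona Server 8.0 using the slow query log as the query source,
+# a MongoDB replica set, and PostgreSQL 15 with the pg_stat_monitor extension
+python pmm-framework.py \
+  --database ps=8.0,QUERY_SOURCE=slowlog \
+  --database psmdb,SETUP_TYPE=pss \
+  --database pdpgsql=15,EXTENSION=pg_stat_monitor
+```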
+
+### **Writing Conventions**
+
+- **Playwright as the Test Runner**: CLI tests use Playwright's runner and assertion helpers, but they do not drive a browser. Commands are executed through the project's `cli-helper` module (shown in the example below), and the captured output is asserted on directly.
+- **Python for Environment Setup**: Leverage `pmm-framework.py` to programmatically set up databases, PMM clients, and other services. This ensures a consistent and reproducible test environment.
+- **Clear Assertions**: Assertions should clearly define the expected CLI output, service status, or data collected by PMM.
+- **Test Isolation**: Each test should aim to be as isolated as possible, setting up and tearing down its own resources to prevent interference.
+
+### **Basic CLI Test Example**
+
+CLI/Integration tests in this project typically use a custom `cli-helper` module (located in `pmm-ui-tests/helpers/cli-helper.ts`) to execute `pmm-admin` commands and capture their output. The `cli-helper` returns an `ExecReturn` object, which provides convenient methods for assertions.
+
+```typescript
+import { test } from '@playwright/test';
+import * as cli from '@helpers/cli-helper'; // Project-specific CLI helper
+import ExecReturn from '@support/types/exec-return.class'; // Type definition for command output
+
+let addMongoHelp: ExecReturn;
+
+test.describe('pmm-admin help output', () => {
+  test.beforeAll(async () => {
+    // Execute a pmm-admin command silently and store its output
+    addMongoHelp = await cli.execSilent('sudo pmm-admin add mongodb --help');
+    await addMongoHelp.assertSuccess(); // Assert that the command exited successfully
+  });
+
+  test('pmm-admin add mongodb --help should contain key options', async () => {
+    // Assert that the output contains specific lines or patterns
+    await addMongoHelp.outContainsMany([
+      'Usage: pmm-admin add mongodb [<service-name> [<address>]]',
+      '--socket=STRING',
+      'metrics-mode="auto"',
+      'host',
+      'port',
+      'service-name',
+    ]);
+  });
+
+  test('pmm-admin add mongodb --help should contain TLS flags', async () => {
+    await addMongoHelp.outContainsMany([
+      'tls                      Use TLS to connect to the database',
+      'tls-skip-verify          Skip TLS certificate verification',
+      'tls-certificate-key-file=STRING',
+      'tls-ca-file=STRING       Path to certificate authority file',
+    ]);
+  });
+});
+```
+
+**Explanation of the Example:**
+
+- **`import * as cli from '@helpers/cli-helper';`**: Imports the custom CLI helper module that wraps shell command execution.
+- **`import ExecReturn from '@support/types/exec-return.class';`**: Imports the type definition for the object returned by the CLI helper, which includes `stdout`, `stderr`, `exitCode`, and assertion methods.
+- **`cli.execSilent('sudo pmm-admin add mongodb --help')`**: Executes the `pmm-admin` command. `execSilent` runs the command without printing its output to the console, which is useful for tests where you only care about the return value or specific output assertions.
+- **`await addMongoHelp.assertSuccess()`**: An assertion method provided by `ExecReturn` to verify that the command executed successfully (exit code 0).
+- **`await addMongoHelp.outContainsMany([...])`**: An assertion method to check if the standard output of the command contains all the specified strings. This is a common way to verify help messages or command outputs.
+
+This example demonstrates how to execute a `pmm-admin` command, check its success, and assert on its output using the project's established helper functions, reflecting how CLI tests are actually written in this repository.
+
+
+---
+
+**Related Documentation**:
+- [E2E Tests](e2e-tests.md)
+- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md)
+- [Infrastructure Tests](infrastructure-tests.md)
+- [Package Tests](package-tests.md)
+- [Upgrade Tests](upgrade-tests.md)
+- [Test Parameters Reference](test-parameters.md)
+- [Troubleshooting Guide](troubleshooting.md)
diff --git a/docs/package-tests.md b/docs/package-tests.md
new file mode 100644
index 00000000..66c8bcef
--- /dev/null
+++ b/docs/package-tests.md
@@ -0,0 +1,223 @@
+# Package Tests
+
+This guide provides instructions for running the PMM client package installation tests locally. These tests validate that PMM client packages install and function correctly on various supported operating systems and configurations.
+
+## 💡 **What are Package Tests?**
+
+Package tests are designed to verify the PMM client installation process from start to finish. They ensure:
+
+- **Platform Compatibility**: That packages install correctly on all supported Linux distributions (Debian, Ubuntu, RHEL, etc.).
+- **Installation Scenarios**: That different installation types, such as standard, custom path, and tarball, all work as expected.
+- **Package Integrity**: That the packages themselves are not corrupt and contain all the necessary files and dependencies.
+
+## 🤖 **How to Run Package Tests Locally**
+
+The following steps will guide you through setting up a virtualized environment using Vagrant and running the package tests with Ansible, mirroring the process used in the `runner-package-test.yml` CI workflow.
+
+### **Prerequisites**
+
+- **Git**: To clone the required repositories.
+- **Docker**: To run the PMM server.
+- **Vagrant**: To create and manage virtual machine environments.
+- **VirtualBox** (or another Vagrant provider): To run the virtual machines.
+- **Ansible**: To automate the test execution within the VM. + +### **Step 1: Clone the `package-testing` Repository** + +First, clone the `package-testing` repository, which contains the Ansible playbooks for the tests. + +```bash +git clone https://github.com/Percona-QA/package-testing.git +cd package-testing +``` + +### **Step 2: Set Up PMM Server** + +Before running the client installation tests, you need a running PMM server for the client to connect to. Start one using Docker. + +```bash +docker create -v /srv --name pmm-server-data perconalab/pmm-server:3-dev-latest +docker run -d -p 80:80 -p 443:8443 --volumes-from pmm-server-data --name pmm-server --restart always perconalab/pmm-server:3-dev-latest +timeout 240 bash -c 'while [[ "$(curl -k -s -o /dev/null -w ''%{http_code}'' https://127.0.0.1:443/v1/readyz)" != "200" ]]; do sleep 2; done' || false +``` + +### **Step 3: Configure and Run Vagrant** + +Vagrant will create a clean VM, install the necessary dependencies, and run the Ansible playbook to perform the test. + +1. **Create a `Vagrantfile`**: Create a file named `Vagrantfile` in the `package-testing` directory with the following content. This example is for Ubuntu 22.04 (Jammy). + + ```ruby + Vagrant.require_version ">= 1.7.0" + Vagrant.configure(2) do |config| + # Use a specific OS box for the test + config.vm.box = "generic/ubuntu2204" + + config.ssh.insert_key = false + config.vm.define :CLIENT_TEST + + # Sync the current directory to the VM + config.vm.synced_folder ".", "/package-testing/" + + # Provision the VM with a shell script + config.vm.provision "shell", privileged: true, inline: <<-SHELL + # Set environment variables for the test + export PMM_SERVER_IP=10.0.2.2:443 + export METRICS_MODE=auto + export install_repo=experimental + export install_package=pmm3-client + + # Install Ansible + apt-get update -y + apt-get install -y software-properties-common + apt-add-repository --yes --update ppa:ansible/ansible + apt-get install -y ansible git wget + + # Run the Ansible playbook for the test + cd /package-testing/playbooks + ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 pmm3-client_integration.yml + SHELL + end + ``` + +2. **Run Vagrant**: Start the VM and the provisioning process. + + ```bash + vagrant up + ``` + +### **Customizing Your Test** + +- **To test a different OS**: Change `config.vm.box` in the `Vagrantfile` to another supported box (e.g., `generic/debian11`, `generic/oracle9`). You may also need to adjust the Ansible installation commands for different package managers (e.g., `yum` or `dnf`). +- **To run a different test scenario**: Change the playbook file in the `ansible-playbook` command (e.g., to `pmm3-client_integration_custom_path.yml`). + +## 🚀 **Feature Build Tarball Suite** + +The Feature Build Tarball Suite (`fb-tarball-suite.yml`) is used to test feature builds of the PMM client distributed as a tarball. It runs the package tests against a specified tarball URL. + +To run these tests locally, follow the same steps as above, but in the `Vagrantfile`, set the `TARBALL_LINK` environment variable to the URL of the feature build tarball: + +```ruby +# ... (Vagrantfile content) ... + config.vm.provision "shell", privileged: true, inline: <<-SHELL + # Set environment variables for the test + export PMM_SERVER_IP=10.0.2.2:443 + export TARBALL_LINK="https://example.com/pmm-client-feature-xyz.tar.gz" + + # ... (rest of the script) ... + SHELL +# ... (Vagrantfile content) ... 
+``` + +Replace `https://example.com/pmm-client-feature-xyz.tar.gz` with the actual URL of the feature build tarball. + +## 📝 **How to Write Package Tests (Ansible)** + +All paths mentioned in this section are relative to the root of the `package-testing` repository, which can be found [here](https://github.com/Percona-QA/package-testing/tree/v3). + +Package tests are primarily written as Ansible playbooks. Ansible allows for declarative definition of system states and automates the installation, configuration, and validation of software packages across various operating systems. + +### **Test Structure and Directory Layout** + +Ansible playbooks for package testing are located in the `package-testing/playbooks` directory. Each playbook (`.yml`) defines a specific test scenario (e.g., standard installation, custom path installation). + +``` +package-testing/ +├── playbooks/ # Ansible playbooks for different test scenarios +│ ├── pmm3-client_integration.yml +│ ├── pmm3-client_integration_custom_path.yml +│ └── ... +├── roles/ # Reusable Ansible roles +│ ├── pmm-client/ # Role for PMM client installation and configuration +│ └── ... +├── inventory.ini # Ansible inventory file (defines hosts) +└── Vagrantfile # Vagrant configuration for test VMs +``` + +- **`playbooks/`**: Contains the main Ansible playbooks. Each playbook orchestrates a series of tasks to perform a specific package test scenario. +- **`roles/`**: Contains reusable Ansible roles. Roles encapsulate a set of tasks, variables, and handlers for a specific purpose (e.g., installing and configuring the PMM client). +- **`inventory.ini`**: Defines the hosts that Ansible will manage. In local testing with Vagrant, this typically points to the local VM. +- **`Vagrantfile`**: Configures the virtual machine environment where the Ansible playbooks will be executed. + +### **Writing Conventions** + +- **Declarative Style**: Ansible playbooks are declarative, describing the desired state rather than the steps to achieve it. +- **Idempotency**: Playbooks should be idempotent, meaning running them multiple times will have the same result as running them once. +- **Roles**: Utilize Ansible roles to organize tasks, variables, and handlers into logical, reusable units. +- **Variables**: Use variables to make playbooks flexible and reusable across different environments or test scenarios. +- **Assertions**: Use Ansible's `assert` module or conditional tasks to validate the success of installation steps and the state of the system. 
+
+### **Basic Ansible Playbook Example**
+
+A simplified Ansible playbook (`pmm3-client_integration.yml`) might look like this:
+
+```yaml
+---
+- name: Install PMM Client (Standard Integration)
+  hosts: all
+  become: yes
+  vars:
+    pmm_server_ip: "{{ lookup('env', 'PMM_SERVER_IP') }}"
+    metrics_mode: "{{ lookup('env', 'METRICS_MODE') }}"
+    install_repo: "{{ lookup('env', 'install_repo') }}"
+    install_package: "{{ lookup('env', 'install_package') }}"
+
+  tasks:
+    - name: Ensure Percona repository is configured
+      ansible.builtin.shell: |-
+        curl -fsSL https://www.percona.com/get/percona-release | bash
+        percona-release enable-only {{ install_package }} {{ install_repo }}
+
+    - name: Install PMM Client package
+      ansible.builtin.package:
+        name: "{{ install_package }}"
+        state: present
+        update_cache: yes
+
+    # PMM_SERVER_IP already includes the port (e.g. 10.0.2.2:443),
+    # so credentials are passed in the server URL itself
+    - name: Configure PMM Client to connect to PMM Server
+      ansible.builtin.command: |-
+        pmm-admin config --server-insecure-tls --server-url=https://admin:admin@{{ pmm_server_ip }}
+
+    - name: Add MySQL service
+      ansible.builtin.command: |-
+        pmm-admin add mysql --query-source=perfschema --username=root --password=root
+
+    - name: Verify PMM Client status
+      ansible.builtin.command: pmm-admin status
+      register: pmm_status
+      until: pmm_status.stdout.find("Running") != -1
+      retries: 10
+      delay: 10
+
+    - name: Assert MySQL service is added
+      ansible.builtin.command: pmm-admin list
+      register: pmm_list
+      failed_when: pmm_list.stdout.find("mysql") == -1
+```
+
+### **Key Concepts**
+
+- **Playbook**: The entry point for an Ansible run, defining the hosts to target and the tasks to execute.
+- **Hosts**: Specifies which machines the playbook will run against (e.g., `all` for all hosts in the inventory, or a specific group).
+- **`become: yes`**: Instructs Ansible to escalate privileges (e.g., use `sudo`) for tasks that require root access.
+- **`vars`**: Defines variables that can be used within the playbook. These can be sourced from environment variables (`lookup('env', ...)`), files, or command-line arguments.
+- **Tasks**: Individual actions that Ansible performs. Tasks use modules (e.g., `ansible.builtin.package`, `ansible.builtin.command`, `ansible.builtin.shell`) to interact with the remote hosts.
+- **Modules**: Pre-built units of code that Ansible executes. They perform specific functions like installing packages, running commands, or managing services.
+- **`register`**: Captures the output of a task into a variable for later use or assertion.
+- **`until` / `retries` / `delay`**: Used for retrying tasks until a certain condition is met, useful for waiting on services to start or become healthy.
+- **`failed_when`**: Defines a condition under which a task should be considered failed.
+
+### **Running New Tests**
+
+After creating a new playbook or modifying an existing one, you can run it by updating your `Vagrantfile` to point to the new playbook and then running `vagrant up`.
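+
+The `failed_when` check in the example above can also be expressed with the `assert` module mentioned in the writing conventions, which produces a clearer failure message. A minimal sketch (task name and messages are illustrative):
+
+```yaml
+    - name: Assert MySQL service is added (assert-module variant)
+      ansible.builtin.assert:
+        that:
+          - "'mysql' in pmm_list.stdout"
+        fail_msg: "No MySQL service found in 'pmm-admin list' output"
+        success_msg: "MySQL service is registered with PMM"
+```
+
+To iterate on a playbook without recreating the VM, `vagrant provision` re-runs the provisioner against the already-running machine.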
+
+---
+
+**Related Documentation**:
+- [E2E Tests](e2e-tests.md)
+- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md)
+- [Infrastructure Tests](infrastructure-tests.md)
+- [Integration & CLI Tests](integration-cli-tests.md)
+- [Upgrade Tests](upgrade-tests.md)
+- [Test Parameters Reference](test-parameters.md)
+- [Troubleshooting Guide](troubleshooting.md)
diff --git a/docs/test-parameters.md b/docs/test-parameters.md
new file mode 100644
index 00000000..47079807
--- /dev/null
+++ b/docs/test-parameters.md
@@ -0,0 +1,389 @@
+# Test Parameters Reference
+
+This comprehensive reference guide covers all parameters, configurations, and options available across PMM-QA workflows.
+
+## 📚 **Overview**
+
+This document provides detailed information about:
+- Common workflow parameters
+- Service setup configurations
+- Version and image specifications
+- Test flags and categories
+- Environment variables
+- Platform and OS options
+
+## 🔧 **Common Workflow Parameters**
+
+### Branch Configuration
+```yaml
+pmm_ui_tests_branch: "v3"       # PMM UI tests repository branch
+pmm_qa_branch: "v3"             # PMM QA repository branch
+qa_integration_branch: "v3"     # QA integration repository branch
+package_testing_branch: "v3"    # Package testing branch
+easy_install_branch: "v3"       # Easy install script branch
+```
+
+### Version and Image Parameters
+```yaml
+# Server Configuration
+pmm_server_image: "perconalab/pmm-server:3-dev-latest"
+pmm_server_version: "perconalab/pmm-server:3-dev-latest"
+pmm_server_start_version: "latest"
+
+# Client Configuration
+pmm_client_image: "perconalab/pmm-client:3-dev-latest"
+pmm_client_version: "3-dev-latest"
+pmm_client_start_version: "pmm2-latest"
+pmm_client_tarball: ""          # Custom tarball URL
+
+# Status Reporting
+sha: "null"                     # Commit SHA for status reporting
+```
+
+## 🗄️ **Database Service Setup**
+
+### Single Database Configurations
+```yaml
+# MySQL Family
+--database mysql                # MySQL (latest)
+--database mysql=8.0            # MySQL 8.0
+--database ps                   # Percona Server (latest)
+--database ps=5.7               # Percona Server 5.7
+--database ps=8.0               # Percona Server 8.0
+
+# PostgreSQL Family
+--database pdpgsql              # Percona Distribution for PostgreSQL (latest)
+--database pdpgsql=14           # PostgreSQL 14
+--database pdpgsql=15           # PostgreSQL 15
+--database pdpgsql=16           # PostgreSQL 16
+
+# MongoDB Family
+--database psmdb                # Percona Server for MongoDB
+--database modb                 # MongoDB
+
+# Proxy/Load Balancers
+--database haproxy              # HAProxy
+--database proxysql             # ProxySQL
+
+# Special Configurations
+--database external             # External exporter testing
+--database dockerclients        # Docker client testing
+```
+
+### Advanced Database Options
+```yaml
+# MongoDB replica set (pss = primary-secondary-secondary)
+--database psmdb,SETUP_TYPE=pss
+
+# MongoDB with extra profiles
+--database psmdb,COMPOSE_PROFILES=extra
+
+# MySQL/PS with slow query log
+--database ps,QUERY_SOURCE=slowlog
+--database mysql,QUERY_SOURCE=slowlog
+
+# PostgreSQL with extensions
+--database pdpgsql,EXTENSION=pg_stat_monitor
+--database pdpgsql,EXTENSION=pg_stat_statements
+```
+
+### Multi-Database Setups
+```yaml
+# Basic multi-database
+--database ps --database psmdb --database pdpgsql
+
+# Comprehensive setup
+--database ps=8.0 --database psmdb --database pdpgsql=15 --database haproxy
+
+# Client addition patterns
+--addclient=ps,1                # Add 1 Percona Server client
+--addclient=pdpgsql,1           # Add 1 PostgreSQL client
+--addclient=modb,1              # Add 1 MongoDB client
+```
+
+## 🏷️ **Test Tags and Categories**
+
+### E2E Test Tags
+```yaml
+# Core Functionality
+@portal                         # Portal functionality
+@inventory                      # Inventory
management +@dashboards # Dashboard functionality +@qan # Query Analytics + +# Feature-Specific +@backup-management # Backup features +@alerting # Alerting functionality +@rbac # Role-based access control +@settings-fb # Settings feature build tests + +# Security and Authentication +@security # Security features +@user-password # User authentication testing +@oauth # OAuth integration + +# API and Integration +@api # API testing +@exporters # Exporter functionality +@mongodb-exporter # MongoDB-specific exporters +``` + +### Feature Build Test Tags +```yaml +# Backup Management +@bm-mongo # MongoDB backup tests +@bm-mysql # MySQL backup tests +@bm-common # Common backup features +@bm-locations # Backup location testing + +# Database-Specific +@pgsm-pmm-integration # PostgreSQL pg_stat_monitor +@pgss-pmm-integration # PostgreSQL pg_stat_statements + +# UI Components +@fb-instances # Instance management UI +@fb-alerting # Alerting UI components +@fb-settings # Settings UI components +``` + +### Upgrade Test Tags +```yaml +# Pre-upgrade Tests +@config-pre-upgrade # Configuration documentation +@rbac-pre-upgrade # RBAC state capture +@portal-pre-upgrade # Portal state capture +@inventory-pre-upgrade # Inventory state capture + +# Post-upgrade Tests +@config-post-upgrade # Configuration validation +@rbac-post-upgrade # RBAC validation +@portal-post-upgrade # Portal validation +@inventory-post-upgrade # Inventory validation +``` + +## 📦 **Package Testing Parameters** + +### Package Types +```yaml +package: "original" # Legacy PMM package +package: "pmm3-client" # PMM3 client package +package: "tools" # PMM tools package +``` + +### Repository Types +```yaml +repository: "release" # Stable release repository +repository: "release candidate" # RC repository +repository: "dev-latest" # Development repository +``` + +### Metrics Modes +```yaml +metrics_mode: "auto" # Automatic mode selection +metrics_mode: "push" # Client pushes metrics +metrics_mode: "pull" # Server pulls metrics +``` + +### Installation Scenarios +```yaml +# Playbook Types +playbook: "pmm3-client_integration" +playbook: "pmm3-client_integration_custom_path" +playbook: "pmm3-client_integration_custom_port" +``` + +## ⬆️ **Upgrade Testing Parameters** + +### Upgrade Methods +```yaml +upgrade_type: "UI way" # Web interface upgrade +upgrade_type: "Docker way" # Container replacement +upgrade_type: "Podman way" # Podman-based upgrade +``` + +### Version Specifications +```yaml +# Start Versions +pmm_server_start_version: "latest" # Latest stable +pmm_server_start_version: "dev-latest" # Development +pmm_server_start_version: "2.41.0" # Specific version +pmm_server_start_version: "3.0.0-rc" # Release candidate + +# Target Repositories +repository: "release" # To stable release +repository: "release candidate" # To RC +repository: "dev-latest" # To development +``` + +## 🖥️ **Platform and OS Parameters** + +### Supported Operating Systems +```yaml +# Debian/Ubuntu Family +"bullseye" # Debian 11 +"bookworm" # Debian 12 +"jammy" # Ubuntu 22.04 LTS +"noble" # Ubuntu 24.04 LTS + +# Red Hat Family +"ol-8" # Oracle Linux 8 +"ol-9" # Oracle Linux 9 +"rocky-8" # Rocky Linux 8 +"rocky-9" # Rocky Linux 9 +"centos-7" # CentOS 7 (legacy) +``` + +### Architecture Support +```yaml +"x86_64" # Intel/AMD 64-bit +"aarch64" # ARM 64-bit +``` + +## 🌐 **Environment Variables** + +### Authentication Variables +```yaml +OAUTH_CLIENT_ID # OAuth client identifier +OAUTH_CLIENT_SECRET # OAuth client secret +OAUTH_PMM_CLIENT_ID # PMM-specific OAuth client ID 
+OAUTH_PMM_CLIENT_SECRET # PMM-specific OAuth secret +ADMIN_PASSWORD # PMM admin password (default: admin) +``` + +### External Service Integration +```yaml +MAILOSAUR_API_KEY # Email testing service +MAILOSAUR_UI_TESTS_SERVER_ID # UI tests email server +MAILOSAUR_API_TESTS_SERVER_ID # API tests email server +SERVICENOW_PASSWORD # ServiceNow integration +ZEPHYR_PMM_API_KEY # Test management integration +``` + +### Testing Configuration +```yaml +PMM_BASE_URL # PMM server URL (default: https://127.0.0.1) +TIMEOUT # Test timeout settings +BROWSER # Browser selection +DOCKER_VERSION # Docker image version +CLIENT_VERSION # Client version +``` + +### Backup Testing +```yaml +BACKUP_LOCATION_ACCESS_KEY # Backup storage access key +BACKUP_LOCATION_SECRET_KEY # Backup storage secret key +``` + +## 🔧 **CLI Test Specific Parameters** + +### Test Execution Parameters +```yaml +cli_test: "help.spec.ts" # Specific test file +cli_test: "pmm-server-only" # Server-only tests +cli_test: "pmm-client-docker" # Client container tests +cli_test: "generic unregister --workers=1" # Generic tests with workers +cli_test: "postgreSql --workers=1" # PostgreSQL tests +``` + +### Service List Parameters +```yaml +services_list: "--database ps=8.0" +services_list: "--database dockerclients" +services_list: "--addclient=ps,1 --addclient=pdpgsql,1" +``` + +## 🏗️ **Infrastructure Testing Parameters** + +### Kubernetes/Helm Parameters +```yaml +server_image: "perconalab/pmm-server:3-dev-latest" +client_image: "perconalab/pmm-client:3-dev-latest" +pmm_qa_branch: "v3" +``` + +### Easy Install Parameters +```yaml +easy_install_branch: "v3" # Installation script branch +os: "ubuntu-noble" # Target operating system +os: "ol-9" # Oracle Linux 9 +os: "rocky-9" # Rocky Linux 9 +``` + +## 📊 **Matrix Testing Parameters** + +### Version Matrix +```yaml +matrix_range: "10" # Number of versions to test +version_matrix: "["3.0.0", "3.1.0", "3.2.0"]" +pt_os_matrix: "[\"bullseye\", \"bookworm\", \"noble\"]" +``` + +### Platform Matrix +```yaml +[ + { os: "ubuntu-noble", package: "pmm3-client", metrics: "auto" }, + { os: "debian-bookworm", package: "pmm3-client", metrics: "push" }, + { os: "ol-9", package: "pmm3-client", metrics: "pull" } +] +``` + +## 🕒 **Timing and Duration Parameters** + +### Test Timeouts +```yaml +timeout-minutes: 40 # Job timeout (Integration tests) +timeout-minutes: 60 # Job timeout (E2E tests) +timeout-minutes: 1 # Job timeout (Version getter) +``` + +### Expected Durations +```yaml +Help Tests: 5 minutes +Server Container: 10 minutes +Database Tests: 20-30 minutes +E2E Portal: 30 minutes +E2E Inventory: 25 minutes +Package Installation: 20 minutes +Helm Tests: 30 minutes +Upgrade Tests: 45-60 minutes +``` + +## 🔄 **Special Configuration Patterns** + +### Setup Enhancement Flags +```yaml +--setup-portal-oauth # OAuth configuration for portal +--enable-portal-features # Portal-specific features +--enable-service-discovery # Automatic service discovery +--setup-multiple-clients # Multiple client instances +--enable-backup-management # Backup functionality +--setup-alerting # Alerting configuration +--mongo-replica-for-backup # MongoDB replica for backup testing +--setup-bm-mysql # Backup management for MySQL +``` + +### Custom Configuration Examples +```yaml +# Comprehensive test setup +--database ps=8.0,QUERY_SOURCE=slowlog \ +--database psmdb,SETUP_TYPE=pss,COMPOSE_PROFILES=extra \ +--database pdpgsql=15,EXTENSION=pg_stat_monitor \ +--database haproxy \ +--enable-backup-management \ +--setup-alerting \ 
+--create-test-users + +# Minimal test setup +--database ps + +# Multi-service basic setup +--database ps --database psmdb --database pdpgsql +``` + +--- + +**Related Documentation**: +- [Integration & CLI Tests](integration-cli-tests.md) +- [E2E Tests](e2e-tests.md) +- [Package Tests](package-tests.md) +- [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md new file mode 100644 index 00000000..46b5c220 --- /dev/null +++ b/docs/troubleshooting.md @@ -0,0 +1,612 @@ +# Troubleshooting Guide + +This guide provides solutions for common issues encountered when running PMM-QA tests, debugging workflows, and resolving test failures. + +## 🔍 **Overview** + +This troubleshooting guide covers: +- Common workflow issues and solutions +- Test failure debugging strategies +- Infrastructure and environment problems +- Performance and timeout issues +- Access and permission problems + +## 🚨 **Common Workflow Issues** + +### Permission and Access Issues + +#### Workflow Permission Denied +```yaml +Issue: Cannot trigger GitHub Actions workflow +Error: "Permission denied" or workflow not visible + +Solutions: +✅ Verify repository access permissions +✅ Ensure "Actions: Write" permission +✅ Check organization/repository settings +✅ Validate user account status +✅ Review branch protection rules +``` + +#### Secret Access Issues +```yaml +Issue: Workflow fails due to missing secrets +Error: "Secret not found" or authentication failures + +Solutions: +✅ Verify secrets are configured in repository settings +✅ Check secret names match workflow requirements +✅ Ensure secrets are available to the branch +✅ Validate secret values are correct +✅ Review organization-level secret settings +``` + +### Version and Image Issues + +#### Image Not Found +```yaml +Issue: Docker image pull failures +Error: "Image not found" or "Pull access denied" + +Solutions: +✅ Verify image name and tag are correct +✅ Check image exists in registry +✅ Validate registry access permissions +✅ Try alternative image tags +✅ Check network connectivity to registry + +Example Fix: +# Instead of non-existent version +pmm_server_image: "perconalab/pmm-server:3.1.0" +# Use available version +pmm_server_image: "perconalab/pmm-server:3-dev-latest" +``` + +#### Version Compatibility Issues +```yaml +Issue: "Upgrade to the same version is forbidden!" 
+Error: Start and target versions are identical
+
+Solutions:
+✅ Choose different start/target version combinations
+✅ Verify version strings are different
+✅ Check version detection logic
+✅ Use explicit version numbers instead of aliases
+
+Example Fix:
+# Problematic configuration
+pmm_server_start_version: "latest"
+repository: "release"
+# Fixed configuration
+pmm_server_start_version: "latest"
+repository: "dev-latest"
+```
+
+## ⏱️ **Timeout and Performance Issues**
+
+### Test Timeouts
+
+#### General Test Timeouts
+```yaml
+Issue: Tests timing out after 40 minutes
+Common Causes:
+- Infrastructure setup delays
+- Network connectivity issues
+- Resource constraints
+- Database startup problems
+
+Solutions:
+✅ Check PMM server startup logs
+✅ Verify database container health
+✅ Review network connectivity
+✅ Monitor resource usage
+✅ Check for stuck processes
+
+Debugging Commands:
+docker ps -a                  # Check container status
+docker logs pmm-server        # Review server logs
+kubectl get pods              # Check K8s pod status (for Helm tests)
+```
+
+#### Database Setup Timeouts
+```yaml
+Issue: Database service setup takes too long
+Common Causes:
+- Image download delays
+- Container resource constraints
+- Network connectivity issues
+- Database initialization problems
+
+Solutions:
+✅ Check container image availability
+✅ Verify adequate system resources
+✅ Review database startup logs
+✅ Check network connectivity
+✅ Validate database configuration
+
+Monitoring Commands:
+docker stats                  # Monitor resource usage
+docker logs <container-name>  # Check container logs
+netstat -tuln                 # Check port availability
+```
+
+### Performance Degradation
+
+#### Slow Test Execution
+```yaml
+Issue: Tests running slower than expected
+Common Causes:
+- Resource contention
+- Network latency
+- Database performance issues
+- UI rendering delays
+
+Solutions:
+✅ Monitor system resources
+✅ Check for concurrent test runs
+✅ Optimize database configurations
+✅ Review network connectivity
+✅ Check browser/UI performance
+
+Performance Monitoring:
+top                           # System resource usage
+iotop                         # Disk I/O monitoring
+nethogs                       # Network usage per process
+```
+
+## 🗄️ **Database and Service Issues**
+
+### Database Connection Failures
+
+#### Service Setup Failures
+```yaml
+Issue: Database service fails to start
+Common Causes:
+- Port conflicts
+- Configuration errors
+- Resource constraints
+- Image compatibility issues
+
+Solutions:
+✅ Check port availability
+✅ Verify container health status
+✅ Review database credentials
+✅ Check resource limits
+✅ Validate image compatibility
+
+Debugging Steps:
+1. Check container status: docker ps -a
+2. Review container logs: docker logs <container-name>
+3. Verify port availability: netstat -tuln | grep <port>
+4. Check resource usage: docker stats
+5. Test connectivity: telnet <host> <port>
+```
+
+#### Metrics Collection Issues
+```yaml
+Issue: Metrics not being collected
+Common Causes:
+- Service registration failures
+- Network connectivity issues
+- Authentication problems
+- Exporter configuration errors
+
+Solutions:
+✅ Verify service registration
+✅ Check exporter configuration
+✅ Review database permissions
+✅ Validate metrics endpoints
+✅ Check authentication credentials
+
+Verification Commands:
+curl http://localhost:9090/metrics   # Check metrics endpoint
+pmm-admin list                       # Verify service registration
+pmm-admin status                     # Check client status
+```
+
+### MongoDB-Specific Issues
+
+#### Replica Set Configuration
+```yaml
+Issue: MongoDB replica set setup fails
+Common Causes:
+- Network configuration issues
+- Timing problems in initialization
+- Resource constraints
+- Authentication issues
+
+Solutions:
+✅ Check replica set configuration
+✅ Verify network connectivity between nodes
+✅ Review MongoDB logs
+✅ Check authentication setup
+✅ Validate resource allocation
+
+MongoDB Debugging:
+mongo --eval "rs.status()"    # Check replica set status
+mongo --eval "db.stats()"     # Check database status
+docker exec mongo mongo --eval "rs.initiate()"   # Initialize replica set
+```
+
+### PostgreSQL-Specific Issues
+
+#### Extension Loading Issues
+```yaml
+Issue: pg_stat_monitor or pg_stat_statements not working
+Common Causes:
+- Extension not installed
+- Configuration not updated
+- PostgreSQL restart required
+- Permission issues
+
+Solutions:
+✅ Install required extensions
+✅ Update postgresql.conf
+✅ Restart PostgreSQL service
+✅ Check extension permissions
+✅ Verify extension functionality
+
+PostgreSQL Debugging:
+psql -c "SELECT * FROM pg_extension;"                 # List installed extensions
+psql -c "SHOW shared_preload_libraries;"              # Check loaded libraries
+psql -c "SELECT * FROM pg_stat_statements LIMIT 1;"   # Test extension
+```
+
+## 🎭 **UI and Browser Issues**
+
+### Browser-Related Failures
+
+#### Element Not Found Errors
+```yaml
+Issue: UI tests fail with "Element not found"
+Common Causes:
+- UI layout changes
+- Timing issues (elements not loaded)
+- Browser compatibility issues
+- Dynamic content loading
+
+Solutions:
+✅ Update element selectors
+✅ Add explicit waits for elements
+✅ Check for dynamic content loading
+✅ Verify page layout changes
+✅ Test with different browsers
+
+Playwright Debugging:
+npx playwright test --headed    # Run with visible browser
+npx playwright test --debug     # Run in debug mode
+npx playwright codegen          # Generate selectors
+```
+
+#### Authentication Issues
+```yaml
+Issue: Login failures in UI tests
+Common Causes:
+- Incorrect credentials
+- OAuth configuration issues
+- Session management problems
+- Authentication flow changes
+
+Solutions:
+✅ Verify login credentials
+✅ Check OAuth configuration
+✅ Review session management
+✅ Validate authentication flow
+✅ Check for CAPTCHA or 2FA
+
+Authentication Debugging:
+# Check PMM server authentication
+curl -k https://localhost/v1/auth/login \
+  -d '{"username":"admin","password":"admin"}'
+
+# Verify OAuth configuration
+echo $OAUTH_CLIENT_ID
+echo $OAUTH_CLIENT_SECRET
+```
+
+### Page Load Issues
+
+#### Slow Page Loading
+```yaml
+Issue: Pages load slowly or timeout
+Common Causes:
+- Server performance issues
+- Network latency
+- Large data sets
+- JavaScript execution problems
+
+Solutions:
+✅ Increase timeout settings
+✅ Check server performance
+✅ Optimize data queries
+✅ Review JavaScript errors
+✅ Monitor network requests
+
+Performance Debugging:
+# Browser developer tools
+1. Open F12 developer tools
+2. Go to Network tab
+3. Monitor request timing
+4. Check for slow requests
+5. Review JavaScript console for errors
+```
+
+## 🏗️ **Infrastructure Issues**
+
+### Kubernetes/Helm Issues
+
+#### Minikube Startup Failures
+```yaml
+Issue: Minikube fails to start
+Common Causes:
+- Insufficient system resources
+- Virtualization not enabled
+- Network configuration issues
+- Driver compatibility problems
+
+Solutions:
+✅ Check system resources (CPU, memory)
+✅ Enable virtualization in BIOS
+✅ Update Minikube version
+✅ Try different drivers
+✅ Clear Minikube cache
+
+Minikube Debugging:
+minikube status               # Check cluster status
+minikube logs                 # View cluster logs
+minikube delete && minikube start   # Reset cluster
+minikube config view          # Check configuration
+```
+
+#### Storage Driver Issues
+```yaml
+Issue: CSI storage driver installation fails
+Common Causes:
+- Kubernetes version incompatibility
+- Insufficient permissions
+- Resource constraints
+- Driver configuration errors
+
+Solutions:
+✅ Check Kubernetes version compatibility
+✅ Verify cluster permissions
+✅ Review driver installation logs
+✅ Check resource availability
+✅ Validate storage class configuration
+
+Storage Debugging:
+kubectl get pods -n kube-system                # Check system pods
+kubectl get storageclass                       # List storage classes
+kubectl describe pv                            # Check persistent volumes
+kubectl logs -n kube-system <csi-driver-pod>   # Check CSI driver logs
+```
+
+### Container Issues
+
+#### Docker Daemon Issues
+```yaml
+Issue: Docker operations fail
+Common Causes:
+- Docker daemon not running
+- Permission issues
+- Disk space problems
+- Network configuration issues
+
+Solutions:
+✅ Start Docker daemon
+✅ Add user to docker group
+✅ Free up disk space
+✅ Check Docker configuration
+✅ Restart Docker service
+
+Docker Debugging:
+systemctl status docker       # Check daemon status
+docker system df              # Check disk usage
+docker system prune           # Clean up space
+docker info                   # Check Docker info
+```
+
+## 📦 **Package Installation Issues**
+
+### Repository Configuration
+
+#### Package Not Found
+```yaml
+Issue: Package installation fails with "not found"
+Common Causes:
+- Repository not configured
+- Package version unavailable
+- Repository URL incorrect
+- GPG key issues
+
+Solutions:
+✅ Configure package repository
+✅ Update package cache
+✅ Verify package version exists
+✅ Check repository URL
+✅ Import GPG keys
+
+APT Debugging:
+apt update                    # Update package cache
+apt search pmm                # Search for packages
+apt-cache policy pmm3-client  # Check available versions
+apt-key list                  # List GPG keys
+```
+
+#### Permission Issues
+```yaml
+Issue: Package installation fails with permission errors
+Common Causes:
+- Insufficient privileges
+- SELinux/AppArmor restrictions
+- File system permissions
+- User account limitations
+
+Solutions:
+✅ Run with sudo/root privileges
+✅ Check SELinux/AppArmor settings
+✅ Verify file system permissions
+✅ Review user account capabilities
+✅ Check package manager configuration
+
+Permission Debugging:
+sudo -l                       # Check sudo permissions
+getenforce                    # Check SELinux status
+aa-status                     # Check AppArmor status
+ls -la /etc/apt/sources.list.d/   # Check repository files
+```
+
+## 🔄 **Network and Connectivity Issues**
+
+### Network Configuration
+
+#### Connectivity Problems
+```yaml
+Issue: Network connectivity failures
+Common Causes:
+- Firewall blocking connections
+- DNS resolution issues
+- Proxy configuration problems
+- Network routing issues
+
+Solutions:
+✅ Check firewall settings
+✅ Verify DNS resolution
+✅ Configure proxy settings
+✅ Test network connectivity
+✅ Review routing tables
+
+Network Debugging:
+ping google.com               # Test internet connectivity
+nslookup pmm-server           # Test DNS resolution
+telnet <host> <port>          # Test port connectivity
+curl -I https://github.com    # Test HTTPS connectivity
+netstat -rn                   # Check routing table
+```
+
+#### Port Conflicts
+```yaml
+Issue: Services fail to start due to port conflicts
+Common Causes:
+- Ports already in use
+- Multiple service instances
+- System services using ports
+- Previous test cleanup incomplete
+
+Solutions:
+✅ Check port availability
+✅ Stop conflicting services
+✅ Use alternative ports
+✅ Complete cleanup from previous tests
+✅ Configure port forwarding
+
+Port Debugging:
+netstat -tuln | grep :80      # Check port 80 usage
+lsof -i :3306                 # Check MySQL port usage
+ss -tuln                      # Modern netstat alternative
+fuser 9090/tcp                # Find process using port
+```
+
+## 🔧 **Debugging Strategies**
+
+### Log Analysis
+
+#### Collecting Logs
+```yaml
+Workflow Logs:
+1. Go to GitHub Actions tab
+2. Click on failed workflow run
+3. Expand failed job steps
+4. Copy/download log content
+
+Container Logs:
+docker logs pmm-server        # PMM server logs
+docker logs pmm-client        # PMM client logs
+kubectl logs <pod-name>       # Kubernetes pod logs
+
+Application Logs:
+tail -f /var/log/pmm/*.log    # PMM application logs
+journalctl -u pmm-agent       # Systemd service logs
+```
+
+#### Log Analysis Techniques
+```yaml
+Common Log Patterns to Look For:
+- "ERROR" or "FATAL" messages
+- "Connection refused" or "timeout" errors
+- "Permission denied" messages
+- "Out of memory" or resource errors
+- HTTP error codes (4xx, 5xx)
+
+Useful Commands:
+grep -i error /var/log/pmm/*  # Find error messages
+journalctl -f                 # Follow system logs
+dmesg | tail                  # Check kernel messages
+```
+
+### Test Isolation
+
+#### Reproducing Issues Locally
+```yaml
+Steps to Reproduce:
+1. Use same parameters as failed workflow
+2. Set up identical environment
+3. Run tests step by step
+4. Monitor logs and resources
+5. Identify failure point
+
+Local Testing Commands:
+# Set up local environment
+docker-compose up -d
+
+# Run specific test
+npx playwright test <test-file>
+
+# Run with debugging
+npx playwright test --headed --debug
+```
+
+## 📞 **Getting Help**
+
+### Information to Collect
+```yaml
+When Reporting Issues:
+✅ Workflow name and run ID
+✅ Complete error messages
+✅ Configuration parameters used
+✅ Environment details
+✅ Steps to reproduce
+✅ Expected vs actual behavior
+```
+
+### Escalation Process
+```yaml
+1. Check this troubleshooting guide
+2. Search existing GitHub issues
+3. Review workflow logs thoroughly
+4. Try reproducing locally
+5. Create detailed issue report
+6. Contact PMM QA team if needed
+```
+
+### Useful Resources
+```yaml
+Documentation:
+- PMM Documentation: https://docs.percona.com/pmm/
+- Playwright Docs: https://playwright.dev/
+- Docker Docs: https://docs.docker.com/
+- Kubernetes Docs: https://kubernetes.io/docs/
+
+Community:
+- PMM GitHub Issues
+- Percona Community Forums
+- PMM QA Team Channels
+```
+
+---
+
+**Related Documentation**:
+- [Integration & CLI Tests](integration-cli-tests.md)
+- [E2E Tests](e2e-tests.md)
+- [Test Parameters Reference](test-parameters.md)
+- [Main Documentation](README.md)
\ No newline at end of file
diff --git a/docs/upgrade-tests.md b/docs/upgrade-tests.md
new file mode 100644
index 00000000..0dbe6bfd
--- /dev/null
+++ b/docs/upgrade-tests.md
@@ -0,0 +1,198 @@
+# Upgrade Tests
+
+This guide provides instructions for running the PMM upgrade tests locally.
These tests validate the PMM upgrade process, ensuring data integrity and functionality are maintained across versions. + +## 💡 **What are Upgrade Tests?** + +Upgrade tests are critical for ensuring a smooth user experience when new versions of PMM are released. They verify that: + +- **The upgrade process is successful**: Whether using the UI, Docker, or Podman, the upgrade completes without errors. +- **Data is preserved**: All historical monitoring data, user configurations, and settings are maintained after the upgrade. +- **Functionality remains intact**: All features of PMM, from monitoring and alerting to QAN, continue to work correctly. + +## 🤖 **How to Run Upgrade Tests Locally** + +The following steps will guide you through setting up an older version of PMM, performing an upgrade, and running validation tests locally. These instructions are based on the `runner-e2e-upgrade-tests.yml` CI workflow. + +### **Prerequisites** + +- **Git**: To clone the required repositories. +- **Docker**: To run the PMM server and other services. +- **Node.js (v18+)** and **npm**: For running the test framework. +- **Python 3** and **pip**: For running setup scripts. + +### **Step 1: Set Up the Initial PMM Environment** + +First, set up the environment with the *starting* version of PMM Server and Client that you want to test the upgrade from. + +1. **Clone the repositories**: + + ```bash + git clone --branch main https://github.com/percona/pmm-ui-tests.git + git clone --branch main https://github.com/percona/pmm-qa.git + ``` + +2. **Set up the PMM Server**: + + Start a PMM server container with a specific older version tag (e.g., `2.41.0`). + + ```bash + cd pmm-qa/pmm-integration + npm install + sudo npx ts-node ./integration-setup.ts --ci --setup-docker-pmm-server --rbac --pmm-server-docker-tag=percona/pmm-server:2.41.0 + cd ../.. + ``` + +3. **Set up the PMM Client and Services**: + + Install the corresponding older version of the PMM client and add some services to be monitored. + + ```bash + sudo ./pmm-qa/pmm-tests/pmm2-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 3.1.0 --admin_password admin + sudo ./pmm-qa/pmm-tests/pmm-framework.sh --addclient=ps,1 --pmm2 --pmm2-server-ip=127.0.0.1 + ``` + +### **Step 2: Run Pre-Upgrade Tests** + +Before performing the upgrade, run the pre-upgrade tests. These tests capture the state of the system before the upgrade to compare it with the post-upgrade state. + +```bash +cd pmm-ui-tests/playwright-tests +npm install +npx playwright install +npx playwright test --grep="@config-pre-upgrade" +cd ../.. +``` + +### **Step 3: Perform the PMM Upgrade** + +Now, perform the upgrade using one of the following methods: + +#### **UI Way Upgrade** + +1. **Enable the target repository** in the PMM server container. + + ```bash + docker exec pmm-integration-server percona-release enable-only pmm3-client dev-latest + ``` + +2. **Run the UI upgrade test**. + + ```bash + cd pmm-ui-tests/playwright-tests + npx playwright test --grep="@pmm-upgrade" + cd ../.. + ``` + +#### **Docker Way Upgrade** + +1. **Stop and replace the PMM server container** with the new version. 
+
+   ```bash
+   docker stop pmm-integration-server
+   # Remove the stopped container so its name can be reused by the new one
+   docker rm pmm-integration-server
+   docker pull perconalab/pmm-server:3-dev-latest
+   docker run --detach --restart always --network="pmm-integration-network" -p 80:80 -p 443:443 --volumes-from pmm-integration-server-data --name pmm-integration-server perconalab/pmm-server:3-dev-latest
+   ```
+
+### **Step 4: Run Post-Upgrade Tests**
+
+After the upgrade is complete, run the post-upgrade tests to validate that everything is still working as expected.
+
+```bash
+cd pmm-ui-tests/playwright-tests
+npx playwright test --grep="@config-post-upgrade"
+cd ../..
+```
+
+By comparing the results of the pre-upgrade and post-upgrade tests, you can verify the success of the upgrade process.
+
+## 📝 **How to Write Upgrade Tests**
+
+Upgrade tests are complex and typically involve a sequence of steps across different tools and environments. They combine environment setup, UI interactions, and assertions to verify the upgrade process and data integrity.
+
+### **Test Structure and Directory Layout**
+
+Upgrade tests are primarily orchestrated through Playwright test files, which call out to Python scripts for environment setup and management. The relevant files are located in the `pmm-ui-tests/playwright-tests/tests/upgrade` directory and the `qa-integration/pmm_qa` directory.
+
+```
+pmm-ui-tests/
+├── playwright-tests/
+│   ├── tests/
+│   │   └── upgrade/              # Playwright test files for upgrade scenarios
+│   │       ├── basic_upgrade.spec.ts
+│   │       └── ...
+qa-integration/
+├── pmm_qa/                       # Python scripts for environment setup
+│   ├── pmm-framework.py          # Main script for setting up services
+│   ├── pmm2-client-setup.sh      # Script for PMM client setup
+│   └── ...
+```
+
+- **`pmm-ui-tests/playwright-tests/tests/upgrade/`**: Contains the Playwright test files (`.spec.ts`) that define the upgrade scenarios. These tests will typically navigate the PMM UI to trigger upgrades or verify post-upgrade states.
+- **`qa-integration/pmm_qa/`**: This directory holds the Python and Bash scripts (`pmm-framework.py`, `pmm2-client-setup.sh`) used to set up the initial PMM environment (server and client) at a specific version, and to manage services before and after the upgrade.
+
+### **Writing Conventions**
+
+- **Orchestration**: Playwright tests act as the orchestrator, calling external scripts (e.g., Python `pmm-framework.py` via `cli.exec` or a similar helper) to set up the initial PMM environment with a specific older version.
+- **Pre-Upgrade Validation**: Use Playwright to interact with the UI and verify the state of PMM *before* the upgrade. This might involve checking dashboard data, service lists, or configuration settings.
+- **Upgrade Execution**: Trigger the upgrade process. This can be done via UI interaction (e.g., clicking an upgrade button), or by executing shell commands (e.g., `docker pull` and `docker run` for Docker-based upgrades).
+- **Post-Upgrade Validation**: After the upgrade, use Playwright to verify that PMM is functioning correctly, data is preserved, and new features are available. This often involves re-running the same checks as the pre-upgrade validation and adding new ones for the upgraded version.
+- **Version Management**: Be mindful of the PMM server and client versions. Upgrade tests specifically target upgrades *from* an older version *to* a newer version.
+- **Tags**: Use `@` tags (e.g., `@config-pre-upgrade`, `@config-post-upgrade`, `@pmm-upgrade`) to categorize different phases or aspects of the upgrade tests.
+
+### **Basic Upgrade Test Flow (Conceptual)**
+
+```typescript
+import { test, expect } from '@playwright/test';
+// Assume cli helper is available for executing shell commands
+import * as cli from '@helpers/cli-helper';
+
+test.describe('PMM Upgrade Scenario', () => {
+  test.beforeAll(async () => {
+    // Step 1: Set up PMM Server and Client at an older version
+    // This would involve calling pmm-qa/pmm-integration/integration-setup.ts
+    // and qa-integration/pmm_qa/pmm2-client-setup.sh
+    console.log('Setting up PMM Server and Client at older version...');
+    // Example: await cli.exec('sudo npx ts-node pmm-qa/pmm-integration/integration-setup.ts --pmm-server-docker-tag=percona/pmm-server:2.41.0');
+    // Example: await cli.exec('sudo pmm-qa/pmm-tests/pmm2-client-setup.sh --client_version 2.41.0');
+  });
+
+  test('should perform pre-upgrade checks', async ({ page }) => {
+    // Navigate to PMM UI and perform checks before upgrade
+    await page.goto('http://localhost/');
+    await expect(page.locator('text=Dashboard')).toBeVisible();
+    // Assertions for existing data, configurations, etc.
+    // await expect(page.locator('text=Some old feature')).toBeVisible();
+  });
+
+  test('should perform UI upgrade', async ({ page }) => {
+    // Navigate to upgrade section in UI
+    // Click upgrade button
+    // Wait for upgrade to complete
+    console.log('Triggering UI upgrade...');
+    // Example: await page.locator('button[data-testid="upgrade-button"]').click();
+    // await page.waitForSelector('text=Upgrade Complete');
+  });
+
+  test('should perform post-upgrade checks', async ({ page }) => {
+    // Navigate to PMM UI and perform checks after upgrade
+    await page.goto('http://localhost/');
+    await expect(page.locator('text=New Dashboard Feature')).toBeVisible();
+    // Assertions for data persistence, new features, etc.
+    // await expect(page.locator('text=Some old feature')).toBeVisible(); // Should still be there
+  });
+});
+```
+
+**Note**: The example above is conceptual and simplified. Actual upgrade tests involve more intricate setup, version management, and detailed assertions across various PMM components.
+
+---
+
+**Related Documentation**:
+- [E2E Tests](e2e-tests.md)
+- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md)
+- [Infrastructure Tests](infrastructure-tests.md)
+- [Integration & CLI Tests](integration-cli-tests.md)
+- [Package Tests](package-tests.md)
+- [Test Parameters Reference](test-parameters.md)
+- [Troubleshooting Guide](troubleshooting.md)
\ No newline at end of file

From 2f95dad61415a5af290c9f3bb0a1a6bfb5a8be88 Mon Sep 17 00:00:00 2001
From: Nurlan Moldomurov
Date: Tue, 1 Jul 2025 02:03:21 +0300
Subject: [PATCH 2/8] PMM-7 Update workflows and documentation for PMM v3 support

- Changed default branches for PMM UI and QA tests to 'v3' in multiple workflow files.
- Updated environment variable setup in documentation for PMM Client configuration.
- Revised version matrix format in documentation for clarity.
- Adjusted PMM server version references in upgrade tests documentation.
--- .../workflows/e2e-upgrade-tests-matrix-full.yml | 10 +++++----- .github/workflows/e2e-upgrade-tests-matrix.yml | 8 ++++---- .github/workflows/e2e-upgrade-tests.yml | 4 ++-- .github/workflows/integration-cli-tests.yml | 15 +++++++++++++++ docs/feature-build-tests.md | 5 +++-- docs/package-tests.md | 2 +- docs/test-parameters.md | 2 +- docs/upgrade-tests.md | 10 +++++----- 8 files changed, 36 insertions(+), 20 deletions(-) diff --git a/.github/workflows/e2e-upgrade-tests-matrix-full.yml b/.github/workflows/e2e-upgrade-tests-matrix-full.yml index 22e56ce2..26aacdb0 100644 --- a/.github/workflows/e2e-upgrade-tests-matrix-full.yml +++ b/.github/workflows/e2e-upgrade-tests-matrix-full.yml @@ -24,7 +24,7 @@ on: - dev-latest versions_range: description: 'Last versions range:' - default: 5 + default: '5' required: true jobs: @@ -46,13 +46,13 @@ jobs: old_version: ${{ fromJSON(needs.get_versions.outputs.version_matrix) }} upgrade_type: ["UI way", "Docker way", "Podman way"] with: - pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'main' }} + pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'v3' }} pre_upgrade_tests: '@config-pre-upgrade' post_upgrade_tests: '@config-post-upgrade' pmm_server_start_version: ${{ matrix.old_version }} pmm_client_start_version: ${{ matrix.old_version }} upgrade_type: ${{ matrix.upgrade_type }} - pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'main' }} + pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'v3' }} services_list: '' repository: ${{ inputs.repository || 'dev-latest'}} version_string_from: ${{needs.get_versions.outputs.start_version}} @@ -69,13 +69,13 @@ jobs: old_version: ${{ fromJSON(needs.get_versions.outputs.version_matrix) }} upgrade_type: ["UI way", "Docker way", "Podman way"] with: - pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'main' }} + pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'v3' }} pre_upgrade_tests: '@rbac-pre-upgrade' post_upgrade_tests: '@rbac-post-upgrade' pmm_server_start_version: ${{ matrix.old_version }} pmm_client_start_version: ${{ matrix.old_version }} upgrade_type: ${{ matrix.upgrade_type }} - pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'main' }} + pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'v3' }} services_list: '--addclient=ps,1 --addclient=pdpgsql,1' repository: ${{ inputs.repository || 'dev-latest'}} version_string_from: ${{needs.get_versions.outputs.start_version}} diff --git a/.github/workflows/e2e-upgrade-tests-matrix.yml b/.github/workflows/e2e-upgrade-tests-matrix.yml index ef0d23ad..035ed4e7 100644 --- a/.github/workflows/e2e-upgrade-tests-matrix.yml +++ b/.github/workflows/e2e-upgrade-tests-matrix.yml @@ -50,13 +50,13 @@ jobs: matrix: old_version: ${{ fromJSON(needs.get_versions.outputs.version_matrix) }} with: - pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'main' }} + pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'v3' }} pre_upgrade_tests: '@config-pre-upgrade' post_upgrade_tests: '@config-post-upgrade' pmm_server_start_version: ${{ matrix.old_version }} pmm_client_start_version: ${{ matrix.old_version }} upgrade_type: ${{ inputs.upgrade_type || 'Docker way' }} - pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'main' }} + pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'v3' }} services_list: '' repository: ${{ inputs.repository || 'dev-latest'}} version_string_from: ${{needs.get_versions.outputs.start_version}} @@ -72,13 +72,13 @@ jobs: matrix: old_version: ${{ fromJSON(needs.get_versions.outputs.version_matrix) }} with: - pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'main' }} + 
pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'v3' }} pre_upgrade_tests: '@rbac-pre-upgrade' post_upgrade_tests: '@rbac-post-upgrade' pmm_server_start_version: ${{ matrix.old_version }} pmm_client_start_version: ${{ matrix.old_version }} upgrade_type: ${{ inputs.upgrade_type || 'Docker way' }} - pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'main' }} + pmm_qa_branch: ${{ inputs.pmm_qa_branch || 'v3' }} services_list: '--addclient=ps,1 --addclient=pdpgsql,1' repository: ${{ inputs.repository || 'dev-latest'}} version_string_from: ${{needs.get_versions.outputs.start_version}} diff --git a/.github/workflows/e2e-upgrade-tests.yml b/.github/workflows/e2e-upgrade-tests.yml index baddcd8c..e47e5336 100644 --- a/.github/workflows/e2e-upgrade-tests.yml +++ b/.github/workflows/e2e-upgrade-tests.yml @@ -42,12 +42,12 @@ on: inputs: pmm_ui_tests_branch: description: 'pmm-ui-tests repository branch' - default: 'main' + default: 'v3' type: string required: true pmm_qa_branch: description: 'pmm-qa repository branch(for setup)' - default: 'main' + default: 'v3' type: string required: true pmm_server_start_version: diff --git a/.github/workflows/integration-cli-tests.yml b/.github/workflows/integration-cli-tests.yml index ad7dc126..a610aee5 100644 --- a/.github/workflows/integration-cli-tests.yml +++ b/.github/workflows/integration-cli-tests.yml @@ -387,3 +387,18 @@ jobs: cli_test: 'proxySql' services_list: '--database pxc=8.0' test_name: 'PXC 8.0' + + external-tests: + name: 'CLI / Integration' + uses: ./.github/workflows/runner-integration-cli-tests.yml + secrets: inherit + with: + sha: ${{ inputs.sha || github.event.pull_request.head.sha || 'null' }} + pmm_ui_tests_branch: ${{ inputs.pmm_ui_tests_branch || 'v3' }} + qa_integration_branch: ${{ inputs.qa_integration_branch || 'v3' }} + pmm_server_image: ${{ inputs.pmm_server_image || 'perconalab/pmm-server:3-dev-latest' }} + pmm_client_image: ${{ inputs.pmm_client_image || 'perconalab/pmm-client:3-dev-latest' }} + pmm_client_version: ${{ inputs.pmm_client_version || '3-dev-latest' }} + cli_test: 'external' + services_list: '--database=external' + test_name: 'External' diff --git a/docs/feature-build-tests.md b/docs/feature-build-tests.md index 29ecce17..3dece600 100644 --- a/docs/feature-build-tests.md +++ b/docs/feature-build-tests.md @@ -63,10 +63,11 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc 5. **Set Up PMM Client** - This step configures the PMM Client to connect to your local PMM Server: + This step configures the PMM Client to connect to your local PMM Server. First, dynamically retrieve the PMM Server container's IP address and export it as an environment variable: ```bash + export PMM_SERVER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pmm-server) cd qa-integration/pmm_qa - sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no + sudo bash -x pmm3-client-setup.sh --pmm_server_ip $PMM_SERVER_IP --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no cd ../.. 
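# Optional verification (our suggestion; not part of the documented CI steps):
# after setup, `pmm-admin status` should show the client connected to the server.
# sudo pmm-admin status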
``` diff --git a/docs/package-tests.md b/docs/package-tests.md index 66c8bcef..041361cd 100644 --- a/docs/package-tests.md +++ b/docs/package-tests.md @@ -38,7 +38,7 @@ Before running the client installation tests, you need a running PMM server for ```bash docker create -v /srv --name pmm-server-data perconalab/pmm-server:3-dev-latest docker run -d -p 80:80 -p 443:8443 --volumes-from pmm-server-data --name pmm-server --restart always perconalab/pmm-server:3-dev-latest -timeout 240 bash -c 'while [[ "$(curl -k -s -o /dev/null -w ''%{http_code}'' https://127.0.0.1:443/v1/readyz)" != "200" ]]; do sleep 2; done' || false +timeout 240 bash -c 'while [[ "$(curl -k -s -o /dev/null -w '%{http_code}' https://127.0.0.1:443/v1/readyz)" != "200" ]]; do sleep 2; done' || false ``` ### **Step 3: Configure and Run Vagrant** diff --git a/docs/test-parameters.md b/docs/test-parameters.md index 47079807..e72da2d1 100644 --- a/docs/test-parameters.md +++ b/docs/test-parameters.md @@ -314,7 +314,7 @@ os: "rocky-9" # Rocky Linux 9 ### Version Matrix ```yaml matrix_range: "10" # Number of versions to test -version_matrix: "["3.0.0", "3.1.0", "3.2.0"]" +version_matrix: ["3.0.0", "3.1.0", "3.2.0"] pt_os_matrix: "[\"bullseye\", \"bookworm\", \"noble\"]" ``` diff --git a/docs/upgrade-tests.md b/docs/upgrade-tests.md index 0dbe6bfd..dd94a565 100644 --- a/docs/upgrade-tests.md +++ b/docs/upgrade-tests.md @@ -28,18 +28,18 @@ First, set up the environment with the *starting* version of PMM Server and Clie 1. **Clone the repositories**: ```bash - git clone --branch main https://github.com/percona/pmm-ui-tests.git - git clone --branch main https://github.com/percona/pmm-qa.git + git clone --branch v3 https://github.com/percona/pmm-ui-tests.git + git clone --branch v3 https://github.com/percona/pmm-qa.git ``` 2. **Set up the PMM Server**: - Start a PMM server container with a specific older version tag (e.g., `2.41.0`). + Start a PMM server container with a specific older version tag (e.g., `2.44.1`). ```bash cd pmm-qa/pmm-integration npm install - sudo npx ts-node ./integration-setup.ts --ci --setup-docker-pmm-server --rbac --pmm-server-docker-tag=percona/pmm-server:2.41.0 + sudo npx ts-node ./integration-setup.ts --ci --setup-docker-pmm-server --rbac --pmm-server-docker-tag=percona/pmm-server:2.44.1 cd ../.. ``` @@ -48,7 +48,7 @@ First, set up the environment with the *starting* version of PMM Server and Clie Install the corresponding older version of the PMM client and add some services to be monitored. ```bash - sudo ./pmm-qa/pmm-tests/pmm2-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 3.1.0 --admin_password admin + sudo ./pmm-qa/pmm-tests/pmm2-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 2.44.1 --admin_password admin sudo ./pmm-qa/pmm-tests/pmm-framework.sh --addclient=ps,1 --pmm2 --pmm2-server-ip=127.0.0.1 ``` From 16df64a489c7a7a4deac5643d0d47ed4dcb37bd5 Mon Sep 17 00:00:00 2001 From: Nurlan Moldomurov Date: Tue, 1 Jul 2025 16:37:22 +0300 Subject: [PATCH 3/8] Update test steps in documentation for E2E and feature build tests - Renumbered the test execution steps in the E2E CodeceptJS documentation from Step 2 to Step 6. - Renumbered the test execution steps in the Feature Build tests documentation from Step 6 to Step 8. 
--- docs/e2e-codeceptjs-tests.md | 2 +- docs/feature-build-tests.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/e2e-codeceptjs-tests.md b/docs/e2e-codeceptjs-tests.md index d5ab7e59..8ee305db 100644 --- a/docs/e2e-codeceptjs-tests.md +++ b/docs/e2e-codeceptjs-tests.md @@ -93,7 +93,7 @@ npm ci npx playwright install --with-deps ``` -### **Step 2: Run the Tests** +### **Step 6: Run the Tests** Run the CodeceptJS tests using the appropriate tags. The setup for the services will vary depending on the test. diff --git a/docs/feature-build-tests.md b/docs/feature-build-tests.md index 3dece600..2f8177c5 100644 --- a/docs/feature-build-tests.md +++ b/docs/feature-build-tests.md @@ -96,7 +96,7 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc envsubst < env.list > env.generated.list ``` -### **Step 6: Run the Tests** +### **Step 8: Run the Tests** Finally, run the E2E tests for the specific feature. Use the appropriate tag for the test suite you want to run. From 74dc415a66359ff4f3d20ebbea8859f988345913 Mon Sep 17 00:00:00 2001 From: Nurlan Moldomurov Date: Wed, 9 Jul 2025 13:55:08 +0300 Subject: [PATCH 4/8] Update documentation to reflect deprecation of legacy BATS tests and introduce new testing frameworks - Added important notices in README.md and various documentation files regarding the deprecation of the `pmm-tests/` directory containing BATS tests. - Created a new DEPRECATED.md file to outline the status and migration guidance for legacy tests. - Updated multiple documentation files to clarify the current testing frameworks (TypeScript/Playwright) and their usage. - Enhanced the infrastructure tests documentation to differentiate between maintained Helm tests and deprecated BATS tests. --- README.md | 6 +- docs/README.md | 27 ++- docs/adding-new-environments.md | 342 ++++++++++++++++++++++++++++++++ docs/infrastructure-tests.md | 2 +- docs/integration-cli-tests.md | 2 + docs/upgrade-tests.md | 2 + pmm-tests/DEPRECATED.md | 42 ++++ 7 files changed, 417 insertions(+), 6 deletions(-) create mode 100644 docs/adding-new-environments.md create mode 100644 pmm-tests/DEPRECATED.md diff --git a/README.md b/README.md index fa6005c8..1a303fa5 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # PMM-QA Automated tests for Percona Monitoring and Management +> **⚠️ IMPORTANT**: The `pmm-tests/` directory containing BATS tests is **deprecated**. See [documentation](docs/README.md#important-notice-legacy-tests-deprecation) and [pmm-tests/DEPRECATED.md](pmm-tests/DEPRECATED.md) for migration guidance. + ## Test Architecture Overview This project employs a comprehensive testing strategy, utilizing various frameworks and methodologies to ensure the quality and stability of Percona Monitoring and Management (PMM). The tests are broadly categorized by their focus and the tools they use: @@ -10,6 +12,8 @@ This project employs a comprehensive testing strategy, utilizing various framewo - **Package Tests**: These tests verify the installation and functionality of PMM client packages across various operating systems. They leverage Vagrant for virtualized environments and Ansible for automation. - **Infrastructure Tests**: These tests validate PMM deployments in different environments, including Kubernetes/Helm and using the Easy Install script. They utilize Bats for testing Helm deployments. +**Note**: The legacy BATS tests in `pmm-tests/` are deprecated. Current testing uses TypeScript/Playwright frameworks described in the [documentation](docs/). 
+ Each test type has its own dedicated documentation, detailing how to run and write tests, along with their specific directory structures and conventions. @@ -26,7 +30,7 @@ Understanding the layout of the key repositories involved in PMM QA is essential ├── docs/ # Project documentation ├── k8s/ # Kubernetes/Helm test scripts (Bats) ├── pmm-integration/ # PMM integration setup scripts (TypeScript) -├── pmm-tests/ # PMM test scripts (Python, Bash) +├── pmm-tests/ # ⚠️ DEPRECATED PMM test scripts (BATS/Bash) ├── tests/ # General test utilities ├── .gitignore ├── docker-compose.yml diff --git a/docs/README.md b/docs/README.md index 91ce6ab9..f9be2ad5 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,6 +2,23 @@ Welcome to the PMM-QA comprehensive testing documentation. This directory contains detailed guides for running various types of tests in the PMM (Percona Monitoring and Management) QA repository. +## ⚠️ **Important Notice: Legacy Tests Deprecation** + +> **DEPRECATION NOTICE**: The `pmm-tests/` directory containing BATS (Bash Automated Testing System) tests is **deprecated** and should not be used for new test development. +> +> **Use instead**: +> - **CLI Testing**: TypeScript/Playwright tests in `cli-tests/` (see [Integration & CLI Tests](integration-cli-tests.md)) +> - **UI Testing**: Playwright tests in `playwright-tests/` (see [End-to-End Tests](e2e-tests.md)) +> - **Infrastructure Setup**: Python framework in `qa-integration/pmm_qa/` (see [Adding New Environments](adding-new-environments.md)) +> +> **Migration Timeline**: Existing BATS tests will be gradually migrated to the new framework. No new BATS tests should be created. +> +> **Status**: +> - ❌ **pmm-tests/*.bats** - Deprecated, maintenance mode only +> - ✅ **cli-tests/** - Current CLI testing framework +> - ✅ **playwright-tests/** - Current UI testing framework +> - ✅ **qa-integration/pmm_qa/** - Current infrastructure framework + ## 📚 **Documentation Overview** This documentation is organized by test type to provide focused guidance for different testing scenarios: @@ -21,6 +38,7 @@ This documentation is organized by test type to provide focused guidance for dif | Document | Description | |----------|-------------| +| [Adding New Environments](adding-new-environments.md) | Guide for extending the PMM framework | | [Test Parameters Reference](test-parameters.md) | Complete parameter documentation | | [Troubleshooting Guide](troubleshooting.md) | Common issues and solutions | @@ -88,10 +106,11 @@ Frequency: Per package release - **Proxy/Load Balancers**: ProxySQL, HAProxy ### **Testing Frameworks** -- **CLI Testing**: Playwright (TypeScript) -- **UI Testing**: Playwright, CodeceptJS -- **Infrastructure Testing**: BATS (Bash Automated Testing System) -- **Package Testing**: Ansible playbooks +- **CLI Testing**: Playwright (TypeScript) - Current framework +- **UI Testing**: Playwright - Current framework +- **Infrastructure Setup**: Python/Ansible - Current framework +- **Package Testing**: Ansible playbooks - Current framework +- **Legacy Testing**: ⚠️ BATS (Bash) - Deprecated, maintenance mode only --- diff --git a/docs/adding-new-environments.md b/docs/adding-new-environments.md new file mode 100644 index 00000000..1971cf6e --- /dev/null +++ b/docs/adding-new-environments.md @@ -0,0 +1,342 @@ +# Adding New Environments to PMM Framework + +This guide explains how to add new database types and environments to the PMM qa-integration framework. 
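+
+For orientation, a registered environment is driven from the framework's CLI. A minimal sketch, using invocations that appear later in this guide (flag values are examples, not an exhaustive reference):
+
+```bash
+# Stand up a Percona Server environment against a local PMM server
+python3 pmm-framework.py --pmm-server-password=admin-password --database ps
+
+# Pass per-environment options as KEY=value pairs after the database type
+python3 pmm-framework.py --database external,USE_TLS=true
+```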
+ +## Overview + +The PMM framework uses a Python-based system (`pmm-framework.py`) with Ansible playbooks to set up various database and service environments for testing. Adding a new environment involves several coordinated changes. + +## Architecture + +The framework consists of: + +- **`pmm-framework.py`** - Main Python script that orchestrates setup +- **`database_options.py`** - Configuration definitions for all database types +- **Ansible playbooks** (`.yml` files) - Infrastructure automation scripts +- **Helper scripts** - Supporting bash/shell scripts + +## Step-by-Step Guide + +### 1. Define Database Configuration + +Edit `qa-integration/pmm_qa/scripts/database_options.py`: + +```python +# Add your new database type to the database_options dictionary +"YOUR_DB_TYPE": { + "versions": ["1.0", "2.0", "latest"], # Available versions + "configurations": { + "CLIENT_VERSION": "3-dev-latest", # Default PMM client version + "CUSTOM_OPTION": "default_value", # Your custom configuration options + # Add more configuration options as needed + } +}, +``` + +**Example from our external TLS implementation:** +```python +"EXTERNAL_TLS": { + "versions": ["0.15.1", "0.16.0"], + "configurations": { + "CLIENT_VERSION": "3-dev-latest", + "SKIP_TLS_VERIFY": "true" + } +}, +``` + +### 2. Create Setup Function + +Add a new setup function in `qa-integration/pmm_qa/pmm-framework.py`: + +```python +def setup_your_db_type(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather version details + your_version = os.getenv('YOUR_VERSION') or db_version or database_configs[db_type]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'YOUR_VERSION': your_version, + 'YOUR_CONTAINER': 'your_container_name', + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3', + # Add your custom environment variables + 'CUSTOM_OPTION': get_value('CUSTOM_OPTION', db_type, args, db_config), + } + + # Ansible playbook filename + playbook_filename = 'your_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) +``` + +### 3. Register in Setup Database Function + +Add your database type to the routing logic in the `setup_database()` function: + +```python +def setup_database(db_type, db_version=None, db_config=None, args=None): + # ... existing code ... + + elif db_type == 'YOUR_DB_TYPE': + setup_your_db_type(db_type, db_version, db_config, args) + + # ... rest of existing code ... +``` + +### 4. 
Create Ansible Playbook + +Create `qa-integration/pmm_qa/your_setup.yml`: + +```yaml +--- +# Description of what this playbook does + +- hosts: all + vars: + # Define variables using the lookup pattern + your_version: "{{ lookup('vars', 'extra_your_version', default=lookup('env','YOUR_VERSION') | default('1.0', true) ) }}" + your_container: "{{ lookup('vars', 'extra_your_container', default=lookup('env','YOUR_CONTAINER') | default('your_default_container', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('v3', true) ) }}" + + tasks: + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Cleanup existing containers + shell: > + docker ps -a --filter "name={{ your_container }}" | grep -q . && docker stop {{ your_container }} && docker rm -fv {{ your_container }} + ignore_errors: true + tags: + - cleanup + + # Add your setup tasks here + - name: Setup your environment + shell: echo "Setting up your environment" + + # Standard PMM client setup pattern + - name: Prepare Container + shell: > + docker run -d --name={{ your_container }} + --network pmm-qa + phusion/baseimage:jammy-1.0.1 + + - name: Install basic packages + shell: "{{ item }}" + with_items: + - docker exec {{ your_container }} apt-get update + - docker exec {{ your_container }} apt-get -y install wget curl git gnupg2 lsb-release + + - name: Setup PMM client + shell: "{{ item }}" + with_items: + - docker cp ./pmm3-client-setup.sh {{ your_container }}:/ + - docker exec {{ your_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + # Add your services to PMM monitoring + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Add service to PMM monitoring + shell: > + docker exec {{ your_container }} bash -c 'source ~/.bash_profile || true; + pmm-admin add external --listen-port=YOUR_PORT --group="your_group" + --service-name=your_service_{{ random_number }}' + + - name: Display service information + shell: > + docker exec {{ your_container }} bash -c 'source ~/.bash_profile || true; + pmm-admin list' + register: pmm_services + + - name: Show PMM services + debug: + msg: "{{ pmm_services.stdout }}" +``` + +### 5. Test Your Implementation + +```bash +# Test syntax +python3 -m py_compile qa-integration/pmm_qa/pmm-framework.py + +# Test Ansible syntax +ansible-playbook --syntax-check qa-integration/pmm_qa/your_setup.yml + +# Test the functionality +python3 pmm-framework.py --database your_db_type +``` + +## Best Practices + +### 1. Naming Conventions + +- **Database types**: Use UPPERCASE with underscores (e.g., `EXTERNAL_TLS`, `MY_DATABASE`) +- **Container names**: Use lowercase with underscores (e.g., `my_database_container`) +- **Playbook files**: Use lowercase with underscores (e.g., `my_database_setup.yml`) + +### 2. 
Environment Variables + +- Use consistent naming patterns +- Provide sensible defaults +- Support both environment variables and command-line arguments + +### 3. Error Handling + +```python +# Always check if PMM server is running +container_name = get_running_container_name() +if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() +``` + +### 4. Cleanup Support + +Always include cleanup tasks in your Ansible playbook: + +```yaml +- name: Cleanup existing containers + shell: > + docker ps -a --filter "name={{ your_container }}" | grep -q . && docker stop {{ your_container }} && docker rm -fv {{ your_container }} + ignore_errors: true + tags: + - cleanup +``` + +### 5. Documentation + +Create a README file for your new environment: + +```markdown +# Your Database Type Setup + +## Usage +python3 pmm-framework.py --database your_db_type + +## What it Creates +- Description of containers and services + +## Configuration Options +- List of available options + +## Testing +- How to verify the setup works +``` + +## Example: External Setup with TLS Enhancement + +Here's how we enhanced the existing External setup to support TLS testing: + +1. **Database Configuration Enhancement** (`database_options.py`): +```python +"EXTERNAL": { + "REDIS": { + "versions": ["1.14.0", "1.58.0"], + }, + "NODEPROCESS": { + "versions": ["0.7.5", "0.7.10"], + }, + "configurations": {"CLIENT_VERSION": "3-dev-latest", "USE_TLS": "false"} +}, +``` + +2. **Setup Function Enhancement** (`pmm-framework.py`): +```python +def setup_external(db_type, db_version=None, db_config=None, args=None): + # Added TLS support to existing function + env_vars = { + # ... existing vars ... + 'USE_TLS': get_value('USE_TLS', db_type, args, db_config), + } +``` + +3. **Ansible Playbook Enhancement** (`external_setup.yml`): + - Keeps existing Redis and Node Process functionality + - Conditionally creates TLS test server when USE_TLS=true + - Uses `--tls-skip-verify` flag when TLS mode is enabled + - Maintains backward compatibility + +4. **Usage**: +```bash +# Default behavior (unchanged) +python3 pmm-framework.py --database external + +# Enhanced with TLS testing +python3 pmm-framework.py --database external,USE_TLS=true +``` + +This approach demonstrates how to enhance existing environments rather than creating entirely new ones. + +## Common Patterns + +### Database with Version Support + +```python +# In setup function +db_version = os.getenv('DB_VERSION') or db_version or database_configs[db_type]["versions"][-1] +``` + +### Multiple Container Setup + +```yaml +# In Ansible playbook +- name: Start database container + shell: docker run -d --name database_container ... + +- name: Start exporter container + shell: docker run -d --name exporter_container ... +``` + +### Custom Configuration Options + +```python +# In setup function +custom_option = get_value('CUSTOM_OPTION', db_type, args, db_config) + +# In environment variables +'CUSTOM_OPTION': custom_option, +``` + +## Troubleshooting + +### Common Issues + +1. **Python Syntax Errors**: Use `python3 -m py_compile` to check +2. **Ansible Syntax Errors**: Use `ansible-playbook --syntax-check` +3. **Missing Dependencies**: Ensure all required packages are installed in containers +4. **Network Issues**: Always use the `pmm-qa` Docker network +5. **PMM Client Issues**: Verify PMM server is running and accessible + +### Testing Steps + +1. Syntax validation +2. Framework recognition (`python3 pmm-framework.py --help`) +3. 
Dry run with verbose output +4. Full integration test +5. Cleanup verification + +## Contributing + +When contributing new environments: + +1. Follow the established patterns +2. Include comprehensive tests +3. Add documentation +4. Ensure cleanup works properly +5. Test with different PMM server configurations + +This approach ensures consistency and maintainability across all PMM framework environments. \ No newline at end of file diff --git a/docs/infrastructure-tests.md b/docs/infrastructure-tests.md index d5f3912b..e20930ed 100644 --- a/docs/infrastructure-tests.md +++ b/docs/infrastructure-tests.md @@ -93,7 +93,7 @@ You should see the `pmm-server` and `watchtower` containers running. All paths mentioned in this section are relative to the root of the `pmm-qa` repository, which can be found [here](https://github.com/percona/pmm-qa/tree/v3). -Helm tests in this project are written using Bats (Bash Automated Testing System). Bats provides a simple way to test shell scripts and command-line tools. Helm tests typically involve deploying a Helm chart and then asserting on the state of the Kubernetes resources or the behavior of the deployed application. +Helm tests in this project are written using Bats (Bash Automated Testing System). **Note**: This is different from the deprecated BATS tests in `pmm-tests/` - Helm-specific BATS tests in `k8s/` directory are still actively maintained for Kubernetes testing. Bats provides a simple way to test shell scripts and command-line tools. Helm tests typically involve deploying a Helm chart and then asserting on the state of the Kubernetes resources or the behavior of the deployed application. ### **Test Structure and Directory Layout** diff --git a/docs/integration-cli-tests.md b/docs/integration-cli-tests.md index eda526e0..c5b680aa 100644 --- a/docs/integration-cli-tests.md +++ b/docs/integration-cli-tests.md @@ -2,6 +2,8 @@ This guide provides instructions for running the PMM Integration and Command-Line Interface (CLI) tests locally. These tests validate the interaction between the PMM server and client, as well as the functionality of the `pmm-admin` CLI tool. +> **⚠️ Note**: This document covers the **current TypeScript/Playwright-based CLI testing framework**. The legacy BATS tests in `pmm-tests/` directory are deprecated and should not be used for new test development. See [main documentation](README.md#important-notice-legacy-tests-deprecation) for details. + ## 💡 **What are Integration & CLI Tests?** These tests are designed to: diff --git a/docs/upgrade-tests.md b/docs/upgrade-tests.md index dd94a565..9fd7b11c 100644 --- a/docs/upgrade-tests.md +++ b/docs/upgrade-tests.md @@ -2,6 +2,8 @@ This guide provides instructions for running the PMM upgrade tests locally. These tests validate the PMM upgrade process, ensuring data integrity and functionality are maintained across versions. +> **⚠️ Note**: Some examples in this document reference legacy scripts in `pmm-tests/` which are deprecated. While these specific scripts (`pmm2-client-setup.sh`, `pmm-framework.sh`) may still be used for upgrade testing scenarios, **no new BATS tests should be created**. For new test development, use the TypeScript/Playwright framework. See [main documentation](README.md#important-notice-legacy-tests-deprecation) for details. + ## 💡 **What are Upgrade Tests?** Upgrade tests are critical for ensuring a smooth user experience when new versions of PMM are released. 
They verify that: diff --git a/pmm-tests/DEPRECATED.md b/pmm-tests/DEPRECATED.md new file mode 100644 index 00000000..7beb2f35 --- /dev/null +++ b/pmm-tests/DEPRECATED.md @@ -0,0 +1,42 @@ +# ⚠️ DEPRECATED: PMM Tests Directory + +## This directory is deprecated and in maintenance mode only + +**Status**: 🚫 **DEPRECATED - DO NOT USE FOR NEW DEVELOPMENT** + +The BATS (Bash Automated Testing System) tests in this directory are **deprecated** and should not be used for new test development. + +## What to use instead + +For new test development, use the current testing frameworks: + +### CLI Testing +- **Framework**: TypeScript/Playwright +- **Location**: `cli-tests/` directory in [pmm-ui-tests](https://github.com/percona/pmm-ui-tests/tree/v3) repository +- **Documentation**: [Integration & CLI Tests](../docs/integration-cli-tests.md) + +### UI Testing +- **Framework**: Playwright +- **Location**: `playwright-tests/` directory in [pmm-ui-tests](https://github.com/percona/pmm-ui-tests/tree/v3) repository +- **Documentation**: [End-to-End Tests](../docs/e2e-tests.md) + +### Infrastructure Setup +- **Framework**: Python/Ansible +- **Location**: `qa-integration/pmm_qa/` directory in [qa-integration](https://github.com/Percona-Lab/qa-integration/tree/v3) repository +- **Documentation**: [Adding New Environments](../docs/adding-new-environments.md) + +## Migration Timeline + +- **Current Status**: Maintenance mode only - critical bug fixes only +- **New Development**: Use TypeScript/Playwright frameworks listed above +- **Existing Tests**: Will be gradually migrated to new frameworks +- **Future**: This directory will be removed in a future release + +## For More Information + +See the main documentation: [PMM-QA Testing Documentation](../docs/README.md#important-notice-legacy-tests-deprecation) + +--- + +**Last Updated**: December 2024 +**Deprecation Notice Added**: December 2024 \ No newline at end of file From 7a22e85786cce6392e67ef7b721b16607cdd3437 Mon Sep 17 00:00:00 2001 From: yurkovychv Date: Thu, 10 Jul 2025 19:58:34 +0300 Subject: [PATCH 5/8] PMM-7 review changes --- README.md | 34 ++--- docs/README.md | 223 ++++++++++++++++++------------- docs/adding-new-environments.md | 18 ++- docs/e2e-codeceptjs-tests.md | 213 ------------------------------ docs/e2e-tests.md | 224 ++++++++++++++++---------------- docs/feature-build-tests.md | 38 +----- docs/infrastructure-tests.md | 44 ++++--- docs/integration-cli-tests.md | 5 +- docs/package-tests.md | 1 - docs/test-parameters.md | 1 - docs/upgrade-tests.md | 126 +----------------- 11 files changed, 314 insertions(+), 613 deletions(-) delete mode 100644 docs/e2e-codeceptjs-tests.md diff --git a/README.md b/README.md index 1a303fa5..6943b2d0 100644 --- a/README.md +++ b/README.md @@ -46,21 +46,11 @@ This repository contains the UI End-to-End tests for PMM. ``` pmm-ui-tests/ -├── playwright-tests/ # Playwright E2E tests -│ ├── pages/ # Page Object Model definitions -│ │ ├── LoginPage.ts -│ │ └── DashboardPage.ts -│ ├── tests/ # Actual Playwright test files (.spec.ts) -│ └── playwright.config.ts # Playwright configuration -├── tests/ # CodeceptJS E2E tests -│ ├── pages/ # Page Object Model definitions -│ │ ├── LoginPage.js -│ │ └── DashboardPage.js -│ ├── login_test.js -│ └── ... +├── playwright-tests/ # ⚠️ DEPRECATED ├── cli/ # Playwright tests for CLI interactions │ ├── tests/ # CLI test files (.spec.ts) │ └── ... 
+├── tests/ # CodeceptJS tests and related code ├── helpers/ # CodeceptJS custom helpers ├── config/ # CodeceptJS configuration files ├── pr.codecept.js # Main CodeceptJS configuration @@ -74,15 +64,16 @@ This repository provides Python-based scripts for setting up and managing PMM te ``` qa-integration/ -├── pmm_qa/ # Core Python setup scripts -│ ├── pmm-framework.py # Main script for setting up services -│ ├── helpers/ # Helper modules for pmm-framework.py +├── pmm_psmdb-pbm_setup/ # PSMDB replica setup from PSMDB QA team +├── pmm_psmdb_diffauth_setup/ # PSMDB replica setup from PSMDB QA team +├── pmm_qa/ # Core Python setup scripts +│ ├── pmm-framework.py # Main script for setting up services +│ ├── helpers/ # Helper modules for pmm-framework.py │ ├── mysql/ │ ├── mongoDb/ │ ├── postgres/ │ └── ... -├── pmm-tests/ # Additional Python/Bash test scripts -├── requirements.txt # Python dependencies +├── requirements.txt # Python dependencies └── ... ``` @@ -92,12 +83,13 @@ This repository contains Ansible playbooks for testing PMM client package instal ``` package-testing/ -├── playbooks/ # Ansible playbooks for different test scenarios +├── playbooks/ # Ansible playbooks for different test scenarios │ ├── pmm3-client_integration.yml │ └── ... -├── roles/ # Reusable Ansible roles (e.g., pmm-client) -├── inventory.ini # Ansible inventory file -├── Vagrantfile # Vagrant configuration for test VMs +├── tasks/ # Reusable Ansible tasks (e.g., verify_pmm3_metric.yml) +├── scripts/ # Reusable scripts (e.g., pmm3_client_install_tarball.sh) +├── inventory.ini # Ansible inventory file +├── Vagrantfile # Vagrant configuration for test VMs └── ... ``` diff --git a/docs/README.md b/docs/README.md index f9be2ad5..f483258f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,16 +8,14 @@ Welcome to the PMM-QA comprehensive testing documentation. This directory contai > > **Use instead**: > - **CLI Testing**: TypeScript/Playwright tests in `cli-tests/` (see [Integration & CLI Tests](integration-cli-tests.md)) -> - **UI Testing**: Playwright tests in `playwright-tests/` (see [End-to-End Tests](e2e-tests.md)) -> - **Infrastructure Setup**: Python framework in `qa-integration/pmm_qa/` (see [Adding New Environments](adding-new-environments.md)) +> - **UI Testing**: CodeceptJS tests (see [End-to-End Tests](e2e-tests.md)) +> - **Infrastructure Setup**: Python framework in `qa-integration/pmm_qa/` and other Percona QA team setups (see [Adding New Environments](adding-new-environments.md)) > -> **Migration Timeline**: Existing BATS tests will be gradually migrated to the new framework. No new BATS tests should be created. 
->
> **Status**:
-> - ❌ **pmm-tests/*.bats** - Deprecated, maintenance mode only
-> - ✅ **cli-tests/** - Current CLI testing framework
-> - ✅ **playwright-tests/** - Current UI testing framework
+> - ✅ **pmm-ui-tests/cli-tests** - Current CLI testing framework
+> - ✅ **pmm-ui-tests/** - Current UI testing framework (CodeceptJS)
> - ✅ **qa-integration/pmm_qa/** - Current infrastructure framework
+> - ✅ **qa-integration/pmm_psmdb_diffauth_setup**, etc. - Other Percona QA team setups

## 📚 **Documentation Overview**

@@ -28,9 +26,9 @@ This documentation is organized by test type to provide focused guidance for dif
| Document | Description | Use Case |
|----------|-------------|----------|
| [Integration & CLI Tests](integration-cli-tests.md) | PMM CLI functionality testing | Daily development validation |
-| [End-to-End Tests](e2e-tests.md) | UI testing with Playwright | Feature validation |
-| [Upgrade Tests](upgrade-tests.md) | PMM upgrade scenarios | Release validation |
-| [Package Tests](package-tests.md) | Package installation testing | Distribution validation |
+| [End-to-End Tests](e2e-tests.md) | UI testing with CodeceptJS | Feature validation |
+| [Upgrade Tests](upgrade-tests.md) | PMM upgrade scenarios | Upgrade validation |
+| [Package Tests](package-tests.md) | Package installation testing | PMM Client distribution validation |
| [Infrastructure Tests](infrastructure-tests.md) | Kubernetes and platform testing | Infrastructure validation |
| [Feature Build Tests](feature-build-tests.md) | Docker images with new features testing | Feature validation |

@@ -47,7 +45,7 @@ This documentation is organized by test type to provide focused guidance for dif
## 🚀 **Quick Start Guide**

### Prerequisites
-- Access to the `percona/pmm-qa` repository
+- Access to the `percona/pmm-qa`, `percona/pmm-ui-tests`, `Percona-Lab/qa-integration`, and `Percona-QA/package-testing` repositories
- Permissions to trigger GitHub Actions workflows
- Understanding of PMM architecture and components

@@ -57,17 +55,25 @@ This documentation is organized by test type to provide focused guidance for dif
```yaml
Workflow: PMM Integration Tests
Purpose: Validate CLI functionality
-Duration: ~2 hours (all jobs)
-Frequency: Daily/Per commit
+Duration: ~10 minutes (all jobs)
+Frequency: Daily/Per FB creation/On demand
```
**[→ Go to Integration & CLI Tests Guide](integration-cli-tests.md)**

#### 🎭 **Feature Validation**
```yaml
-Workflow: PMM e2e Tests(Playwright)
-Purpose: Validate UI functionality
-Duration: ~1 hour
-Frequency: Per feature
+Workflow: E2E tests Matrix (CodeceptJS)
+Purpose: Validate UI E2E functionality
+Duration: ~40 minutes
+Frequency: Daily/On demand
+```
+**[→ Go to End-to-End Tests Guide](e2e-tests.md)**
+
+```yaml
+Workflow: _FB e2e tests
+Purpose: Validate core E2E functionality
+Duration: ~40 minutes
+Frequency: Per FB creation/On demand
```
**[→ Go to End-to-End Tests Guide](e2e-tests.md)**

#### 🚀 **Release Validation**
```yaml
Workflow: PMM Upgrade Tests
Purpose: Validate upgrade scenarios
Duration: ~1 hour
-Frequency: Pre-release
+Frequency: Daily/Pre-release
```
**[→ Go to Upgrade Tests Guide](upgrade-tests.md)**

#### 📦 **Distribution Validation**
```yaml
Workflow: Package Test Matrix
-Purpose: Validate package installation
-Duration: ~1 hour
-Frequency: Per package release
+Purpose: Validate package installation on different OS
+Duration: ~50 minutes
+Frequency: Daily/Pre-release
```
**[→ Go to Package Tests Guide](package-tests.md)**

@@ -94,23 +100,22 @@
## 🏗️ **Test Infrastructure Overview**

### **Supported Platforms**
-- **Operating Systems**: Ubuntu (Noble, Jammy), Oracle Linux (8, 9), Rocky Linux 9
+- **Operating Systems**: Ubuntu (Noble, Jammy), Oracle Linux (8, 9)
- **Container Runtimes**: Docker, Podman
- **Orchestration**: Kubernetes (via Helm), Docker Compose
- **Cloud**: GitHub Actions runners

### **Database Coverage**
-- **MySQL Family**: Percona Server (5.7, 8.0), MySQL (8.0)
-- **PostgreSQL Family**: Percona Distribution for PostgreSQL (14, 15)
-- **MongoDB Family**: Percona Server for MongoDB
+- **MySQL Family**: Percona Server (5.7, 8.0, 8.4), MySQL (8.0)
+- **PostgreSQL Family**: Percona Distribution for PostgreSQL (15-17)
+- **MongoDB Family**: Percona Server for MongoDB (6.0, 7.0, 8.0)
- **Proxy/Load Balancers**: ProxySQL, HAProxy

### **Testing Frameworks**
- **CLI Testing**: Playwright (TypeScript) - Current framework
-- **UI Testing**: Playwright - Current framework
-- **Infrastructure Setup**: Python/Ansible - Current framework
+- **UI Testing**: CodeceptJS - Current framework
+- **Infrastructure Setup**: Python/Ansible - Current framework (BATS is still used for Helm/K8s tests)
- **Package Testing**: Ansible playbooks - Current framework
-- **Legacy Testing**: ⚠️ BATS (Bash) - Deprecated, maintenance mode only

---

@@ -122,33 +127,10 @@ Frequency: Per package release
graph TB
    A[PMM-QA Workflows] --> B[Integration Tests]
    A --> C[E2E Tests]
-    A --> D[Upgrade Tests]
+    A --> D[Jenkins Upgrade Tests]
    A --> E[Package Tests]
    A --> F[Infrastructure Tests]
    A --> G[Feature Build Tests]
-
-    B --> B1[CLI Functionality]
-    B --> B2[Database Integration]
-    B --> B3[Container Testing]
-
-    C --> C1[Portal Tests]
-    C --> C2[Inventory Tests]
-    C --> C3[Component Tests]
-
-    D --> D1[UI Upgrade]
-    D --> D2[Docker Upgrade]
-    D --> D3[Podman Upgrade]
-
-    E --> E1[Standard Install]
-    E --> E2[Custom Path]
-    E --> E3[Custom Port]
-
-    F --> F1[Helm/K8s]
-    F --> F2[Easy Install]
-
-    G --> G1[Feature Build Testing]
-    G --> G2[Docker Image Validation]
-    G --> G3[Feature-Specific Tests]
```

### **Reusable Workflow Pattern**
Most workflows follow a reusable pattern:

---

-## ⚡ **Emergency Testing Commands**
-
-### **Quick Smoke Tests**
-```yaml
-# 5-minute validation
-Test: help-tests only
-Purpose: Verify basic CLI functionality
-
-# 15-minute validation
-Test: generic-tests only
-Purpose: Verify database connectivity
-
-# 30-minute validation
-Test: @portal only
-Purpose: Verify core UI functionality
-```
+## ⚡ **Emergency Testing**

### **Critical Path Testing**
```yaml
# Core functionality
-Workflows: PMM Integration Tests (help, generic)
-Duration: ~20 minutes
+Workflows: PMM Integration Tests
+Duration: ~10 minutes

# UI critical path
-Workflows: E2E Tests (@portal)
-Duration: ~30 minutes
-
-# Upgrade critical path
-Workflows: Upgrade Tests (configuration only)
-Duration: ~30 minutes
+Workflows: _FB e2e tests
+Duration: ~40 minutes
```

---

@@ -204,14 +167,11 @@ Duration: ~30 minutes
### **Pull Request Testing**
1. Full integration test suite
2. Relevant E2E test categories
-3. Upgrade tests if core changes
-4. Package tests if packaging changes
+3. Package tests if packaging changes

### **Release Testing**
-1. Complete test matrix across all platforms
-2. All upgrade scenarios
-3. Full feature build test suite
-4. 
Infrastructure deployment tests
+
+Refer to the Release Sign Off document in Notion.

---

@@ -226,7 +186,6 @@ Duration: ~30 minutes
### **During Test Execution**
- [ ] Monitor test progress for early failure detection
- [ ] Check logs for setup issues
-- [ ] Verify resource utilization
- [ ] Track test duration vs. expectations

@@ -234,7 +193,7 @@ Duration: ~30 minutes
- [ ] Download and analyze failure artifacts
- [ ] Document any new issues discovered
- [ ] Update test configurations if needed
-- [ ] Share results with relevant stakeholders
+- [ ] Share results/findings with QA team members

@@ -242,12 +201,15 @@ Duration: ~30 minutes
### **Related Repositories**
- [pmm-ui-tests](https://github.com/percona/pmm-ui-tests) - UI test suite
+- [qa-integration](https://github.com/Percona-Lab/qa-integration/tree/v3) - PMM Framework
+- [package-testing](https://github.com/Percona-QA/package-testing/tree/v3) - Package testing playbooks
- [qa-integration](https://github.com/Percona-Lab/qa-integration) - Integration setup
-- [pmm-server](https://github.com/percona/pmm) - PMM Server codebase
-- [pmm-client](https://github.com/percona/pmm-client) - PMM Client codebase
+- [pmm-server](https://github.com/percona/pmm) - PMM Server and PMM Client codebase

### **External Documentation**
- [PMM Documentation](https://docs.percona.com/percona-monitoring-and-management/)
+- [BATS Documentation](https://bats-core.readthedocs.io/en/stable/)
+- [CodeceptJS Documentation](https://codecept.io/helpers/Playwright/)
- [Playwright Documentation](https://playwright.dev/)
- [GitHub Actions Documentation](https://docs.github.com/en/actions)

@@ -269,6 +231,91 @@ Duration: ~30 minutes
---

-**Last Updated**: December 2024
+**Last Updated**: July 2025
**Maintained By**: PMM QA Team
-**Repository**: [percona/pmm-qa](https://github.com/percona/pmm-qa)
\ No newline at end of file
+**Repository**: [percona/pmm-qa](https://github.com/percona/pmm-qa)
+
+## 📋 **Comprehensive Documentation Analysis & Feedback**
+
+### **🔍 Issues Found:**
+
+#### **1. Broken Links (Critical)**
+- **`docs/feature-build-tests.md`** has a broken link to `e2e-codeceptjs-tests.md` (line 98)
+- The file `e2e-codeceptjs-tests.md` was deleted but is still referenced
+
+#### **2. Inconsistencies in Documentation**
+
+**Framework References:**
+- **README.md** mentions "UI Testing: CodeceptJS tests" but also shows "UI Testing: Playwright tests" in the status section
+- **Infrastructure Tests** section shows "BATS (Bash) - Current framework" but this contradicts the deprecation notice
+
+**Workflow References:**
+- **README.md** mentions "Jenkins Upgrade Tests" in the workflow architecture but this isn't explained elsewhere
+- **README.md** shows "Release Sign Off document in Notion" but this is an external reference
+
+### **✅ Positive Aspects:**
+
+#### **1. Excellent Structure**
+- Clear organization with core testing guides and reference guides
+- Comprehensive cross-references between documents
+- Well-organized table of contents
+
+#### **2. Good Content Quality**
+- Detailed step-by-step instructions
+- Practical examples and code snippets
+- Comprehensive troubleshooting guide
+- Complete parameter reference
+
+#### **3. Strong Cross-References**
+- Most internal links are working correctly
+- Good navigation between related documents
+- Consistent linking patterns
+
+### **🔧 Recommended Fixes:**
+
+#### **1. 
Fix Broken Link**
```markdown
# In docs/feature-build-tests.md, line 98, change:
- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-codeceptjs-tests.md#how-to-write-codeceptjs-tests) section in the E2E CodeceptJS Tests documentation.

# To:
- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-tests.md#how-to-write-codeceptjs-tests) section in the E2E Tests documentation.
```

#### **2. Fix Framework Inconsistencies**
```markdown
# In README.md, keep the UI testing framework references consistent:
- **UI Testing**: CodeceptJS - Current framework
```

#### **3. Clarify Infrastructure Framework**
```markdown
# In README.md, update the testing frameworks section:
- **Infrastructure Setup**: Python/Ansible - Current framework
```

### **📊 Documentation Quality Assessment:**

| Aspect | Score | Comments |
|--------|-------|----------|
| **Completeness** | 9/10 | Covers all major test types comprehensively |
| **Accuracy** | 8/10 | Minor inconsistencies in framework references |
| **Link Health** | 9/10 | Only one broken link found |
| **Usability** | 9/10 | Clear navigation and practical examples |
| **Maintenance** | 8/10 | Good structure, needs minor updates |

### **Overall Assessment:**

**Strengths:**
- ✅ Comprehensive coverage of all testing scenarios
- ✅ Excellent cross-referencing between documents
- ✅ Practical, actionable content
- ✅ Good troubleshooting and parameter reference
- ✅ Clear organization and navigation

**Areas for Improvement:**
- ⚠️ Fix the broken link to `e2e-codeceptjs-tests.md`
- ⚠️ Resolve framework reference inconsistencies
- ⚠️ Clarify infrastructure testing framework status
- ⚠️ Add missing explanations for Jenkins workflows

**Recommendation:** The documentation is **excellent overall** with only minor issues that can be easily fixed. The structure and content quality are very high, making it a valuable resource for the PMM QA team.
\ No newline at end of file
diff --git a/docs/adding-new-environments.md b/docs/adding-new-environments.md
index 1971cf6e..52a57019 100644
--- a/docs/adding-new-environments.md
+++ b/docs/adding-new-environments.md
@@ -4,7 +4,7 @@ This guide explains how to add new database types and environments to the PMM qa
## Overview

-The PMM framework uses a Python-based system (`pmm-framework.py`) with Ansible playbooks to set up various database and service environments for testing. Adding a new environment involves several coordinated changes.
+The PMM framework uses a Python-based system (`pmm-framework.py`) with Ansible playbooks and Docker to set up various database and service environments for testing. Adding a new environment involves several coordinated changes.

## Architecture

@@ -15,6 +15,14 @@ The framework consists of:
- **Ansible playbooks** (`.yml` files) - Infrastructure automation scripts
- **Helper scripts** - Supporting bash/shell scripts

+### Leveraging Other Percona QA Team Setups
+
+In addition to the core PMM framework, we also leverage setups from other Percona QA teams that are available in the `qa-integration` repository. These setups provide specialized configurations and testing environments:
+
+- **`qa-integration/pmm_psmdb_diffauth_setup/`** - MongoDB authentication setups from the PSMDB QA team
+
+These external setups can be integrated into the PMM framework by referencing their setups and configurations in your custom setup functions.
+
## Step-by-Step Guide

### 1. 
Define Database Configuration @@ -334,9 +342,9 @@ custom_option = get_value('CUSTOM_OPTION', db_type, args, db_config) When contributing new environments: 1. Follow the established patterns -2. Include comprehensive tests -3. Add documentation -4. Ensure cleanup works properly -5. Test with different PMM server configurations +2. Add documentation +3. Ensure cleanup works properly +4. Test with different PMM server configurations +5. Execute e2e-codeceptjs-matrix workflow with contributor branch to ensure all setups are working correctly and nothing is broken/impacted This approach ensures consistency and maintainability across all PMM framework environments. \ No newline at end of file diff --git a/docs/e2e-codeceptjs-tests.md b/docs/e2e-codeceptjs-tests.md deleted file mode 100644 index 8ee305db..00000000 --- a/docs/e2e-codeceptjs-tests.md +++ /dev/null @@ -1,213 +0,0 @@ -# E2E CodeceptJS Tests - -This guide provides instructions for running the PMM E2E tests that use the CodeceptJS framework. These tests cover a wide range of scenarios, including SSL, experimental features, and more. - -## 💡 **What are E2E CodeceptJS Tests?** - -These tests are designed to validate specific and advanced PMM functionalities. They ensure that: - -- **SSL connections are secure**: Verifying that PMM can connect to databases over SSL. -- **Experimental features are stable**: Testing features that are not yet released to the general public. -- **Core functionality is robust**: Covering scenarios like disconnecting and reconnecting services. - -## 🤖 **How to Run E2E CodeceptJS Tests Locally** - -The following steps will guide you through setting up the environment and running the CodeceptJS tests locally, based on the `e2e-codeceptjs-matrix.yml` CI workflow. - -### **Prerequisites** - -- **Git**: To clone the required repositories. -- **Docker** and **Docker Compose**: To run the PMM server and other services. -- **Node.js (v18+)** and **npm**: For running the test frameworks. -- **Python 3** and **pip**: For running setup scripts. -- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, and others. - -### **Step 1: Clone Repositories** - -First, clone the `pmm-ui-tests` and `qa-integration` repositories. These contain the test code and setup scripts. - -```bash -git clone --branch v3 https://github.com/percona/pmm-ui-tests.git -git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git -``` - -### **Step 2: Install System Dependencies** - -Install the required system packages. The command below is for Debian/Ubuntu-based systems. - -```bash -sudo apt-get update -sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client -curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin -``` - -### **Step 3: Set Up PMM Server** - -Next, set up and start the PMM server using Docker Compose. - -```bash -cd pmm-ui-tests - -# Create a docker network for PMM -docker network create pmm-qa || true - -# Start PMM Server -PMM_SERVER_IMAGE=perconalab/pmm-server:3-dev-latest docker compose -f docker-compose.yml up -d - -# Wait for the server to be ready and change the admin password -sleep 60 -docker exec pmm-server change-admin-password admin-password -docker network connect pmm-qa pmm-server || true - -cd .. 
-``` - -### **Step 4: Set Up PMM Client and Services** - -Now, set up the PMM client and the database services you want to monitor. - -```bash -cd qa-integration/pmm_qa - -# Install the PMM client -sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no - -# Set up the test environment and services (e.g., a single Percona Server instance) -python3 -m venv virtenv -source virtenv/bin/activate -pip install --upgrade pip -pip install -r requirements.txt -python pmm-framework.py --pmm-server-password=admin-password --database ps - -cd ../.. -``` -**Note:** You can customize the services by changing the arguments passed to `pmm-framework.py`. For example, to set up multiple databases for inventory tests, use `--database ps --database psmdb --database pdpgsql`. - -### **Step 5: Install Test Dependencies** - -Install the Node.js dependencies required for the UI tests. - -```bash -cd pmm-ui-tests -npm ci -npx playwright install --with-deps -``` - -### **Step 6: Run the Tests** - -Run the CodeceptJS tests using the appropriate tags. The setup for the services will vary depending on the test. - -#### **SSL Tests** - -```bash -# Set up the environment for MySQL SSL tests -python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database ssl_mysql - -# Run the MySQL SSL tests -./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@ssl-mysql" -``` - -#### **Experimental Tests** - -```bash -# Set up the environment for experimental tests -python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database pdpgsql - -# Run the experimental tests -./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@experimental" -``` - -## 📋 **Available Test Suites** - -| Test Suite | Test Tag(s) | Description | -|---|---|---| -| Settings and CLI | `@settings\|@cli` | General settings and CLI tests. | -| SSL Tests | `@ssl-mysql`, `@ssl-mongo`, `@ssl-postgres` | Tests for SSL connections to different databases. | -| Experimental | `@experimental` | Tests for experimental features. | -| Disconnect | `@disconnect` | Tests for disconnecting and reconnecting services. | - -## 📝 **How to Write CodeceptJS Tests** - -All paths mentioned in this section are relative to the root of the `pmm-ui-tests` repository, which can be found [here](https://github.com/percona/pmm-ui-tests/tree/v3). - -CodeceptJS tests are written in JavaScript and provide a high-level, readable syntax for UI interactions. They are built on top of WebDriver or Playwright and use a BDD-style syntax. - -### **Test Structure and Directory Layout** - -CodeceptJS tests for PMM UI are primarily located in the `pmm-ui-tests/tests` directory. Tests are organized by feature or functional area. - -``` -pmm-ui-tests/ -├── tests/ # Actual test files -│ ├── pages/ # Page Object Model definitions -│ │ ├── LoginPage.js -│ │ └── DashboardPage.js -│ ├── login_test.js -│ ├── inventory_test.js -├── helpers/ # Custom helpers for common actions -├── config/ # Configuration files -└── pr.codecept.js # Main CodeceptJS configuration -``` - -- **`tests/`**: This directory contains the main test files (`_test.js`). Each file typically covers a specific feature or a logical group of functionalities. -- **`pages/`**: Similar to Playwright, CodeceptJS also supports the Page Object Model. This directory holds page object definitions, abstracting UI interactions. 
-- **`helpers/`**: Custom helpers can be created to encapsulate common actions or assertions, promoting reusability. -- **`pr.codecept.js`**: This is the primary configuration file for CodeceptJS, defining helpers, plugins, and test paths. - -### **Writing Conventions** - -- **BDD Style**: Tests are written using `Scenario` and `I` (the actor) to describe user interactions in a readable way. -- **Page Objects**: Utilize Page Objects for interacting with UI elements to improve maintainability. -- **Tags**: Use `@` tags in `Scenario` or `Feature` blocks to categorize tests (e.g., `@bm-mongo`, `@exporters`). These tags are used for selective test execution. -- **Comments**: Add comments for complex logic or to explain the *why* behind certain steps. - -### **Basic Test Example** - -A typical CodeceptJS test file (`_test.js`) will look like this: - -```javascript -Feature('Login'); - -Scenario('should display login form', ({ I }) => { - I.amOnPage('http://localhost/'); - I.seeElement('input[name="username"]'); - I.seeElement('input[name="password"]'); - I.seeElement('button[type="submit"]'); -}); - -Scenario('should allow user to login', ({ I }) => { - I.amOnPage('http://localhost/'); - I.fillField('input[name="username"]', 'admin'); - I.fillField('input[name="password"]', 'admin'); - I.click('button[type="submit"]'); - I.see('Dashboard'); -}); -``` - -### **Key Concepts** - -- **`Feature`**: Defines a test suite. -- **`Scenario`**: Represents an individual test case. -- **`I` (the actor)**: The global object for performing UI actions (e.g., `I.amOnPage()`, `I.click()`). -- **Helpers**: Provide methods for `I` to interact with the browser. -- **Tags**: Used for categorizing and selectively running tests. - -### **Running New Tests** - -After creating a new test file, you can run it using the `codeceptjs run` command, specifying the path to your test file or using a `grep` pattern for its title or tags. - -```bash -cd pmm-ui-tests -./node_modules/.bin/codeceptjs run -c pr.codecept.js tests/my_new_feature_test.js -# Or with a grep pattern -./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep="@my-new-feature" -``` - ---- - -**Related Documentation**: -- [E2E Tests](e2e-tests.md) -- [Feature Build Tests](feature-build-tests.md) -- [Integration & CLI Tests](integration-cli-tests.md) -- [Test Parameters Reference](test-parameters.md) -- [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/e2e-tests.md b/docs/e2e-tests.md index 1dcd9d7d..32e4332c 100644 --- a/docs/e2e-tests.md +++ b/docs/e2e-tests.md @@ -1,26 +1,27 @@ -# End-to-End (E2E) Tests +# E2E CodeceptJS Tests -This guide provides instructions for running the PMM End-to-End (E2E) tests locally. These tests validate the PMM UI functionality and user workflows using Playwright and CodeceptJS. +This guide provides instructions for running the PMM E2E tests that use the CodeceptJS framework. These tests cover a wide range of scenarios, including SSL, experimental features, and more. -## 💡 **What are E2E Tests?** +## 💡 **What are E2E CodeceptJS Tests?** -E2E tests simulate real user scenarios from start to finish, ensuring all components of the PMM UI work together correctly. They are crucial for: +These tests are designed to validate specific and advanced PMM functionalities. They ensure that: -- **Validating new features**: Ensuring new UI functionality works as expected. -- **Preventing regressions**: Making sure existing features are not broken by new changes. 
-- **Ensuring stability**: Testing the integration between the PMM server and the UI.
+- **SSL connections are secure**: Verifying that PMM can connect to databases over SSL.
+- **Experimental features are stable**: Testing features that are not yet released to the general public.
+- **Core functionality is robust**: Covering scenarios like disconnecting and reconnecting services.
+- **And more**: See the full list of suites in the tables below.

-## 🤖 **How to Run E2E Tests Locally**
+## 🤖 **How to Run E2E CodeceptJS Tests Locally**

-The following steps will guide you through setting up the necessary environment and running the E2E tests on your local machine. These instructions are based on the steps performed by the CI runners (`runner-e2e-tests-playwright.yml` and `runner-e2e-tests-codeceptjs.yml`).
+The following steps will guide you through setting up the environment and running the CodeceptJS tests locally, based on the `e2e-codeceptjs-matrix.yml` CI workflow.

### **Prerequisites**

-- **Git**: To clone the required repositories.
-- **Docker** and **Docker Compose**: To run the PMM server and other services.
-- **Node.js (v18+)** and **npm**: For running the test frameworks.
-- **Python 3** and **pip**: For running setup scripts.
-- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, and others.
+- **Git**: To clone the required repositories.
+- **Docker** and **Docker Compose**: To run the PMM server and other services.
+- **Node.js (v20+)** and **npm**: For running the test frameworks.
+- **Python 3** and **pip**: For running setup scripts.
+- **System Dependencies**: `ansible`, `clickhouse-client`, `dbdeployer`, and others.

### **Step 1: Clone Repositories**

@@ -31,17 +32,7 @@ git clone --branch v3 https://github.com/percona/pmm-ui-tests.git
git clone --branch v3 https://github.com/Percona-Lab/qa-integration.git
```

-### **Step 2: Install System Dependencies**
-
-Install the required system packages. The command below is for Debian/Ubuntu-based systems.
-
-```bash
-sudo apt-get update
-sudo apt-get install -y apt-transport-https ca-certificates dirmngr ansible libaio1 libaio-dev libnuma-dev libncurses5 socat sysbench clickhouse-client
-curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin
-```
-
-### **Step 3: Set Up PMM Server**
+### **Step 2: Set Up PMM Server**

Next, set up and start the PMM server using Docker Compose.

@@ -62,16 +53,13 @@ docker network connect pmm-qa pmm-server || true
cd ..
```

-### **Step 4: Set Up PMM Client and Services**
+### **Step 3: Set Up Required Services**

Now, set up the PMM client and the database services you want to monitor.

```bash
cd qa-integration/pmm_qa

-# Install the PMM client
-sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no
-
# Set up the test environment and services (e.g., a single Percona Server instance)
python3 -m venv virtenv
source virtenv/bin/activate
pip install --upgrade pip
@@ -83,7 +71,7 @@ cd ../..
```
**Note:** You can customize the services by changing the arguments passed to `pmm-framework.py`. For example, to set up multiple databases for inventory tests, use `--database ps --database psmdb --database pdpgsql`.

-### **Step 5: Install Test Dependencies**
+### **Step 4: Install Test Dependencies**

Install the Node.js dependencies required for the UI tests.
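+Once the dependencies are installed, you can sanity-check which scenarios a tag will select before running the full suite. CodeceptJS can list matching tests without executing them (a quick sketch; `dry-run` accepts the same config and `--grep` flags as `run`):
+
+```bash
+cd pmm-ui-tests
+./node_modules/.bin/codeceptjs dry-run -c pr.codecept.js --grep "@ssl-mysql"
+```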
@@ -93,137 +81,149 @@ npm ci npx playwright install --with-deps ``` -### **Step 6: Run the Tests** +### **Step 5: Run the Tests** -Finally, run the E2E tests. You can run specific test suites by using tags. +Run the CodeceptJS tests using the appropriate tags. The setup for the services will vary depending on the test. -#### **Running Playwright Tests** +#### **SSL Tests** ```bash -# Run the Portal test suite -npx playwright test --project="Portal" --grep="@portal" +# Set up the environment for MySQL SSL tests +python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database ssl_mysql -# Run the Inventory test suite -npx playwright test --project="Chromium" --grep="@inventory" +# Run the MySQL SSL tests +./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@ssl-mysql" ``` -#### **Running CodeceptJS Tests** +#### **Experimental Tests** ```bash -# First, generate the environment file -envsubst < env.list > env.generated.list +# Set up the environment for experimental tests +python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database pdpgsql -# Run the Backup Management test suite for MongoDB -./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep="@bm-mongo" +# Run the experimental tests +./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@experimental" ``` ## 📋 **Available Test Suites** -Here are some of the main test suites you can run: - -| Test Suite | Tag | Framework | Description | -|---|---|---|---| -| Portal | `@portal` | Playwright | Tests the main PMM Portal functionality. | -| Inventory | `@inventory` | Playwright | Tests the service inventory management pages. | -| Backup Management (Mongo) | `@bm-mongo` | CodeceptJS | Tests backup and restore for MongoDB. | -| Exporters | `@exporters` | CodeceptJS | Validates various exporters. | -| Settings | `@settings` | CodeceptJS | Tests the PMM settings and configuration pages. | - -## 📝 **How to Write Playwright Tests** +### **Core E2E CodeceptJS Matrix Test Suites** + +| Test Suite | Test Tag(s) | Description | +|---|---|---| +| Settings and CLI | `@settings\|@cli` | General settings and CLI tests. | +| SSL Tests | `@ssl-mysql`, `@ssl-mongo`, `@ssl-postgres` | Tests for SSL connections to different databases. | +| Experimental | `@experimental` | Tests for experimental features. | +| Disconnect | `@disconnect` | Tests for disconnecting and reconnecting services. | +| Backup Management MongoDB | `@bm-mongo` | MongoDB backup and restore functionality. | +| Backup Management Common | `@bm-locations` | Backup location management and common features. | +| Exporters | `@exporters` | Various exporter functionality tests. | +| MongoDB Exporter | `@mongodb-exporter` | MongoDB-specific exporter tests. | +| Instances | `@fb-instances` | Instance management UI tests. | +| Alerting and Settings | `@fb-alerting\|@fb-settings` | Alerting and settings UI components. | +| User and Password | `@user-password` | User authentication with changed password. | +| PGSM Integration | `@pgsm-pmm-integration` | PostgreSQL pg_stat_monitor integration. | +| PGSS Integration | `@pgss-pmm-integration` | PostgreSQL pg_stat_statements integration. | +| PSMDB Replica | `@pmm-psmdb-replica-integration` | MongoDB replica set integration. | +| PSMDB Arbiter | `@pmm-psmdb-arbiter-integration` | MongoDB arbiter replica integration. | +| Dump Tool | `@dump` | Database dump tool functionality. 
| +| Service Account | `@service-account` | Service account management tests. | +| PS Integration | `@fb-pmm-ps-integration` | Percona Server integration tests. | +| RBAC | `@rbac` | Role-based access control tests. | +| Encryption | `@fb-encryption` | Encryption functionality tests. | +| Docker Configuration | `@docker-configuration` | Docker configuration tests. | +| Nomad | `@nomad` | Nomad orchestration tests. | + +### **Jenkins E2E CodeceptJS Test Suites** +| Test Suite | Test Tag(s) | Description | +|---|---|---| +| Query Analytics | `@qan` | Tests for QAN features. | +| Dashboards | `@nightly`, `@dashboards` | Tests that make sure Dashboards have data. | +| Alerting | `@ia` | Alerting tests. | +| Remote instances | `@instances` | Tests for AWS and Azure integration. | +| GCP Remote instances | `@gcp` | Tests for GCP integration. | + +## 📝 **How to Write CodeceptJS Tests** All paths mentioned in this section are relative to the root of the `pmm-ui-tests` repository, which can be found [here](https://github.com/percona/pmm-ui-tests/tree/v3). -Playwright tests are written in TypeScript and use a clear, readable syntax. Tests are typically organized into `describe` blocks for test suites and `test` blocks for individual test cases. +CodeceptJS tests are written in JavaScript and provide a high-level, readable syntax for UI interactions. They are built on top of WebDriver or Playwright and use a BDD-style syntax. ### **Test Structure and Directory Layout** -Playwright tests for PMM UI are located in the `pmm-ui-tests/playwright-tests` directory. Within this directory, tests are organized by feature or functional area. For example: +CodeceptJS tests for PMM UI are primarily located in the `pmm-ui-tests/tests` directory. Tests are organized by feature or functional area. ``` pmm-ui-tests/ -├── playwright-tests/ +├── tests/ # Actual test files │ ├── pages/ # Page Object Model definitions -│ │ ├── LoginPage.ts -│ │ └── DashboardPage.ts -│ │ └── ServicesPage.ts -│ ├── tests/ # Actual test files -│ │ ├── login.spec.ts -│ │ └── inventory.spec.ts -│ ├── fixtures/ # Test data or reusable components -│ └── playwright.config.ts # Playwright configuration +│ │ ├── LoginPage.js +│ │ └── DashboardPage.js +│ ├── login_test.js +│ ├── inventory_test.js +├── helpers/ # Custom helpers for common actions +├── config/ # Configuration files +└── pr.codecept.js # Main CodeceptJS configuration ``` -- **`pages/`**: This directory typically contains Page Object Model (POM) files. POM is a design pattern that helps create an object repository for UI elements within the application. Each page in the web application has a corresponding Page Object class, which contains methods that perform interactions on that web page. -- **`tests/`**: This is where the actual test files (`.spec.ts`) reside. Each file usually contains tests for a specific feature or a logical group of functionalities. -- **`fixtures/`**: This directory can be used for test data, custom fixtures, or reusable test components. -- **`playwright.config.ts`**: This file configures Playwright, including projects, reporters, and global setup/teardown. +- **`tests/`**: This directory contains the main test files (`_test.js`). Each file typically covers a specific feature or a logical group of functionalities. +- **`pages/`**: Similar to Playwright, CodeceptJS also supports the Page Object Model. This directory holds page object definitions, abstracting UI interactions. 
+- **`helpers/`**: Custom helpers can be created to encapsulate common actions or assertions, promoting reusability. +- **`pr.codecept.js`**: This is the primary configuration file for CodeceptJS, defining helpers, plugins, and test paths. ### **Writing Conventions** -- **Descriptive Naming**: Test files and test blocks should have clear, descriptive names that indicate their purpose (e.g., `login.spec.ts`, `test.describe('Login Page')`). -- **Page Object Model (POM)**: Utilize the Page Object Model for interacting with UI elements. This improves test readability, maintainability, and reduces code duplication. -- **Assertions**: Use `expect` assertions to verify the state of the UI. Be specific with your assertions. -- **Tags**: Use `@` tags in `test.describe` or `test` blocks to categorize tests (e.g., `@portal`, `@inventory`). These tags are used to run specific subsets of tests. -- **Comments**: Add comments to explain complex logic or the *why* behind certain actions, rather than just *what* is being done. +- **BDD Style**: Tests are written using `Scenario` and `I` (the actor) to describe user interactions in a readable way. +- **Page Objects**: Utilize Page Objects for interacting with UI elements to improve maintainability. +- **Tags**: Use `@` tags in `Scenario` or `Feature` blocks to categorize tests (e.g., `@bm-mongo`, `@exporters`). These tags are used for selective test execution. +- **Comments**: Add comments for complex logic or to explain the *why* behind certain steps. ### **Basic Test Example** -Here's an example demonstrating how to navigate to the Inventory page and verify a service: - -```typescript -import { test, expect } from '@playwright/test'; -import { ServicesPage } from './pages/ServicesPage'; // Assuming ServicesPage is defined - -test.describe('PMM Inventory', () => { - let servicesPage: ServicesPage; - - test.beforeEach(async ({ page }) => { - servicesPage = new ServicesPage(page); - await page.goto(servicesPage.url); // Navigate to the Inventory page URL - await servicesPage.verifyPageLoaded(); // Custom method to wait for page to load - }); - - test('should verify local MongoDB service presence', async () => { - const serviceName = 'mo-integration-'; // Example service name - await servicesPage.servicesTable.verifyService({ serviceName }); // Custom method to verify service in a table - }); - - test('should verify kebab menu options for MongoDB service', async () => { - const serviceName = 'mo-integration-'; - await servicesPage.servicesTable.buttons.options(serviceName).click(); - await expect(servicesPage.servicesTable.buttons.deleteService).toBeVisible(); - await expect(servicesPage.servicesTable.buttons.serviceDashboard).toBeVisible(); - await expect(servicesPage.servicesTable.buttons.qan).toBeVisible(); - }); +A typical CodeceptJS test file (`_test.js`) will look like this: + +```javascript +Feature('Login'); + +Scenario('should display login form', ({ I }) => { + I.amOnPage('http://localhost/'); + I.seeElement('input[name="username"]'); + I.seeElement('input[name="password"]'); + I.seeElement('button[type="submit"]'); +}); + +Scenario('should allow user to login', ({ I }) => { + I.amOnPage('http://localhost/'); + I.fillField('input[name="username"]', 'admin'); + I.fillField('input[name="password"]', 'admin'); + I.click('button[type="submit"]'); + I.see('Dashboard'); }); ``` ### **Key Concepts** -- **`test` object**: Used for defining tests, test suites, and hooks. -- **`page` object**: Represents a browser tab, used for navigation and interaction. 
-- **Locators**: Methods to find elements on the page (e.g., `page.locator('input[name="username"]')`). -- **`expect` object**: Used for making assertions about the UI state. -- **`await` keyword**: Essential for asynchronous Playwright operations. -- **Page Object Model (POM)**: A design pattern where web pages are represented as classes, abstracting UI elements and interactions. This improves test readability and maintainability. +- **`Feature`**: Defines a test suite. +- **`Scenario`**: Represents an individual test case. +- **`I` (the actor)**: The global object for performing UI actions (e.g., `I.amOnPage()`, `I.click()`). +- **Helpers**: Provide methods for `I` to interact with the browser. +- **Tags**: Used for categorizing and selectively running tests. ### **Running New Tests** -After creating a new test file, you can run it using the `npx playwright test` command, specifying the path to your test file or using a `grep` pattern for its title or tags. +After creating a new test file, you can run it using the `codeceptjs run` command, specifying the path to your test file or using a `grep` pattern for its title or tags. ```bash cd pmm-ui-tests -npx playwright test playwright-tests/my-new-test.spec.ts +./node_modules/.bin/codeceptjs run -c pr.codecept.js tests/my_new_feature_test.js # Or with a grep pattern -npx playwright test --grep="@my-new-feature" +./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep="@my-new-feature" ``` --- **Related Documentation**: +- [Feature Build Tests](feature-build-tests.md) - [Integration & CLI Tests](integration-cli-tests.md) -- [Infrastructure Tests](infrastructure-tests.md) -- [Package Tests](package-tests.md) -- [Upgrade Tests](upgrade-tests.md) - [Test Parameters Reference](test-parameters.md) - [Troubleshooting Guide](troubleshooting.md) \ No newline at end of file diff --git a/docs/feature-build-tests.md b/docs/feature-build-tests.md index 2f8177c5..28681fd9 100644 --- a/docs/feature-build-tests.md +++ b/docs/feature-build-tests.md @@ -40,14 +40,7 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc curl -s https://raw.githubusercontent.com/datacharmer/dbdeployer/master/scripts/dbdeployer-install.sh | sudo bash -s -- -b /usr/local/bin ``` -3. **Clean Up Disk Space (Optional, but recommended)** - - Free up space on your system to avoid issues with large Docker images: - ```bash - sudo rm -rf /usr/share/dotnet /opt/ghc "/usr/local/share/boost" - ``` - -4. **Start PMM Server with Docker Compose** +3. **Start PMM Server with Docker Compose** This step sets up the PMM Server container, changes the admin password, and runs initial DB setup scripts: ```bash @@ -61,17 +54,7 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc cd .. ``` -5. **Set Up PMM Client** - - This step configures the PMM Client to connect to your local PMM Server. First, dynamically retrieve the PMM Server container's IP address and export it as an environment variable: - ```bash - export PMM_SERVER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pmm-server) - cd qa-integration/pmm_qa - sudo bash -x pmm3-client-setup.sh --pmm_server_ip $PMM_SERVER_IP --client_version 3-dev-latest --admin_password admin-password --use_metrics_mode no - cd ../.. - ``` - -6. **Prepare Python Environment and Run Setup** +4. **Prepare Python Environment and Run Setup** This step prepares the test environment and configures databases/services as needed for the test suite. 
Replace `[SETUP_ARGS]` with the appropriate setup string, e.g. `--database psmdb,SETUP_TYPE=pss`: ```bash @@ -86,14 +69,13 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc cd ../.. ``` -7. **Install Node.js Dependencies for UI Tests** +5. **Install Node.js Dependencies for UI Tests** Installs all required Node.js modules and Playwright browser dependencies for UI testing: ```bash cd pmm-ui-tests npm ci npx playwright install --with-deps - envsubst < env.list > env.generated.list ``` ### **Step 8: Run the Tests** @@ -108,21 +90,13 @@ Finally, run the E2E tests for the specific feature. Use the appropriate tag for ./node_modules/.bin/codeceptjs run -c pr.codecept.js --grep "@exporters" ``` -## 📋 **Available Test Suites** - -| Test Suite | Test Tag(s) | Description | -|---|---|---| -| Backup Management | `@bm-mongo`, `@bm-locations` | Tests for backup and restore functionality. | -| Exporters | `@exporters`, `@mongodb-exporter` | Tests for various exporters. | -| UI Components | `@fb-instances`, `@fb-alerting` | Tests for different UI components. | -| PostgreSQL Monitoring | `@pgsm-pmm-integration` | Tests for pg_stat_monitor integration. | +## 📋 **[Available Test Suites](e2e-tests.md#-available-test-suites)** -## 📝 **How to Write Feature Build Tests** +## 📝 **How to Write E2E Tests** Feature Build tests are essentially End-to-End (E2E) UI tests that focus on validating new features. Therefore, the principles and practices for writing these tests are the same as for general E2E UI tests. -- For writing **Playwright** tests, refer to the [How to Write Playwright Tests](e2e-tests.md#how-to-write-playwright-tests) section in the E2E Tests documentation. -- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-codeceptjs-tests.md#how-to-write-codeceptjs-tests) section in the E2E CodeceptJS Tests documentation. +- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-tests.md#how-to-write-codeceptjs-tests) section in the E2E CodeceptJS Tests documentation. When writing Feature Build tests, pay special attention to: diff --git a/docs/infrastructure-tests.md b/docs/infrastructure-tests.md index e20930ed..d79e158e 100644 --- a/docs/infrastructure-tests.md +++ b/docs/infrastructure-tests.md @@ -7,7 +7,7 @@ This guide provides instructions for running the PMM infrastructure tests locall Infrastructure tests are designed to ensure that PMM can be deployed and configured correctly in different environments. They cover: - **Kubernetes/Helm**: Validating PMM deployment using Helm charts on a Kubernetes cluster. -- **Easy Install**: Testing the simplified installation script on various supported operating systems. +- **Easy Install - not automated**: Testing the simplified installation script on various supported operating systems. ## 🤖 **How to Run Infrastructure Tests Locally** @@ -23,23 +23,20 @@ These steps will guide you through setting up a local Kubernetes cluster using M #### **Step 1: Start Minikube** -Start a Minikube cluster. This will create a local single-node Kubernetes cluster. +Start a Minikube cluster. This will create a local single-node Kubernetes cluster. Disable the default storage provisioner and enable the CSI hostpath driver for persistent storage. 
```bash
-minikube start
+minikube delete && \
+  minikube start && \
+  minikube addons disable storage-provisioner && \
+  kubectl delete storageclass standard && \
+  minikube addons enable csi-hostpath-driver && \
+  minikube addons enable volumesnapshots && \
+  kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' && \
+  kubectl wait --for=condition=Ready node --timeout=90s minikube
```

-#### **Step 2: Set Up Storage**
-
-Disable the default storage provisioner and enable the CSI hostpath driver for persistent storage.
-
-```bash
-minikube addons disable storage-provisioner
-minikube addons enable csi-hostpath-driver
-kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
-```
-
-#### **Step 3: Run Helm Tests**
+#### **Step 2: Run Helm Tests**

Clone the `pmm-qa` repository and run the Helm tests using `bats`.

@@ -54,7 +51,7 @@ sudo ./setup_bats_libs.sh
SERVER_IMAGE=perconalab/pmm-server:3-dev-latest bats --tap helm-test.bats
```

-### **Easy Install Tests**
+### **Easy Install Tests - not automated**

These steps will show you how to test the Easy Install script on a supported operating system.

@@ -151,13 +148,28 @@ load 'test_helper/bats-assert/load'
}
```

+During development, you may want to run only the test you are working on. To achieve this, add the comment `#bats test_tags=bats:focus` above the test annotation:
+
+```bash
+#bats test_tags=bats:focus
+@test "PMM server is reachable after deployment" {
+  run kubectl get service my-pmm-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+  assert_success
+  PMM_IP="$output"
+
+  run curl -s "http://$PMM_IP/ping"
+  assert_success
+  assert_output "PMM Server is running"
+}
+```
+
**Note**: The actual `helm-test.bats` file in the project will be more complex, involving detailed setup, deployment, and validation steps specific to PMM. The example above is simplified to illustrate the basic structure.

---

**Related Documentation**:
- [E2E Tests](e2e-tests.md)
-- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md)
- [Integration & CLI Tests](integration-cli-tests.md)
- [Package Tests](package-tests.md)
- [Upgrade Tests](upgrade-tests.md)
diff --git a/docs/integration-cli-tests.md b/docs/integration-cli-tests.md
index c5b680aa..258d9cb3 100644
--- a/docs/integration-cli-tests.md
+++ b/docs/integration-cli-tests.md
@@ -60,7 +60,7 @@ Set up the PMM client and the database services you want to monitor.

```bash
cd qa-integration/pmm_qa

-# Install the PMM client
+# Install the PMM client (used only for help, unregister, generic test suites. These suites need to be moved to some db container)
sudo bash -x pmm3-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 3-dev-latest --admin_password admin --use_metrics_mode no

# Set up the test environment and services (e.g., a single Percona Server instance)
@@ -143,7 +143,7 @@ qa-integration/

### **Writing Conventions**

-- **Playwright for CLI Interaction**: Use Playwright's `page.evaluate()` or `page.keyboard` to simulate CLI commands and `page.locator()` to capture and assert on terminal output (if applicable in a web-based terminal scenario, or by interacting with the underlying shell directly if the test runner allows).
+- **CLI Interaction**: Use `cliHelper` to execute CLI commands and assert on their output.
- **Python for Environment Setup**: Leverage `pmm-framework.py` to programmatically set up databases, PMM clients, and other services.
This ensures a consistent and reproducible test environment. - **Clear Assertions**: Assertions should clearly define the expected CLI output, service status, or data collected by PMM. - **Test Isolation**: Each test should aim to be as isolated as possible, setting up and tearing down its own resources to prevent interference. @@ -204,7 +204,6 @@ This example demonstrates how to execute a `pmm-admin` command, check its succes **Related Documentation**: - [E2E Tests](e2e-tests.md) -- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md) - [Infrastructure Tests](infrastructure-tests.md) - [Package Tests](package-tests.md) - [Upgrade Tests](upgrade-tests.md) diff --git a/docs/package-tests.md b/docs/package-tests.md index 041361cd..a328bad4 100644 --- a/docs/package-tests.md +++ b/docs/package-tests.md @@ -215,7 +215,6 @@ After creating a new playbook or modifying an existing one, you can run it by up **Related Documentation**: - [E2E Tests](e2e-tests.md) -- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md) - [Infrastructure Tests](infrastructure-tests.md) - [Integration & CLI Tests](integration-cli-tests.md) - [Upgrade Tests](upgrade-tests.md) diff --git a/docs/test-parameters.md b/docs/test-parameters.md index e72da2d1..09131145 100644 --- a/docs/test-parameters.md +++ b/docs/test-parameters.md @@ -20,7 +20,6 @@ pmm_ui_tests_branch: "v3" # PMM UI tests repository branch pmm_qa_branch: "v3" # PMM QA repository branch qa_integration_branch: "v3" # QA integration repository branch package_testing_branch: "v3" # Package testing branch -easy_install_branch: "v3" # Easy install script branch ``` ### Version and Image Parameters diff --git a/docs/upgrade-tests.md b/docs/upgrade-tests.md index 9fd7b11c..c644025e 100644 --- a/docs/upgrade-tests.md +++ b/docs/upgrade-tests.md @@ -2,8 +2,6 @@ This guide provides instructions for running the PMM upgrade tests locally. These tests validate the PMM upgrade process, ensuring data integrity and functionality are maintained across versions. -> **⚠️ Note**: Some examples in this document reference legacy scripts in `pmm-tests/` which are deprecated. While these specific scripts (`pmm2-client-setup.sh`, `pmm-framework.sh`) may still be used for upgrade testing scenarios, **no new BATS tests should be created**. For new test development, use the TypeScript/Playwright framework. See [main documentation](README.md#important-notice-legacy-tests-deprecation) for details. - ## 💡 **What are Upgrade Tests?** Upgrade tests are critical for ensuring a smooth user experience when new versions of PMM are released. They verify that: @@ -14,7 +12,6 @@ Upgrade tests are critical for ensuring a smooth user experience when new versio ## 🤖 **How to Run Upgrade Tests Locally** -The following steps will guide you through setting up an older version of PMM, performing an upgrade, and running validation tests locally. These instructions are based on the `runner-e2e-upgrade-tests.yml` CI workflow. ### **Prerequisites** @@ -36,58 +33,17 @@ First, set up the environment with the *starting* version of PMM Server and Clie 2. **Set up the PMM Server**: - Start a PMM server container with a specific older version tag (e.g., `2.44.1`). - - ```bash - cd pmm-qa/pmm-integration - npm install - sudo npx ts-node ./integration-setup.ts --ci --setup-docker-pmm-server --rbac --pmm-server-docker-tag=percona/pmm-server:2.44.1 - cd ../.. - ``` 3. **Set up the PMM Client and Services**: - Install the corresponding older version of the PMM client and add some services to be monitored. 
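+
+   A minimal sketch of this step, assuming the legacy helper scripts are still available under `pmm-qa/pmm-tests` and the same `2.44.1` starting version as in the server step above:
+
+   ```bash
+   sudo ./pmm-qa/pmm-tests/pmm2-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 2.44.1 --admin_password admin
+   sudo ./pmm-qa/pmm-tests/pmm-framework.sh --addclient=ps,1 --pmm2 --pmm2-server-ip=127.0.0.1
+   ```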
- - ```bash - sudo ./pmm-qa/pmm-tests/pmm2-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 2.44.1 --admin_password admin - sudo ./pmm-qa/pmm-tests/pmm-framework.sh --addclient=ps,1 --pmm2 --pmm2-server-ip=127.0.0.1 - ``` ### **Step 2: Run Pre-Upgrade Tests** Before performing the upgrade, run the pre-upgrade tests. These tests capture the state of the system before the upgrade to compare it with the post-upgrade state. -```bash -cd pmm-ui-tests/playwright-tests -npm install -npx playwright install -npx playwright test --grep="@config-pre-upgrade" -cd ../.. -``` ### **Step 3: Perform the PMM Upgrade** -Now, perform the upgrade using one of the following methods: - -#### **UI Way Upgrade** - -1. **Enable the target repository** in the PMM server container. - - ```bash - docker exec pmm-integration-server percona-release enable-only pmm3-client dev-latest - ``` - -2. **Run the UI upgrade test**. - - ```bash - cd pmm-ui-tests/playwright-tests - npx playwright test --grep="@pmm-upgrade" - cd ../.. - ``` - -#### **Docker Way Upgrade** - 1. **Stop and replace the PMM server container** with the new version. ```bash @@ -100,12 +56,6 @@ Now, perform the upgrade using one of the following methods: After the upgrade is complete, run the post-upgrade tests to validate that everything is still working as expected. -```bash -cd pmm-ui-tests/playwright-tests -npx playwright test --grep="@config-post-upgrade" -cd ../.. -``` - By comparing the results of the pre-upgrade and post-upgrade tests, you can verify the success of the upgrade process. ## 📝 **How to Write Upgrade Tests** @@ -114,85 +64,19 @@ Upgrade tests are complex and typically involve a sequence of steps across diffe ### **Test Structure and Directory Layout** -Upgrade tests are primarily orchestrated through Playwright test files, which call out to Python scripts for environment setup and management. The relevant files are located in the `pmm-ui-tests/playwright-tests/tests/upgrade` directory and the `qa-integration/pmm_qa` directory. - -``` -pmm-ui-tests/ -├── playwright-tests/ -│ ├── tests/ -│ │ └── upgrade/ # Playwright test files for upgrade scenarios -│ │ ├── basic_upgrade.spec.ts -│ │ └── ... -qa-integration/ -├── pmm_qa/ # Python scripts for environment setup -│ ├── pmm-framework.py # Main script for setting up services -│ ├── pmm2-client-setup.sh # Script for PMM client setup -│ └── ... -``` - -- **`pmm-ui-tests/playwright-tests/tests/upgrade/`**: Contains the Playwright test files (`.spec.ts`) that define the upgrade scenarios. These tests will typically navigate the PMM UI to trigger upgrades or verify post-upgrade states. -- **`qa-integration/pmm_qa/`**: This directory holds the Python and Bash scripts (`pmm-framework.py`, `pmm2-client-setup.sh`) used to set up the initial PMM environment (server and client) at a specific version, and to manage services before and after the upgrade. - ### **Writing Conventions** - **Orchestration**: Playwright tests act as the orchestrator, calling external scripts (e.g., Python `pmm-framework.py` via `cli.exec` or similar helper) to set up the initial PMM environment with a specific older version. -- **Pre-Upgrade Validation**: Use Playwright to interact with the UI and verify the state of PMM *before* the upgrade. This might involve checking dashboard data, service lists, or configuration settings. -- **Upgrade Execution**: Trigger the upgrade process. 
This can be done via UI interaction (e.g., clicking an upgrade button), or by executing shell commands (e.g., `docker pull` and `docker run` for Docker-based upgrades). -- **Post-Upgrade Validation**: After the upgrade, use Playwright to verify that PMM is functioning correctly, data is preserved, and new features are available. This often involves re-running the same checks as the pre-upgrade validation and adding new ones for the upgraded version. -- **Version Management**: Be mindful of the PMM server and client versions. Upgrade tests specifically target upgrades *from* an older version *to* a newer version. -- **Tags**: Use `@` tags (e.g., `@config-pre-upgrade`, `@config-post-upgrade`, `@pmm-upgrade`) to categorize different phases or aspects of the upgrade tests. - -### **Basic Upgrade Test Flow (Conceptual)** - -```typescript -import { test, expect } from '@playwright/test'; -// Assume cli helper is available for executing shell commands -import * as cli from '@helpers/cli-helper'; - -test.describe('PMM Upgrade Scenario', () => { - test.beforeAll(async () => { - // Step 1: Set up PMM Server and Client at an older version - // This would involve calling pmm-qa/pmm-integration/integration-setup.ts - // and qa-integration/pmm_qa/pmm2-client-setup.sh - console.log('Setting up PMM Server and Client at older version...'); - // Example: await cli.exec('sudo npx ts-node pmm-qa/pmm-integration/integration-setup.ts --pmm-server-docker-tag=percona/pmm-server:2.41.0'); - // Example: await cli.exec('sudo pmm-qa/pmm-tests/pmm2-client-setup.sh --client_version 2.41.0'); - }); - - test('should perform pre-upgrade checks', async ({ page }) => { - // Navigate to PMM UI and perform checks before upgrade - await page.goto('http://localhost/'); - await expect(page.locator('text=Dashboard')).toBeVisible(); - // Assertions for existing data, configurations, etc. - // await page.locator('text=Some old feature').toBeVisible(); - }); - - test('should perform UI upgrade', async ({ page }) => { - // Navigate to upgrade section in UI - // Click upgrade button - // Wait for upgrade to complete - console.log('Triggering UI upgrade...'); - // Example: await page.locator('button[data-testid="upgrade-button"]').click(); - // await page.waitForSelector('text=Upgrade Complete'); - }); - - test('should perform post-upgrade checks', async ({ page }) => { - // Navigate to PMM UI and perform checks after upgrade - await page.goto('http://localhost/'); - await expect(page.locator('text=New Dashboard Feature')).toBeVisible(); - // Assertions for data persistence, new features, etc. - // await page.locator('text=Some old feature').toBeVisible(); // Should still be there - }); -}); -``` - -**Note**: The example above is conceptual and simplified. Actual upgrade tests involve more intricate setup, version management, and detailed assertions across various PMM components. 
+- **Pre-Upgrade Validation**: +- **Upgrade Execution**: T +- **Post-Upgrade Validation**: +- **Version Management**: +- **Tags**: --- **Related Documentation**: - [E2E Tests](e2e-tests.md) -- [E2E CodeceptJS Tests](e2e-codeceptjs-tests.md) - [Infrastructure Tests](infrastructure-tests.md) - [Integration & CLI Tests](integration-cli-tests.md) - [Package Tests](package-tests.md) From feeb9bf165e668f15bfb4c52d7c0a52940b78146 Mon Sep 17 00:00:00 2001 From: yurkovychv Date: Thu, 10 Jul 2025 20:01:39 +0300 Subject: [PATCH 6/8] PMM-7 cleanup --- docs/README.md | 88 ++------------------------------------------------ 1 file changed, 2 insertions(+), 86 deletions(-) diff --git a/docs/README.md b/docs/README.md index f483258f..1de7be6c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -233,89 +233,5 @@ Refer to Release Sign Off document in Notion **Last Updated**: July 2025 **Maintained By**: PMM QA Team -**Repository**: [percona/pmm-qa](https://github.com/percona/pmm-qa) - -## 📋 **Comprehensive Documentation Analysis & Feedback** - -### **🔍 Issues Found:** - -#### **1. Broken Links (Critical)** -- **`docs/feature-build-tests.md`** has a broken link to `e2e-codeceptjs-tests.md` (line 98) -- The file `e2e-codeceptjs-tests.md` was deleted but is still referenced - -#### **2. Inconsistencies in Documentation** - -**Framework References:** -- **README.md** mentions "UI Testing: CodeceptJS tests" but also shows "UI Testing: Playwright tests" in the status section -- **Infrastructure Tests** section shows "BATS (Bash) - Current framework" but this contradicts the deprecation notice - -**Workflow References:** -- **README.md** mentions "Jenkins Upgrade Tests" in the workflow architecture but this isn't explained elsewhere -- **README.md** shows "Release Sign Off document in Notion" but this is an external reference - -### **✅ Positive Aspects:** - -#### **1. Excellent Structure** -- Clear organization with core testing guides and reference guides -- Comprehensive cross-references between documents -- Well-organized table of contents - -#### **2. Good Content Quality** -- Detailed step-by-step instructions -- Practical examples and code snippets -- Comprehensive troubleshooting guide -- Complete parameter reference - -#### **3. Strong Cross-References** -- Most internal links are working correctly -- Good navigation between related documents -- Consistent linking patterns - -### **🔧 Recommended Fixes:** - -#### **1. Fix Broken Link** -```markdown -# In docs/feature-build-tests.md, line 98, change: -- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-codeceptjs-tests.md#how-to-write-codeceptjs-tests) section in the E2E CodeceptJS Tests documentation. - -# To: -- For writing **CodeceptJS** tests, refer to the [How to Write CodeceptJS Tests](e2e-tests.md#how-to-write-codeceptjs-tests) section in the E2E Tests documentation. -``` - -#### **2. Fix Framework Inconsistencies** -```markdown - -``` - -#### **3. 
Clarify Infrastructure Framework** -```markdown -# In README.md, update the testing frameworks section: -- **Infrastructure Setup**: Python/Ansible - Current framework -``` - -### **📊 Documentation Quality Assessment:** - -| Aspect | Score | Comments | -|--------|-------|----------| -| **Completeness** | 9/10 | Covers all major test types comprehensively | -| **Accuracy** | 8/10 | Minor inconsistencies in framework references | -| **Link Health** | 9/10 | Only one broken link found | -| **Usability** | 9/10 | Clear navigation and practical examples | -| **Maintenance** | 8/10 | Good structure, needs minor updates | - -### ** Overall Assessment:** - -**Strengths:** -- ✅ Comprehensive coverage of all testing scenarios -- ✅ Excellent cross-referencing between documents -- ✅ Practical, actionable content -- ✅ Good troubleshooting and parameter reference -- ✅ Clear organization and navigation - -**Areas for Improvement:** -- ⚠️ Fix the broken link to `e2e-codeceptjs-tests.md` -- ⚠️ Resolve framework reference inconsistencies -- ⚠️ Clarify infrastructure testing framework status -- ⚠️ Add missing explanations for Jenkins workflows - -**Recommendation:** The documentation is **excellent overall** with only minor issues that can be easily fixed. The structure and content quality are very high, making it a valuable resource for the PMM QA team. \ No newline at end of file +**Repository**: [percona/pmm-qa](https://github.com/percona/pmm-qa) + \ No newline at end of file From bf5c07d50a9ce95078d3418505caf7fb1bd4f2ff Mon Sep 17 00:00:00 2001 From: Vasyl Yurkovych <59879559+yurkovychv@users.noreply.github.com> Date: Thu, 10 Jul 2025 20:03:16 +0300 Subject: [PATCH 7/8] Update docs/upgrade-tests.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- docs/upgrade-tests.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/upgrade-tests.md b/docs/upgrade-tests.md index c644025e..5397c652 100644 --- a/docs/upgrade-tests.md +++ b/docs/upgrade-tests.md @@ -68,7 +68,7 @@ Upgrade tests are complex and typically involve a sequence of steps across diffe - **Orchestration**: Playwright tests act as the orchestrator, calling external scripts (e.g., Python `pmm-framework.py` via `cli.exec` or similar helper) to set up the initial PMM environment with a specific older version. - **Pre-Upgrade Validation**: -- **Upgrade Execution**: T +- **Upgrade Execution**: Execute the upgrade process by replacing the PMM server container with the new version, ensuring all services are restarted and functional. - **Post-Upgrade Validation**: - **Version Management**: - **Tags**: From 550aaff95282efb0f18bfc68e9cec096de342593 Mon Sep 17 00:00:00 2001 From: Nurlan Moldomurov Date: Thu, 28 Aug 2025 16:12:43 +0300 Subject: [PATCH 8/8] Update documentation and .gitignore for testing frameworks - Added new entries to .gitignore for pmm-ui-tests, qa-integration, and package-testing directories. - Updated various documentation files to replace `python` with `python3` for executing the pmm-framework.py script and included the installation of setuptools in the setup steps for E2E and feature build tests. 
--- .gitignore | 4 ++++ docs/e2e-codeceptjs-tests.md | 7 ++++--- docs/e2e-tests.md | 3 ++- docs/feature-build-tests.md | 2 +- docs/integration-cli-tests.md | 4 ++-- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 6f067d2f..6a32424a 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,7 @@ pmm-tests/vendor/* ps_socket* .DS_Store + +pmm-ui-tests/* +qa-integration/* +package-testing/* \ No newline at end of file diff --git a/docs/e2e-codeceptjs-tests.md b/docs/e2e-codeceptjs-tests.md index 8ee305db..cc8ab241 100644 --- a/docs/e2e-codeceptjs-tests.md +++ b/docs/e2e-codeceptjs-tests.md @@ -77,7 +77,8 @@ python3 -m venv virtenv source virtenv/bin/activate pip install --upgrade pip pip install -r requirements.txt -python pmm-framework.py --pmm-server-password=admin-password --database ps +pip install setuptools +python3 pmm-framework.py --pmm-server-password=admin-password --database ps cd ../.. ``` @@ -101,7 +102,7 @@ Run the CodeceptJS tests using the appropriate tags. The setup for the services ```bash # Set up the environment for MySQL SSL tests -python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database ssl_mysql +python3 qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database ssl_mysql # Run the MySQL SSL tests ./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@ssl-mysql" @@ -111,7 +112,7 @@ python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-passwo ```bash # Set up the environment for experimental tests -python qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database pdpgsql +python3 qa-integration/pmm_qa/pmm-framework.py --pmm-server-password=admin-password --database pdpgsql # Run the experimental tests ./node_modules/.bin/codeceptjs run -c pmm-ui-tests/pr.codecept.js --grep "@experimental" diff --git a/docs/e2e-tests.md b/docs/e2e-tests.md index 1dcd9d7d..ee7232bb 100644 --- a/docs/e2e-tests.md +++ b/docs/e2e-tests.md @@ -76,8 +76,9 @@ sudo bash -x pmm3-client-setup.sh --pmm_server_ip 192.168.0.1 --client_version 3 python3 -m venv virtenv source virtenv/bin/activate pip install --upgrade pip +pip install setuptools pip install -r requirements.txt -python pmm-framework.py --pmm-server-password=admin-password --database ps +python3 pmm-framework.py --pmm-server-password=admin-password --database ps cd ../.. ``` diff --git a/docs/feature-build-tests.md b/docs/feature-build-tests.md index 2f8177c5..4db69703 100644 --- a/docs/feature-build-tests.md +++ b/docs/feature-build-tests.md @@ -82,7 +82,7 @@ You can reproduce the CI runner workflow for Feature Build E2E tests on your loc pip install --upgrade pip pip install -r requirements.txt pip install setuptools - python pmm-framework.py --pmm-server-password=admin-password --verbose [SETUP_ARGS] + python3 pmm-framework.py --pmm-server-password=admin-password --verbose [SETUP_ARGS] cd ../.. ``` diff --git a/docs/integration-cli-tests.md b/docs/integration-cli-tests.md index c5b680aa..4dc92c78 100644 --- a/docs/integration-cli-tests.md +++ b/docs/integration-cli-tests.md @@ -67,8 +67,9 @@ sudo bash -x pmm3-client-setup.sh --pmm_server_ip 127.0.0.1 --client_version 3-d python3 -m venv virtenv source virtenv/bin/activate pip install --upgrade pip +pip install setuptools pip install -r requirements.txt -python pmm-framework.py --database ps +python3 pmm-framework.py --database ps cd ../.. 
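+# Tip: pmm-framework.py accepts repeated --database flags
+# (e.g. --database ps --database psmdb) to provision several services at once.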
``` @@ -199,7 +200,6 @@ test.describe('pmm-admin help output', () => { This example demonstrates how to execute a `pmm-admin` command, check its success, and assert on its output using the project's established helper functions, providing a more accurate representation of how CLI tests are written here. - --- **Related Documentation**: