diff --git a/.github/workflows/perf-tests.yml b/.github/workflows/perf-tests.yml
new file mode 100644
index 0000000..00f162d
--- /dev/null
+++ b/.github/workflows/perf-tests.yml
@@ -0,0 +1,42 @@
+name: Performance Tests
+
+on:
+  schedule:
+    - cron: "0 2 */3 * *"
+  workflow_dispatch:
+
+# The report-commit step pushes with the default GITHUB_TOKEN, which needs
+# write access to repository contents.
+permissions:
+  contents: write
+
+jobs:
+  run-perf-tests:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+
+      - name: Install dependencies
+        run: |
+          pip install -r requirements.txt
+
+      - name: Run performance tests
+        env:
+          INPUT: config.json
+        run: |
+          python perfTestRunner.py
+
+      - name: Commit performance report
+        run: |
+          git config user.name "github-actions"
+          git config user.email "github-actions@github.com"
+          git add perf_reports/
+          git commit -m "Add performance report $(date +'%Y-%m-%d')" || echo "No changes"
+          git push
diff --git a/perfTestRunner.py b/perfTestRunner.py
index 1e69d56..22173ae 100644
--- a/perfTestRunner.py
+++ b/perfTestRunner.py
@@ -1,9 +1,19 @@
-import subprocess
+import os
 import json
 import time
-import os
+import subprocess
+import csv
+from pathlib import Path
 from datetime import date, datetime
+
+
+# Daily report location; created eagerly so every writer can assume it exists.
+REPORTS_DIR = Path("perf_reports")
+REPORTS_DIR.mkdir(parents=True, exist_ok=True)
+
+CSV_FILE = REPORTS_DIR / f"perf_{date.today().isoformat()}.csv"
+
 
 now = datetime.now()
 current_time = now.strftime("%H:%M:%S")
 TIMESTAMP = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -116,6 +126,41 @@ def compare_results(prev_results, new_results):
             test_fail = True
     return test_fail
 
+
+def write_csv_results(test_name, results):
+    """Append one CSV row per numeric phase duration for *test_name*.
+
+    Rows accumulate in the day's shared report file (CSV_FILE); the header
+    row is written only when the file is first created.
+    """
+    # Open in append mode, not "w": perform_load_test calls this once per
+    # test, and "w" would clobber the rows written by the previous test.
+    write_header = not CSV_FILE.exists()
+    with open(CSV_FILE, mode="a", newline="") as f:
+        writer = csv.writer(f)
+
+        if write_header:
+            writer.writerow([
+                "date",
+                "year",
+                "test_name",
+                "phase",
+                "duration_minutes"
+            ])
+
+        today = date.today()
+        year = today.year
+
+        for phase, duration in results.items():
+            # Only numeric durations become rows; other entries are skipped.
+            if isinstance(duration, (int, float)):
+                writer.writerow([
+                    today.isoformat(),
+                    year,
+                    test_name,
+                    phase,
+                    duration
+                ])
 
 
 def perform_load_test():
@@ -154,6 +199,10 @@ def perform_load_test():
     # Save results after successful test execution
     save_results(test_data)
 
+    # Mirror the JSON results into the day's CSV report kept in the repo.
+    write_csv_results(test_name, test_data["results"])
+
+
     if test_fail:
         exit(1)
 
diff --git a/perfTestRunner_csv.py b/perfTestRunner_csv.py
new file mode 100644
index 0000000..7b32110
--- /dev/null
+++ b/perfTestRunner_csv.py
@@ -0,0 +1 @@
+print("CSV PERF TEST RUNNER STARTED")