Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions .github/scripts/generate_validation_report.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
"""Generate validation report for GitHub Actions summary."""

import json
import sys
from pathlib import Path

def main():
    """Render the reference-validation summary as GitHub-flavored Markdown.

    Reads ``validation_reference_results.json`` from the working directory
    and prints a short report (intended to be appended to
    ``$GITHUB_STEP_SUMMARY``). Returns a process exit code: 0 on success,
    1 when the results file is missing.
    """
    report_path = Path("validation_reference_results.json")

    # The validation step writes this file; its absence means that step failed.
    if not report_path.exists():
        print("❌ Validation failed to produce results")
        return 1

    results = json.loads(report_path.read_text())
    summary, targets = results["summary"], results["targets"]

    print(f"**Test Cases Run:** {summary['test_cases_run']}")
    print(f"**Total Comparisons:** {summary['total']}")
    print(f"**Passed:** {summary['passed']} ({summary['pass_rate']:.1f}%)")
    print(f"**Failed:** {summary['failed']}")
    print()

    rate = summary["pass_rate"]
    # Tiers are checked best-first; the first threshold met wins.
    tiers = (
        ("excellent_pass_rate", "✅ **EXCELLENT**", "exceeds target"),
        ("target_pass_rate", "✅ **VERY GOOD**", "exceeds target"),
        ("minimum_pass_rate", "✅ **PASSED**", "meets minimum"),
    )
    for key, label, verb in tiers:
        if rate >= targets[key]:
            print(f"{label} - Pass rate {rate:.1f}% {verb} {targets[key]}%")
            break
    else:
        print(f"❌ **BELOW TARGET** - Pass rate {rate:.1f}% below minimum {targets['minimum_pass_rate']}%")

    return 0

if __name__ == "__main__":
    sys.exit(main())
37 changes: 37 additions & 0 deletions .github/scripts/report_test_suite_status.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
"""Report test suite status for GitHub Actions summary."""

import json
import sys
from pathlib import Path

def main():
    """Summarize the test-suite configuration as GitHub-flavored Markdown.

    Reads ``test_config.json`` from the working directory and prints counts
    plus per-case lists of active and pending test cases (intended to be
    appended to ``$GITHUB_STEP_SUMMARY``). Returns a process exit code:
    0 on success, 1 when the config file is missing.
    """
    config_path = Path("test_config.json")

    if not config_path.exists():
        print("❌ test_config.json not found")
        return 1

    config = json.loads(config_path.read_text())
    cases = config["test_cases"]

    # Single pass: bucket cases by the two statuses we report on.
    buckets = {"active": [], "pending_reference": []}
    for case in cases:
        status = case.get("status")
        if status in buckets:
            buckets[status].append(case)
    active = buckets["active"]
    pending = buckets["pending_reference"]

    print(f"**Total Test Cases:** {len(cases)}")
    print(f"**Active:** {len(active)}")
    print(f"**Pending Reference Data:** {len(pending)}")
    print()
    print("### Active Test Cases:")
    for case in active:
        print(f"- ✅ {case['id']}: {case['name']}")
    print()
    print("### Pending Test Cases:")
    for case in pending:
        print(f"- ⏳ {case['id']}: {case['name']}")

    return 0

if __name__ == "__main__":
    sys.exit(main())
83 changes: 4 additions & 79 deletions .github/workflows/validation.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ jobs:
python -m pip install --upgrade pip
pip install numpy scipy matplotlib pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
# Install the dvoacap package itself
pip install -e .

- name: Run reference validation
Expand All @@ -44,17 +43,11 @@ jobs:
- name: Check validation results
if: steps.validation.outputs.validation_completed == 'true'
run: |
# Extract pass rate from validation results
if [ -f validation_reference_results.json ]; then
PASS_RATE=$(python -c "import json; print(json.load(open('validation_reference_results.json'))['summary']['pass_rate'])")
echo "Pass rate: ${PASS_RATE}%"

# Check against minimum target (80%)
python -c "import sys; import json; \
result = json.load(open('validation_reference_results.json')); \
pass_rate = result['summary']['pass_rate']; \
min_target = result['targets']['minimum_pass_rate']; \
sys.exit(0 if pass_rate >= min_target else 1)"
python -c "import sys; import json; result = json.load(open('validation_reference_results.json')); pass_rate = result['summary']['pass_rate']; min_target = result['targets']['minimum_pass_rate']; sys.exit(0 if pass_rate >= min_target else 1)"
else
echo "ERROR: validation_reference_results.json not found"
exit 1
Expand All @@ -73,37 +66,7 @@ jobs:
run: |
echo "## Validation Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

if [ -f validation_reference_results.json ]; then
python << 'EOF' >> $GITHUB_STEP_SUMMARY
import json
import sys

with open("validation_reference_results.json") as f:
results = json.load(f)

summary = results["summary"]
targets = results["targets"]

print(f"**Test Cases Run:** {summary['test_cases_run']}")
print(f"**Total Comparisons:** {summary['total']}")
print(f"**Passed:** {summary['passed']} ({summary['pass_rate']:.1f}%)")
print(f"**Failed:** {summary['failed']}")
print()

pass_rate = summary["pass_rate"]
if pass_rate >= targets["excellent_pass_rate"]:
print(f"✅ **EXCELLENT** - Pass rate {pass_rate:.1f}% exceeds target {targets['excellent_pass_rate']}%")
elif pass_rate >= targets["target_pass_rate"]:
print(f"✅ **VERY GOOD** - Pass rate {pass_rate:.1f}% exceeds target {targets['target_pass_rate']}%")
elif pass_rate >= targets["minimum_pass_rate"]:
print(f"✅ **PASSED** - Pass rate {pass_rate:.1f}% meets minimum {targets['minimum_pass_rate']}%")
else:
print(f"❌ **BELOW TARGET** - Pass rate {pass_rate:.1f}% below minimum {targets['minimum_pass_rate']}%")
EOF
else
echo "❌ Validation failed to produce results" >> $GITHUB_STEP_SUMMARY
fi
python .github/scripts/generate_validation_report.py >> $GITHUB_STEP_SUMMARY

test-suite-status:
name: Test Suite Status
Expand All @@ -122,47 +85,9 @@ EOF

- name: Check test coverage
run: |
python -c "
import json

with open('test_config.json') as f:
config = json.load(f)

total_tests = len(config['test_cases'])
active_tests = len([tc for tc in config['test_cases'] if tc.get('status') == 'active'])
pending_tests = total_tests - active_tests

print(f'Total test cases defined: {total_tests}')
print(f'Active test cases: {active_tests}')
print(f'Pending reference data: {pending_tests}')

if active_tests == 0:
print('WARNING: No active test cases')
exit(1)
"
python -c "import json; config = json.load(open('test_config.json')); total = len(config['test_cases']); active = len([tc for tc in config['test_cases'] if tc.get('status') == 'active']); pending = total - active; print(f'Total test cases: {total}'); print(f'Active: {active}'); print(f'Pending: {pending}'); exit(1 if active == 0 else 0)"

- name: Report test suite status
run: |
echo "## Test Suite Status" >> $GITHUB_STEP_SUMMARY
python << 'EOF' >> $GITHUB_STEP_SUMMARY
import json

with open("test_config.json") as f:
config = json.load(f)

test_cases = config["test_cases"]
active = [tc for tc in test_cases if tc.get("status") == "active"]
pending = [tc for tc in test_cases if tc.get("status") == "pending_reference"]

print(f"**Total Test Cases:** {len(test_cases)}")
print(f"**Active:** {len(active)}")
print(f"**Pending Reference Data:** {len(pending)}")
print()
print("### Active Test Cases:")
for tc in active:
print(f"- ✅ {tc['id']}: {tc['name']}")
print()
print("### Pending Test Cases:")
for tc in pending:
print(f"- ⏳ {tc['id']}: {tc['name']}")
EOF
python .github/scripts/report_test_suite_status.py >> $GITHUB_STEP_SUMMARY