Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
255 changes: 255 additions & 0 deletions .github/workflows/full-tests-with-api.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,255 @@
name: Robot Framework Tests (Full - With API Keys)

on:
  push:
    branches:
      - dev
      - main
    paths:
      - 'tests/**/*.robot'
      - 'tests/**/*.py'
      - 'backends/advanced/src/**'
      - '.github/workflows/full-tests-with-api.yml'
  workflow_dispatch: # Allow manual triggering

permissions:
  contents: read
  pull-requests: write
  issues: write
  pages: write      # needed by actions/deploy-pages
  id-token: write   # needed by actions/deploy-pages (OIDC)

jobs:
  full-robot-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # Fail fast (before any Docker build) if the API-key secrets are missing.
      - name: Verify required secrets
        env:
          DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          echo "Verifying required secrets..."
          if [ -z "$DEEPGRAM_API_KEY" ]; then
            echo "❌ ERROR: DEEPGRAM_API_KEY secret is not set"
            exit 1
          fi
          if [ -z "$OPENAI_API_KEY" ]; then
            echo "❌ ERROR: OPENAI_API_KEY secret is not set"
            exit 1
          fi
          if [ -z "$HF_TOKEN" ]; then
            echo "⚠️ WARNING: HF_TOKEN secret is not set (speaker recognition will be disabled)"
          else
            echo "✓ HF_TOKEN is set (length: ${#HF_TOKEN})"
          fi
          echo "✓ DEEPGRAM_API_KEY is set (length: ${#DEEPGRAM_API_KEY})"
          echo "✓ OPENAI_API_KEY is set (length: ${#OPENAI_API_KEY})"
          echo "✓ Required secrets verified"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: |
            image=moby/buildkit:latest
            network=host

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ hashFiles('backends/advanced/Dockerfile', 'backends/advanced/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: "latest"

      - name: Install Robot Framework and dependencies
        run: |
          uv pip install --system robotframework robotframework-requests python-dotenv websockets

      - name: Create test config.yml
        run: |
          echo "Copying test configuration file..."
          mkdir -p config
          cp tests/configs/deepgram-openai.yml config/config.yml
          echo "✓ Test config.yml created from tests/configs/deepgram-openai.yml"
          ls -lh config/config.yml

      - name: Create plugins.yml from template
        run: |
          echo "Creating plugins.yml from template..."
          if [ -f "config/plugins.yml.template" ]; then
            cp config/plugins.yml.template config/plugins.yml
            echo "✓ plugins.yml created from template"
            ls -lh config/plugins.yml
          else
            echo "❌ ERROR: config/plugins.yml.template not found"
            exit 1
          fi

      - name: Run Full Robot Framework tests
        working-directory: tests
        env:
          # Required for test runner script
          DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          CLEANUP_CONTAINERS: "false" # Don't cleanup in CI - handled by workflow
        run: |
          # Actions runs `run:` with `bash -e`, so a plain
          #   ./run-robot-tests.sh
          #   TEST_EXIT_CODE=$?
          # would abort the step on failure before $? is ever captured.
          # The `|| TEST_EXIT_CODE=$?` form records the code without tripping -e.
          TEST_EXIT_CODE=0
          ./run-robot-tests.sh || TEST_EXIT_CODE=$?
          echo "test_exit_code=$TEST_EXIT_CODE" >> "$GITHUB_ENV"
          # Don't fail here; the final gate step fails the workflow after
          # reports and artifacts have been uploaded.
          exit 0

      - name: Show service logs
        if: always()
        working-directory: backends/advanced
        run: |
          echo "=== Backend Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 chronicle-backend-test
          echo ""
          echo "=== Worker Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 workers-test

      - name: Check if test results exist
        if: always()
        id: check_results
        run: |
          if [ -f tests/results/output.xml ]; then
            echo "results_exist=true" >> "$GITHUB_OUTPUT"
          else
            echo "results_exist=false" >> "$GITHUB_OUTPUT"
            echo "⚠️ No test results found in tests/results/"
            ls -la tests/results/ || echo "Results directory doesn't exist"
          fi

      - name: Upload Robot Framework HTML reports
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-reports-html-full
          path: |
            tests/results/report.html
            tests/results/log.html
          retention-days: 30

      - name: Publish HTML Report as GitHub Pages artifact
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-pages-artifact@v3
        with:
          path: tests/results

      - name: Deploy to GitHub Pages
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/deploy-pages@v4
        id: deployment

      - name: Generate test summary
        if: always() && steps.check_results.outputs.results_exist == 'true'
        id: test_summary
        run: |
          # Parse pass/fail counts out of Robot Framework's output.xml.
          # Heredoc content and its terminator sit at the block scalar's base
          # indent so they reach bash at column 0 (plain <<, not <<-).
          python3 << 'PYTHON_SCRIPT' > test_summary.txt
          import xml.etree.ElementTree as ET
          tree = ET.parse('tests/results/output.xml')
          root = tree.getroot()
          stats = root.find('.//total/stat')
          if stats is not None:
              passed = stats.get("pass", "0")
              failed = stats.get("fail", "0")
              total = int(passed) + int(failed)
              print(f"PASSED={passed}")
              print(f"FAILED={failed}")
              print(f"TOTAL={total}")
          PYTHON_SCRIPT

          # Source the variables
          source test_summary.txt

          # Expose counts to any later summary/reporting steps
          echo "passed=$PASSED" >> "$GITHUB_OUTPUT"
          echo "failed=$FAILED" >> "$GITHUB_OUTPUT"
          echo "total=$TOTAL" >> "$GITHUB_OUTPUT"

      - name: Upload Robot Framework XML output
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-results-xml-full
          path: tests/results/output.xml
          retention-days: 30

      # SECURITY: never upload raw .env files — they contain API keys and would
      # be downloadable by anyone with read access to the repository. Redact
      # every value (keeping variable names, so "which vars were set" is still
      # debuggable) before handing the files to upload-artifact.
      - name: Sanitize env files for failure artifact
        if: failure()
        run: |
          mkdir -p sanitized-logs
          for f in backends/advanced/.env tests/setup/.env.test; do
            if [ -f "$f" ]; then
              sed -E 's/^([A-Za-z_][A-Za-z0-9_]*)=.*/\1=[REDACTED]/' "$f" > "sanitized-logs/$(basename "$f")"
            fi
          done
          ls -la sanitized-logs/ || true

      - name: Upload logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-logs-full
          path: sanitized-logs/
          retention-days: 7
Comment on lines +197 to +205
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Potential secret exposure in uploaded logs.

Uploading .env files as artifacts on failure may expose API keys or other secrets. These files are accessible to anyone with repository read access on public repositories.

Consider either:

  1. Sanitizing the files before upload (redacting sensitive values)
  2. Uploading only non-sensitive configuration files
  3. Using if: failure() && github.event_name != 'pull_request' to restrict to non-PR workflows
🔒 Suggested fix to sanitize or exclude sensitive files
     - name: Upload logs on failure
       if: failure()
       uses: actions/upload-artifact@v4
       with:
         name: robot-test-logs-full
         path: |
-          backends/advanced/.env
-          tests/setup/.env.test
+          backends/advanced/docker-compose-test.yml
+          tests/setup/.env.test.example
         retention-days: 7

Or sanitize before upload:

    - name: Sanitize and upload logs on failure
      if: failure()
      run: |
        # Create sanitized copies
        sed 's/\(API_KEY=\).*/\1[REDACTED]/' backends/advanced/.env > backends/advanced/.env.sanitized || true
        sed 's/\(PASSWORD=\).*/\1[REDACTED]/' tests/setup/.env.test > tests/setup/.env.test.sanitized || true
    
    - name: Upload sanitized logs on failure
      if: failure()
      uses: actions/upload-artifact@v4
      with:
        name: robot-test-logs-full
        path: |
          backends/advanced/.env.sanitized
          tests/setup/.env.test.sanitized
        retention-days: 7


- name: Display test results summary
if: always()
run: |
if [ -f tests/results/output.xml ]; then
echo "Full test results generated successfully (With API Keys)"
echo "========================================"
python3 << 'PYTHON_SCRIPT'
import xml.etree.ElementTree as ET
tree = ET.parse('tests/results/output.xml')
root = tree.getroot()
stats = root.find('.//total/stat')
if stats is not None:
passed = stats.get("pass", "0")
failed = stats.get("fail", "0")
print(f'✅ Passed: {passed}')
print(f'❌ Failed: {failed}')
print(f'📊 Total: {int(passed) + int(failed)}')
PYTHON_SCRIPT
echo "========================================"
echo ""
echo "ℹ️ Full test suite including API-dependent tests"
echo ""
echo "📊 FULL TEST REPORTS AVAILABLE:"
echo " 1. Go to the 'Summary' tab at the top of this page"
echo " 2. Scroll down to 'Artifacts' section"
echo " 3. Download 'robot-test-reports-html-full'"
echo " 4. Extract and open report.html or log.html in your browser"
echo ""
echo "The HTML reports provide:"
echo " - report.html: Executive summary with statistics"
echo " - log.html: Detailed step-by-step execution log"
echo ""
fi

- name: Cleanup
if: always()
working-directory: backends/advanced
run: |
docker compose -f docker-compose-test.yml down -v

- name: Fail workflow if tests failed
if: always()
run: |
if [ "${{ env.test_exit_code }}" != "0" ]; then
echo "❌ Tests failed with exit code ${{ env.test_exit_code }}"
exit 1
else
echo "✅ All tests passed"
fi
Loading
Loading