name: Integration Tests (Client-side)

run-name: Run integration tests from llama-stack using local client

on:
  push:
    branches:
      - main
      - 'release-[0-9]+.[0-9]+.x'
  pull_request:
    branches:
      - main
      - 'release-[0-9]+.[0-9]+.x'
    types: [opened, synchronize, reopened]
    paths:
      - 'src/llama_stack_client/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - '.github/workflows/integration-tests.yml'
  workflow_dispatch:
    inputs:
      test-setup:
        description: 'Test against a specific setup'
        type: string
        default: 'ollama'
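
# The workflow can also be triggered manually, for example:
#   gh workflow run integration-tests.yml -f test-setup=ollama
# (test-setup falls back to the 'ollama' default when omitted)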

concurrency:
  # Skip concurrency for pushes to main - each commit should be tested independently
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
  cancel-in-progress: true

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Checkout llama-stack repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          repository: llamastack/llama-stack
          ref: main

      - name: Generate test matrix
        id: set-matrix
        run: |
          # Generate matrix from CI_MATRIX in llama-stack tests/integration/ci_matrix.json
          MATRIX=$(PYTHONPATH=. python3 scripts/generate_ci_matrix.py \
            --test-setup "${{ github.event.inputs.test-setup }}")
          echo "matrix=$MATRIX" >> $GITHUB_OUTPUT
          echo "Generated matrix: $MATRIX"
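
  # Illustrative only: the generated matrix comes from tests/integration/ci_matrix.json
  # in llama-stack and is expected to look roughly like
  #   {"include": [{"setup": "ollama", "suite": "...", "allowed_clients": [...], "stack_config": "..."}, ...]}
  # These are the keys consumed as matrix.config.* in the job below.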
  run-replay-mode-tests:
    needs: generate-matrix
    runs-on: ubuntu-latest
    name: ${{ format('Integration Tests ({0}, {1}, {2}, {3})', matrix.client, matrix.config.setup, matrix.python-version, matrix.config.suite) }}

    strategy:
      fail-fast: false
      matrix:
        client: [library, docker, server]
        python-version: ['3.12']
        # Test configurations: Generated from CI_MATRIX in llama-stack tests/integration/ci_matrix.json
        config: ${{ fromJSON(needs.generate-matrix.outputs.matrix).include }}
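
    # Most steps below are gated on allowed_clients: when a config lists allowed_clients
    # and the current client is not in that list, the step is skipped.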
    steps:
      - name: Checkout llama-stack-client repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          path: llama-stack-client-python

      - name: Checkout llama-stack repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          repository: llamastack/llama-stack
          ref: main
          path: llama-stack
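
      # Layout after the two checkouts: ./llama-stack-client-python (this repository) and
      # ./llama-stack (upstream tests and scripts). Most remaining steps run inside ./llama-stack.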
      - name: Install uv
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
        with:
          python-version: ${{ matrix.python-version }}
          version: 0.7.6

      - name: Install llama-stack dependencies
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        working-directory: llama-stack
        run: |
          echo "Installing llama-stack dependencies"
          uv sync --all-groups
          uv pip install faiss-cpu
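
      # The editable install below makes the test run exercise the checked-out client code
      # rather than the published llama-stack-client release.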
      - name: Install local llama-stack-client
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        working-directory: llama-stack
        run: |
          echo "Installing local llama-stack-client from ../llama-stack-client-python"
          uv pip install -e ../llama-stack-client-python

          echo "Installed llama packages:"
          uv pip list | grep llama
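
      # 'llama stack list-deps ci-tests' is expected to print the ci-tests distribution's
      # dependencies one per line; xargs -L1 passes each line to 'uv pip install'.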
      - name: Build Llama Stack
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        working-directory: llama-stack
        run: |
          echo "Building Llama Stack"
          LLAMA_STACK_DIR=. \
            uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install
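
      # Presumably needed because some test scripts create local commits in the checkout.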
      - name: Configure git for commits
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        working-directory: llama-stack
        run: |
          git config --local user.email "github-actions[bot]@users.noreply.github.com"
          git config --local user.name "github-actions[bot]"
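
      # Resource snapshot, logged up front to help diagnose failures caused by low disk
      # space or memory.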
      - name: Check Storage and Memory Available Before Tests
        if: ${{ (matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client)) && always() }}
        run: |
          free -h
          df -h
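
      # Replay mode serves pre-recorded inference responses, which is presumably why a
      # dummy OPENAI_API_KEY is sufficient. For illustration: with client=server,
      # setup=ollama, and no suite or stack_config in the config, the step resolves to roughly:
      #   uv run --no-sync ./scripts/integration-tests.sh --stack-config server:ci-tests --inference-mode replay --setup ollama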
      - name: Run Integration Tests (Replay Mode)
        if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
        working-directory: llama-stack
        env:
          OPENAI_API_KEY: dummy
        run: |
          STACK_CONFIG="${{ matrix.config.stack_config || (matrix.client == 'library' && 'ci-tests') || (matrix.client == 'server' && 'server:ci-tests') || 'docker:ci-tests' }}"

          SCRIPT_ARGS="--stack-config $STACK_CONFIG --inference-mode replay"

          # Add optional arguments
          if [ -n '${{ matrix.config.setup }}' ]; then
            SCRIPT_ARGS="$SCRIPT_ARGS --setup ${{ matrix.config.setup }}"
          fi
          if [ -n '${{ matrix.config.suite }}' ]; then
            SCRIPT_ARGS="$SCRIPT_ARGS --suite ${{ matrix.config.suite }}"
          fi

          echo "=== Running command ==="
          echo "uv run --no-sync ./scripts/integration-tests.sh $SCRIPT_ARGS"
          echo ""

          uv run --no-sync ./scripts/integration-tests.sh $SCRIPT_ARGS | tee pytest-replay.log
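
      # Logs are uploaded even when earlier steps fail (always()); the artifact name
      # combines run id, attempt, and matrix job index so it stays unique across the matrix.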
      - name: Upload logs
        if: ${{ always() }}
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: logs-${{ github.run_id }}-${{ github.run_attempt || '1' }}-${{ strategy.job-index || github.job }}-${{ github.action }}
          path: |
            llama-stack/*.log
          retention-days: 1