diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index a47454c..c619d94 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -10,6 +10,7 @@ permissions:
   contents: read
   pages: write
   id-token: write
+  administration: write
 
 concurrency:
   group: pages
@@ -39,3 +40,10 @@ jobs:
     steps:
       - id: deployment
         uses: actions/deploy-pages@v4
+      # NOTE(review): "administration" is not a grantable GITHUB_TOKEN
+      # permission scope, and GITHUB_TOKEN cannot PATCH repository settings,
+      # so this step looks like it will 403 — confirm; likely needs a
+      # fine-grained PAT secret with "Administration: write" instead.
+      - name: Update repo homepage
+        run: gh api -X PATCH repos/${{ github.repository }} -f homepage="${{ steps.deployment.outputs.page_url }}"
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/graph/llm.py b/graph/llm.py
index 70f3fe0..f364cd8 100644
--- a/graph/llm.py
+++ b/graph/llm.py
@@ -32,4 +32,14 @@ def create_llm(config: LangGraphConfig) -> ChatOpenAI:
         # AIMessageChunks with usage_metadata=None and we can't emit
         # the cost-v1 DataPart on the terminal artifact.
         stream_usage=True,
+        # Cloudflare's managed WAF blocks the OpenAI SDK's default
+        # `OpenAI/Python ` User-Agent (observed 403 "Your request
+        # was blocked" against api.proto-labs.ai). Override with the
+        # same identifier `tools/lg_tools.py` uses for outbound fetches
+        # so every protoAgent egress presents a consistent, allowlisted
+        # UA. If you self-host behind a different edge, this is safe to
+        # keep.
+        default_headers={
+            "User-Agent": "protoAgent/0.1 (+https://github.com/protoLabsAI/protoAgent)",
+        },
     )
diff --git a/pyproject.toml b/pyproject.toml
index b730fc6..39bdee2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "protoagent"
-version = "0.1.0"
+version = "0.2.1"
 description = "protoAgent — LangGraph + A2A template for spawning protoLabs agents"
 requires-python = ">=3.11"