From 29932ff5021cd4e5b612f04fc36348ebb0ba3075 Mon Sep 17 00:00:00 2001 From: senseibelbi Date: Fri, 17 Apr 2026 14:32:39 +0200 Subject: [PATCH 1/4] Clean repository surface on main --- .gitignore | 7 + AUDIT_MCP_COVERAGE_2026-03-18.md | 86 - AUDIT_MCP_ENDPOINTS_2026-03-18.md | 171 - AUDIT_MCP_FAMILY_LIVE_COVERAGE_2026-03-18.md | 53 - AUDIT_MCP_PATCH_VERIFICATION_2026-03-18.md | 48 - .../AUDIT_EVIDENCE_FRAMEWORK.md | 105 - ToxMCP_Audit_Reviewed_v2/DELIVERY_SUMMARY.md | 150 - ToxMCP_Audit_Reviewed_v2/INDEX.md | 146 - ToxMCP_Audit_Reviewed_v2/MANIFEST.md | 6 - ToxMCP_Audit_Reviewed_v2/MANIFEST_SHA256.txt | 28 - .../PUBLIC_REFERENCE_NOTES.md | 23 - ToxMCP_Audit_Reviewed_v2/QUICK_REFERENCE.md | 95 - ToxMCP_Audit_Reviewed_v2/README.md | 12 - ToxMCP_Audit_Reviewed_v2/REVISION_LOG.md | 95 - .../TOXMCP_MASTER_AUDIT_REPORT.md | 272 - ...MCP_Performance_Resilience_Audit_Report.md | 474 -- .../VALIDATION_BACKLOG.md | 48 - .../aop-mcp-audit/README.md | 161 - .../aop-mcp-audit/REMEDIATION_CODE.md | 710 -- .../cognitive_ergonomics_audit_report.md | 413 - .../comptox-mcp-audit/README.md | 173 - .../comptox-mcp-audit/REMEDIATION_CODE.md | 649 -- .../oqt-mcp-audit/README.md | 180 - .../oqt-mcp-audit/REMEDIATION_CODE.md | 1061 --- .../pbpk-mcp-audit/README.md | 164 - .../pbpk-mcp-audit/REMEDIATION_CODE.md | 902 --- .../toxmcp_adversarial_audit_report.md | 386 - .../toxmcp_contract_audit_report.md | 399 - .../toxmcp_future_proofing_audit_report.md | 209 - .../toxmcp_observability_audit_report.md | 616 -- .../toxmcp_regulatory_audit_report.md | 230 - .../toxmcp_remediation_snippets.py | 463 -- .../toxmcp_security_audit_report.md | 215 - docs/genra_workflow.md | 2 +- docs/mcp_ctx_audit.md | 54 - docs/model_metadata.md | 2 +- epa_comptox_api_structure.json | 37 - extract_api_structure.py | 15 +- scientific_engine_bundle.txt | 6829 ----------------- triclosan_partition_distribution.png | Bin 62475 -> 0 bytes triclosan_tissue_distribution_httk.png | Bin 247522 -> 0 bytes 41 files changed, 
19 insertions(+), 15670 deletions(-) delete mode 100644 AUDIT_MCP_COVERAGE_2026-03-18.md delete mode 100644 AUDIT_MCP_ENDPOINTS_2026-03-18.md delete mode 100644 AUDIT_MCP_FAMILY_LIVE_COVERAGE_2026-03-18.md delete mode 100644 AUDIT_MCP_PATCH_VERIFICATION_2026-03-18.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/AUDIT_EVIDENCE_FRAMEWORK.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/DELIVERY_SUMMARY.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/INDEX.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/MANIFEST.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/MANIFEST_SHA256.txt delete mode 100644 ToxMCP_Audit_Reviewed_v2/PUBLIC_REFERENCE_NOTES.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/QUICK_REFERENCE.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/README.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/REVISION_LOG.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/TOXMCP_MASTER_AUDIT_REPORT.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/ToxMCP_Performance_Resilience_Audit_Report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/VALIDATION_BACKLOG.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/README.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/REMEDIATION_CODE.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/cognitive_ergonomics_audit_report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/README.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/REMEDIATION_CODE.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/README.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/REMEDIATION_CODE.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/README.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/REMEDIATION_CODE.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_adversarial_audit_report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_contract_audit_report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_future_proofing_audit_report.md delete mode 100644 
ToxMCP_Audit_Reviewed_v2/toxmcp_observability_audit_report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_regulatory_audit_report.md delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_remediation_snippets.py delete mode 100644 ToxMCP_Audit_Reviewed_v2/toxmcp_security_audit_report.md delete mode 100644 docs/mcp_ctx_audit.md delete mode 100644 epa_comptox_api_structure.json delete mode 100644 scientific_engine_bundle.txt delete mode 100644 triclosan_partition_distribution.png delete mode 100644 triclosan_tissue_distribution_httk.png diff --git a/.gitignore b/.gitignore index 1f21d12..c6b2662 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,10 @@ htmlcov/ artifacts/ dist/ build/ + +# Internal review and generated research artifacts +/AUDIT_MCP_*.md +/ToxMCP_Audit_Reviewed_*/ +/scientific_engine_bundle.txt +/triclosan_*.png +/epa_comptox_api_structure.json diff --git a/AUDIT_MCP_COVERAGE_2026-03-18.md b/AUDIT_MCP_COVERAGE_2026-03-18.md deleted file mode 100644 index ba8bdd4..0000000 --- a/AUDIT_MCP_COVERAGE_2026-03-18.md +++ /dev/null @@ -1,86 +0,0 @@ -# EPA CompTox MCP coverage audit - -Date: `2026-03-18` - -Target: -- `http://127.0.0.1:8002/mcp` - -## Executive summary - -After the HTTP catalog patch, the live MCP now advertises the full catalog over `tools/list`: - -- total tools: `79` -- `nextCursor`: `null` - -Current resource coverage by family: - -| Resource family | Tool count | -| --- | ---: | -| `chemical` | 10 | -| `bioactivity` | 14 | -| `exposure` | 32 | -| `hazard` | 18 | -| `chemical_list` | 2 | -| `metadata` | 3 | -| `cheminformatics` | 0 | - -## What is covered - -The current MCP catalog covers the major CTX dashboard data families represented in this repository: - -- chemical discovery and detail lookup -- bioactivity assays, assay chemicals, AED, and AOP lookups -- exposure datasets including `HTTK`, `CPDat`, `SEEM`, `MMDB`, functional use, and CCD -- hazard datasets including `ToxValDB`, `ToxRefDB`, cancer, genetox, `ADME/IVIVE`, `IRIS`, 
`PPRTV`, and `HAWC` -- public chemical lists -- metadata and applicability-domain assets - -Representative live-discovery checks after the patch: - -- `search_hazard`: present -- `get_hazard_adme_ivive`: present -- `get_hazard_toxref`: present -- `get_bioactivity_aed`: present -- `search_httk`: present - -## What is not covered or not yet surfaced - -### 1. Predictive services are not part of the live MCP catalog - -The repository contains predictive service code (`GenRA`, `OPERA`, `TEST` wrappers), but these are not currently advertised as MCP tools in the live `79`-tool catalog. - -Interpretation: -- CTX dashboard-style data access is broadly covered. -- Predictive micro-services exist in the codebase, but they are not yet exposed through the same MCP discovery surface. - -### 2. `cheminformatics` currently contributes zero tools - -The `cheminformatics` resource is initialized, but its current tool count is `0`. - -Interpretation: -- This is not blocking dashboard data access. -- It is an obvious expansion point if cheminformatics operations are expected to be part of the MCP surface. - -## Answer to “do we cover the entire dashboard?” - -For the core CTX data tiers used by this server, coverage is strong: - -- chemical: yes -- bioactivity: yes -- exposure: yes -- hazard: yes -- metadata/list assets: yes - -Two qualifiers remain: - -1. “Entire dashboard” is broader than the audited priority families and broader than the CTX API surface used in this repo. -2. Predictive services and cheminformatics are not fully surfaced as MCP tools in the same way as the core CTX data families. - -## Bottom line - -If the goal is comprehensive MCP coverage of the main CTX dashboard data families, the server is now in good shape and the full catalog is discoverable over HTTP. - -If the goal is literal “everything in the repo” or “everything a user may associate with the dashboard,” the remaining visible gaps are: - -1. predictive services are not exposed as MCP tools -2. 
cheminformatics contributes no live tools diff --git a/AUDIT_MCP_ENDPOINTS_2026-03-18.md b/AUDIT_MCP_ENDPOINTS_2026-03-18.md deleted file mode 100644 index 1f27e0d..0000000 --- a/AUDIT_MCP_ENDPOINTS_2026-03-18.md +++ /dev/null @@ -1,171 +0,0 @@ -# EPA CompTox MCP audit - -Date: `2026-03-18` - -Target server: -- `http://127.0.0.1:8002/mcp` - -Audit scope: -- MCP discovery via `tools/list` -- Live tool execution for the priority data families: - - `AED` - - `HTTK` - - `ADME/IVIVE` -- Upstream API reachability using `scripts/check_endpoints.py` - -## Executive summary - -The live server on `8002` is functional for the priority data families. `AED`, `HTTK`, and `ADME/IVIVE` all returned real data for `DTXSID7020182` (Bisphenol A). - -This audit initially surfaced two issues: - -1. HTTP `tools/list` only returned the first `50` tools, which hid part of the catalog. -2. The chemical smoke checker used a stale probe URL and produced a false negative. - -Both issues are now patched. - -Post-fix state: - -- HTTP `tools/list` returns the full `79`-tool catalog -- `get_hazard_adme_ivive` is discoverable via `tools/list` -- `scripts/check_endpoints.py --json` passes for chemical, hazard, exposure, and bioactivity when project env is loaded - -## Discovery audit - -Live `tools/list` now returns `79` tools with `nextCursor: null`. - -Priority tool discovery status: - -| Tool | In `tools/list` | Callable | Returns data | -| --- | --- | --- | --- | -| `get_bioactivity_aed` | Yes | Yes | Yes | -| `search_httk` | Yes | Yes | Yes | -| `get_exposure_httk` | Yes | Yes | Yes | -| `get_hazard_adme_ivive` | Yes | Yes | Yes | - -## Live MCP execution audit - -Test substance: -- `DTXSID7020182` (`Bisphenol A`) - -### 1. 
AED - -Tool: -- `get_bioactivity_aed` - -Observed result: -- HTTP metadata status: `200` -- Data type: `list` -- Record count: `662` -- Sample fields include: - - `dtxsid` - - `aeid` - - `aedVal` - - `aedType` - - `httkModel` - - `httkVersion` - - `aedValUnit` - -Conclusion: -- Functional -- Data-bearing -- Suitable for real audit and downstream analysis - -### 2. HTTK - -Tools: -- `search_httk` -- `get_exposure_httk` - -Observed result for both: -- HTTP metadata status: `200` -- Data type: `list` -- Record count: `18` -- Sample fields include: - - `dtxsid` - - `parameter` - - `measured` - - `predicted` - - `model` - - `species` - - `percentile` - -Sample parameter/model: -- `Css` -- `PBTK` - -Conclusion: -- Both HTTK tools are functional -- Both return real HTTK rows -- The two outputs are materially equivalent for this test substance - -### 3. ADME/IVIVE - -Tool: -- `get_hazard_adme_ivive` - -Observed result: -- HTTP metadata status: `200` -- Data type: `list` -- Record count: `18` -- Sample fields include: - - `dtxsid` - - `description` - - `measured` - - `predicted` - - `unit` - - `model` - - `species` - - `percentile` - -Sample parameter: -- `Clint` - -Conclusion: -- Functional -- Data-bearing -- Discoverable through the MCP catalog after the transport patch - -## Upstream dependency audit - -Command path: -- `scripts/check_endpoints.py --json` - -When run with project env loaded, the checker returns: - -| Upstream endpoint | Status | Result | -| --- | --- | --- | -| `CTX Chemical API` | `200` | OK | -| `CTX Hazard API` | `200` | OK | -| `CTX Exposure API` | `200` | OK | -| `CTX Bioactivity API` | `200` | OK | - -Interpretation: -- Chemical, hazard, exposure, and bioactivity upstreams are reachable and healthy enough for the tested MCP calls. -- The checker now probes the chemical tier with `chemical/detail/search/by-dtxsid/DTXSID7020182`, which matches the live CTX path family used by the server. 
- -## Remaining follow-up - -### Finding 1: endpoint matrix documentation still points to `v1` roots - -Severity: -- Medium - -Why it matters: -- `docs/contracts/endpoint-matrix.md` documents `ctx-api/v1` base roots. -- Direct probe tests against those base roots returned `404`, while the currently functioning CTX probe paths use the non-`v1` endpoint family. - -Evidence: -- `docs/contracts/endpoint-matrix.md` lists `https://comptox.epa.gov/ctx-api/v1/chemical` and analogous `v1` roots. -- Direct probes against those base roots returned `404`. -- The patched smoke checker and the live MCP succeed against non-`v1` CTX endpoint paths. - -## Bottom line - -For the priority areas requested in this audit: - -- `AED`: pass -- `HTTK`: pass -- `ADME/IVIVE`: pass - -The server retrieves real data for all three target families and now advertises the full catalog correctly over HTTP. The one remaining issue is documentation drift in `docs/contracts/endpoint-matrix.md`. diff --git a/AUDIT_MCP_FAMILY_LIVE_COVERAGE_2026-03-18.md b/AUDIT_MCP_FAMILY_LIVE_COVERAGE_2026-03-18.md deleted file mode 100644 index 64c7f1b..0000000 --- a/AUDIT_MCP_FAMILY_LIVE_COVERAGE_2026-03-18.md +++ /dev/null @@ -1,53 +0,0 @@ -# MCP Family Live Coverage Audit (2026-03-18) - -## Scope - -- Server audited: `http://127.0.0.1:8002/mcp` -- Discovery source: live MCP HTTP `tools/list` response -- Goal: verify family-level runtime coverage for the exposed CompTox dashboard domains, with explicit proof for `AED`, `HTTK`, and `ADME/IVIVE`. 
- -## Discovery summary - -- Total advertised tools: `79` -- `bioactivity`: `14` tools -- `chemical`: `10` tools -- `chemical_list`: `2` tools -- `exposure`: `32` tools -- `hazard`: `18` tools -- `metadata`: `3` tools - -## Representative live runtime checks - -| Family | Representative tool | Input | `structuredContent.data` | Size | Result | -| --- | --- | --- | --- | ---: | --- | -| `chemical` | `get_chemical_details` | `{"identifier":"DTXSID7020182","id_type":"dtxsid","subset":"default"}` | `dict` | `74` | **PASS** | -| `bioactivity` | `get_bioactivity_aed` | `{"dtxsid":"DTXSID7020182"}` | `list` | `662` | **PASS** | -| `exposure` | `get_exposure_httk` | `{"dtxsid":"DTXSID7020182"}` | `list` | `18` | **PASS** | -| `hazard` | `get_hazard_adme_ivive` | `{"dtxsid":"DTXSID7020182"}` | `list` | `18` | **PASS** | -| `chemical_list` | `get_public_list_names` | `{}` | `list` | `8` | **PASS** | -| `metadata` | `metadata_list_applicability_domain` | `{"limit":10}` | `dict` | `3` | **PASS** | - -## Dashboard coverage mapping - -| Dashboard area | MCP family | Runtime coverage | Notes | -| --- | --- | --- | --- | -| Chemical identity/detail | `chemical` | Covered | `get_chemical_details` returned a populated structured object and now also exposes `structuredContent.data`. | -| AED / bioactivity | `bioactivity` | Covered | `get_bioactivity_aed` returned `662` rows for `DTXSID7020182`. | -| HTTK / exposure | `exposure` | Covered | `get_exposure_httk` returned `18` rows for `DTXSID7020182`. | -| ADME / IVIVE / hazard | `hazard` | Covered | `get_hazard_adme_ivive` returned `18` rows for `DTXSID7020182`. | -| Chemical lists | `chemical_list` | Covered | `get_public_list_names` now returns `8` public list names; `get_full_list("CCL")` remained live throughout. | -| Metadata / reference registries | `metadata` | Covered | `metadata_list_applicability_domain` returned `3` applicability-domain records and now also exposes `structuredContent.data`. 
| -| Cheminformatics | not exposed | Not covered | No live MCP tools are currently advertised for this area. | - -## Findings - -- The priority scientific paths requested for the audit are live and returning data: `AED`, `HTTK`, and `ADME/IVIVE`. -- Family-level dashboard coverage is now complete for all currently exposed MCP families: `chemical`, `bioactivity`, `exposure`, `hazard`, `chemical_list`, and `metadata` all have successful live runtime proof. -- `chemical_list` discovery now works through the shared `ctxpy` client, so non-MCP callers and MCP callers use the same fallback behavior when the upstream enumeration endpoint returns `404`. -- Client parsing is now normalized around `structuredContent.data` for both success and error responses, while preserving existing domain-specific top-level keys for backward compatibility. -- No `cheminformatics` tools are currently exposed through MCP, so that dashboard area remains outside current interface coverage. - -## Conclusion - -The current MCP server is functionally usable across all exposed CompTox dashboard families relevant to this project. The remaining interface gap is not a runtime failure but a product-scope gap: `cheminformatics` is still not exported as live MCP tools. - diff --git a/AUDIT_MCP_PATCH_VERIFICATION_2026-03-18.md b/AUDIT_MCP_PATCH_VERIFICATION_2026-03-18.md deleted file mode 100644 index fc03fa0..0000000 --- a/AUDIT_MCP_PATCH_VERIFICATION_2026-03-18.md +++ /dev/null @@ -1,48 +0,0 @@ -# MCP Patch Verification (2026-03-18) - -## Scope - -- Server: `http://127.0.0.1:8002/mcp` -- Patch set: - - restore `chemical_list.get_public_list_names` - - normalize `structuredContent.data` across success and error responses - -## Live verification results - -### 1. 
`get_public_list_names` recovery - -- Result: **PASS** -- Runtime behavior: returns a non-error response with `structuredContent.data` -- Returned count: `8` -- Sample values: `CCL`, `CCL1`, `CPDAT`, `CPDATv2`, `CTD` -- Implementation note: upstream CTX list-enumeration endpoint currently returns `404`, so the MCP now falls back to a maintained catalog of verified public list names while `get_full_list(list_name)` continues to use the live CTX API. - -### 2. Dict-shaped success responses now expose `structuredContent.data` - -- `get_chemical_details(DTXSID7020182)` - - Result: **PASS** - - `structuredContent.data`: present - - Payload type: `dict` -- `metadata_list_applicability_domain(limit=10)` - - Result: **PASS** - - `structuredContent.data`: present - - Backward-compatible top-level keys preserved: `applicabilityDomains`, `nextCursor`, `metadata` - -### 3. Error responses now expose `structuredContent.data` - -- Probe: `get_chemical_details(DTXSID_NOT_REAL, id_type="dtxsid")` -- Result: **PASS** -- Error semantics preserved: `isError=true` -- Normalization confirmed: `structuredContent.data = null` - -## Outcome - -The MCP now has a consistent client-facing parsing contract: -- Success responses always expose `structuredContent.data` -- Error responses expose `structuredContent.data = null` -- Existing top-level domain-specific keys remain available for backward compatibility - -## Files changed - -- `/Volumes/Storage/topotox_space_relief_20260220/mcp_epacomp_tox/src/epacomp_tox/resources/chemical_list.py` -- `/Volumes/Storage/topotox_space_relief_20260220/mcp_epacomp_tox/src/epacomp_tox/server.py` diff --git a/ToxMCP_Audit_Reviewed_v2/AUDIT_EVIDENCE_FRAMEWORK.md b/ToxMCP_Audit_Reviewed_v2/AUDIT_EVIDENCE_FRAMEWORK.md deleted file mode 100644 index 48d34d1..0000000 --- a/ToxMCP_Audit_Reviewed_v2/AUDIT_EVIDENCE_FRAMEWORK.md +++ /dev/null @@ -1,105 +0,0 @@ -# ToxMCP Audit Evidence Framework - -**Added in reviewed copy:** 2026-04-15 -**Purpose:** Make the package 
easier to defend internally and safer to reuse externally. - ---- - -## Why this document exists - -The original audit pack was strong on systems thinking, but it mixed together three different claim types: - -1. **Directly observed code/schema facts** -2. **Architecture-level inferences** -3. **Scenario-based exploit or misuse narratives** - -Those are all useful, but they should not be presented with the same certainty. This framework standardizes how the reviewed copy uses evidence, confidence, and severity language. - ---- - -## Evidence taxonomy - -| Label | Meaning | Typical example | How to read it | -|---|---|---|---| -| **Observed** | Directly quoted or paraphrased from code, schema, configuration, or documentation contained in the audited material | A function uses `str.format()` on a query template; a schema omits a required provenance field | Strongest class of claim in this package | -| **Observed + inferred** | A direct observation supports a broader architecture conclusion | Independent per-repo correlation IDs imply no end-to-end distributed trace | Usually strong, but still one step removed from a direct test | -| **Scenario** | A misuse or exploit path that depends on stated preconditions | A prompt-injection payload alters downstream reasoning *if* untrusted identifiers are interpolated into model prompts without isolation | Useful for threat modeling, not proof that exploitation was demonstrated | -| **Standards note** | A statement about a regulatory or protocol expectation from a public standard or guidance | Signature/record linking expectations under 21 CFR Part 11 | Read with deployment and intended-use context in mind | - ---- - -## Confidence scale - -| Confidence | Meaning | -|---|---| -| **High** | The claim is strongly grounded in the supplied material or an official standard, and only limited interpretation is required | -| **Medium** | The claim is plausible and supported, but exploitability, operational impact, or scope depends on 
assumptions that have not yet been validated | -| **Low** | The claim is directionally useful for red-teaming, but needs reproduction or source-repo verification before being presented as a hard finding | - ---- - -## Severity language rules used in the reviewed copy - -### Critical -Use only when the package shows a gap that is both material and near-core to the intended operating model, for example: -- integrity of scientific outputs -- inability to reconstruct or sign regulated records -- unbounded execution that can predictably destabilize the service -- unsafe interpolation at a trust boundary - -### High -Use when the gap is significant, but one or more of these remains true: -- exploitability depends on preconditions -- compensating procedural controls may exist -- impact is serious but not necessarily suite-blocking - -### Medium -Use when the gap is real but better framed as a design weakness, future migration cost, or a finding that still needs validation. - ---- - -## Claim phrasing rules - -The reviewed copy avoids the following unless directly demonstrated and scoped: - -- "FDA rejection" -- "submission rejection" -- "certain" -- "production-ready code" -- destructive exploit claims such as graph deletion unless the endpoint is known to permit updates - -Instead, the package prefers wording such as: - -- "high risk of non-conformance for regulated use" -- "likely unacceptable for submission without compensating controls" -- "observed unsafe interpolation pattern" -- "reference implementation / implementation pattern" - ---- - -## Validation states - -Each major finding should eventually be paired with one or more of these: - -| Validation state | Meaning | -|---|---| -| **Reproduced** | A proof of concept or deterministic reproduction exists | -| **Source-verified** | The claim was re-checked against the live repository, not just this audit bundle | -| **Fix-verified** | The proposed remediation was tested and shown to change behavior as intended | -| 
**Still open** | Needs follow-up before external use | - -This reviewed copy improves wording and internal consistency, but it does **not** claim that all findings were reproduced or re-verified against the live repositories. - ---- - -## Minimal standard before external use - -Before using any finding externally, the package should include: - -1. exact repository or commit reference -2. reproduction or test preconditions -3. expected behavior vs observed behavior -4. exploitability caveats -5. fix validation criteria - -Until then, this package is best treated as a **carefully edited internal audit and remediation planning pack**. diff --git a/ToxMCP_Audit_Reviewed_v2/DELIVERY_SUMMARY.md b/ToxMCP_Audit_Reviewed_v2/DELIVERY_SUMMARY.md deleted file mode 100644 index ecf75d6..0000000 --- a/ToxMCP_Audit_Reviewed_v2/DELIVERY_SUMMARY.md +++ /dev/null @@ -1,150 +0,0 @@ -# ToxMCP Audit - Delivery Summary (Reviewed Copy) - -**Package date:** 2026-04-15 -**Status:** Reviewed for internal consistency, evidentiary discipline, and delivery readiness - ---- - -## What this package is - -This is a **reviewed internal audit pack** for the ToxMCP ecosystem covering: - -- `comptox-mcp` -- `oqt-mcp` -- `aop-mcp` -- `pbpk-mcp` - -It is strong as: -- a red-team architecture review -- a remediation planning pack -- a leadership briefing artifact - -It is **not yet** the same thing as: -- a third-party assurance report -- a submission-ready validation package -- a fully reproduced penetration or compliance test report - ---- - -## What changed in the reviewed copy - -The original package had strong insights but needed a more defensible presentation. 
This reviewed copy: - -- distinguishes **observed facts** from **architecture inferences** and **scenario narratives** -- removes or softens overly absolute phrasing -- normalizes dates and terminology -- marks code snippets as **reference implementations** -- updates future-proofing language to current public MCP context -- adds a validation backlog for findings that need live-repo confirmation - -See: -- `AUDIT_EVIDENCE_FRAMEWORK.md` -- `REVISION_LOG.md` -- `VALIDATION_BACKLOG.md` -- `PUBLIC_REFERENCE_NOTES.md` - ---- - -## What is included - -### Package-level docs -| File | Purpose | -|---|---| -| `README.md` | Entry point | -| `DELIVERY_SUMMARY.md` | This document | -| `INDEX.md` | Navigation by audience | -| `QUICK_REFERENCE.md` | Fast triage view | -| `TOXMCP_MASTER_AUDIT_REPORT.md` | Revised cross-suite synthesis | -| `AUDIT_EVIDENCE_FRAMEWORK.md` | Evidence/confidence/severity rules | -| `REVISION_LOG.md` | What changed in this reviewed copy | -| `VALIDATION_BACKLOG.md` | Follow-up tasks before external use | -| `PUBLIC_REFERENCE_NOTES.md` | Public protocol and regulatory context consulted during review | - -### Specialist reports -- `toxmcp_regulatory_audit_report.md` -- `toxmcp_adversarial_audit_report.md` -- `toxmcp_contract_audit_report.md` -- `toxmcp_security_audit_report.md` -- `ToxMCP_Performance_Resilience_Audit_Report.md` -- `toxmcp_observability_audit_report.md` -- `cognitive_ergonomics_audit_report.md` -- `toxmcp_future_proofing_audit_report.md` - -### Repository-specific packages -- `comptox-mcp-audit/` -- `oqt-mcp-audit/` -- `aop-mcp-audit/` -- `pbpk-mcp-audit/` - -Each repository package includes: -- `README.md` — reviewed summary of findings and sequencing -- `REMEDIATION_CODE.md` — implementation-oriented reference code, not drop-in patches - -### Shared reference code -- `toxmcp_remediation_snippets.py` - ---- - -## Most important package-level conclusions - -### 1. 
The strongest issues are architectural, not local -The pack is at its best when it identifies cross-cutting gaps such as: -- provenance and time-machine reconstruction -- cross-suite orchestration and contradiction handling -- mandatory scientific review checkpoints -- uncertainty propagation -- distributed tracing and replayability - -### 2. Some original language was too absolute -The reviewed copy deliberately replaces phrases like: -- "FDA rejection" -- "submission rejection" -- "certain" -- "production-ready" - -with wording that better matches the level of evidence actually shown. - -### 3. The remediation code should be read as design guidance -Several code blocks are valuable patterns, but they still require: -- repository-specific adaptation -- test coverage -- dependency and runtime checks -- review by domain owners - ---- - -## Recommended reading order - -### Leadership / program owner -1. `TOXMCP_MASTER_AUDIT_REPORT.md` -2. `QUICK_REFERENCE.md` -3. `VALIDATION_BACKLOG.md` - -### Engineering leads -1. `INDEX.md` -2. repository `README.md` files -3. relevant specialist report(s) -4. relevant `REMEDIATION_CODE.md` - -### Security / quality / regulatory reviewers -1. `AUDIT_EVIDENCE_FRAMEWORK.md` -2. `toxmcp_security_audit_report.md` or `toxmcp_regulatory_audit_report.md` -3. `VALIDATION_BACKLOG.md` - ---- - -## Package posture after review - -| Use case | Fit | -|---|---| -| Internal planning and prioritization | **Strong** | -| Engineering remediation sequencing | **Strong** | -| Leadership briefing | **Strong** | -| External diligence without further validation | **Limited** | -| Formal compliance or security attestation | **Not yet** | - ---- - -## Immediate next step - -Use this reviewed copy to align on priorities, then execute the validation tasks in `VALIDATION_BACKLOG.md` against the live repositories before sending the package outside the team. 
diff --git a/ToxMCP_Audit_Reviewed_v2/INDEX.md b/ToxMCP_Audit_Reviewed_v2/INDEX.md deleted file mode 100644 index cce04cc..0000000 --- a/ToxMCP_Audit_Reviewed_v2/INDEX.md +++ /dev/null @@ -1,146 +0,0 @@ -# ToxMCP Comprehensive Audit - Master Index (Reviewed Copy) - -**Audit package date:** 2026-04-15 -**Repositories in scope:** `comptox-mcp`, `oqt-mcp`, `aop-mcp`, `pbpk-mcp` - ---- - -## Read this first - -Before reusing any finding outside the immediate engineering team, read: - -1. `AUDIT_EVIDENCE_FRAMEWORK.md` -2. `TOXMCP_MASTER_AUDIT_REPORT.md` -3. `VALIDATION_BACKLOG.md` -4. `PUBLIC_REFERENCE_NOTES.md` - -These four documents define: -- what the package actually claims -- how strong the evidence is -- what still needs validation - ---- - -## Navigation by audience - -### Leadership / program management -- `DELIVERY_SUMMARY.md` -- `QUICK_REFERENCE.md` -- `TOXMCP_MASTER_AUDIT_REPORT.md` - -### Engineering leads -- repository-specific `README.md` files -- `QUICK_REFERENCE.md` -- `VALIDATION_BACKLOG.md` - -### Security / platform engineering -- `toxmcp_security_audit_report.md` -- `ToxMCP_Performance_Resilience_Audit_Report.md` -- `toxmcp_observability_audit_report.md` -- `aop-mcp-audit/REMEDIATION_CODE.md` -- `pbpk-mcp-audit/REMEDIATION_CODE.md` - -### Regulatory / quality / scientific governance -- `toxmcp_regulatory_audit_report.md` -- `cognitive_ergonomics_audit_report.md` -- `toxmcp_adversarial_audit_report.md` -- `oqt-mcp-audit/README.md` -- `comptox-mcp-audit/README.md` - -### Architecture / integration owners -- `toxmcp_contract_audit_report.md` -- `toxmcp_future_proofing_audit_report.md` -- `TOXMCP_MASTER_AUDIT_REPORT.md` - ---- - -## Package structure - -```text -README.md -DELIVERY_SUMMARY.md -INDEX.md -QUICK_REFERENCE.md -TOXMCP_MASTER_AUDIT_REPORT.md -AUDIT_EVIDENCE_FRAMEWORK.md -REVISION_LOG.md -VALIDATION_BACKLOG.md - -Specialist reports/ - toxmcp_regulatory_audit_report.md - toxmcp_adversarial_audit_report.md - toxmcp_contract_audit_report.md - 
toxmcp_security_audit_report.md - ToxMCP_Performance_Resilience_Audit_Report.md - toxmcp_observability_audit_report.md - cognitive_ergonomics_audit_report.md - toxmcp_future_proofing_audit_report.md - -Repository packages/ - comptox-mcp-audit/ - oqt-mcp-audit/ - aop-mcp-audit/ - pbpk-mcp-audit/ - -Shared reference code/ - toxmcp_remediation_snippets.py -``` - ---- - -## Fastest route to decisions - -### Question: “What are the top cross-suite issues?” -Read: -- `TOXMCP_MASTER_AUDIT_REPORT.md` -- `QUICK_REFERENCE.md` - -### Question: “What should each repo team do next?” -Read: -- repo `README.md` -- repo `REMEDIATION_CODE.md` -- `VALIDATION_BACKLOG.md` - -### Question: “How much of this is directly observed vs inferred?” -Read: -- `AUDIT_EVIDENCE_FRAMEWORK.md` -- relevant specialist report summary section - -### Question: “Can we circulate this externally?” -Read: -- `DELIVERY_SUMMARY.md` -- `REVISION_LOG.md` -- `VALIDATION_BACKLOG.md` - ---- - -## Highest-priority repository docs - -| Repository | Start here | Then read | -|---|---|---| -| `comptox-mcp` | `comptox-mcp-audit/README.md` | `toxmcp_regulatory_audit_report.md`, `comptox-mcp-audit/REMEDIATION_CODE.md` | -| `oqt-mcp` | `oqt-mcp-audit/README.md` | `cognitive_ergonomics_audit_report.md`, `oqt-mcp-audit/REMEDIATION_CODE.md` | -| `aop-mcp` | `aop-mcp-audit/README.md` | `toxmcp_security_audit_report.md`, `aop-mcp-audit/REMEDIATION_CODE.md` | -| `pbpk-mcp` | `pbpk-mcp-audit/README.md` | `ToxMCP_Performance_Resilience_Audit_Report.md`, `pbpk-mcp-audit/REMEDIATION_CODE.md` | - ---- - -## Legend used in reviewed summaries - -| Label | Meaning | -|---|---| -| **Observed** | Directly supported by the audit material itself | -| **Observed + inferred** | A direct observation supports a broader architectural conclusion | -| **Scenario** | Threat or misuse path with stated preconditions | -| **High / Medium / Low confidence** | How strongly the package supports the claim | - ---- - -## Package-level caution - -The 
repository packages and specialist reports are useful and actionable, but several findings still need: -- live-repo verification -- proof-of-concept reproduction -- fix verification tests - -Treat this package as a strong internal audit and planning artifact, not a substitute for formal external assurance. diff --git a/ToxMCP_Audit_Reviewed_v2/MANIFEST.md b/ToxMCP_Audit_Reviewed_v2/MANIFEST.md deleted file mode 100644 index b825328..0000000 --- a/ToxMCP_Audit_Reviewed_v2/MANIFEST.md +++ /dev/null @@ -1,6 +0,0 @@ -# ToxMCP Reviewed Package Manifest - -This manifest lists file hashes for the reviewed copy so package contents can be checked after transfer. - -- Hash algorithm: `SHA-256` -- File-level digests: see `MANIFEST_SHA256.txt` diff --git a/ToxMCP_Audit_Reviewed_v2/MANIFEST_SHA256.txt b/ToxMCP_Audit_Reviewed_v2/MANIFEST_SHA256.txt deleted file mode 100644 index 438b574..0000000 --- a/ToxMCP_Audit_Reviewed_v2/MANIFEST_SHA256.txt +++ /dev/null @@ -1,28 +0,0 @@ -6725c8e43032b103ab3de0606705f244650937c7baead2eb3fad766763572a04 AUDIT_EVIDENCE_FRAMEWORK.md -09d987ce9552549256b349a08ab20258313c4194c9e53ca02171519ad96d9571 DELIVERY_SUMMARY.md -902676cc99336c83e0a412b0975cfa06950f4da79ef75b450dc93660866864bb INDEX.md -20e3c23f7ae37f20efed5d34e7d5bb35a5b7d02c2105c6b70e7a150ba1f4a5f3 MANIFEST.md -99d1b6a1292c57c5ee574d9a521a7bf15a63504bb4e50322eef395a20caaa035 MANIFEST_SHA256.txt -17fd6599ffe4e15178ef246bd37465e79af9cfc125a1698a435ea05497ff2d46 PUBLIC_REFERENCE_NOTES.md -66f0130512889699849411cdc91792f9dbdb41ab22c03b8f5f7b9dd288d18ac4 QUICK_REFERENCE.md -8fdc3f642a70a9bdef298e8b5402bc027145e2d0f548df645e6bed450805ad0e README.md -364514826910925086be43ba1e39928902520f8c90773337295e0e9dfecffe29 REVISION_LOG.md -02198f635d7c0141642e3ab8437b4620da47ded9415c74b30c8f6b7aefb59c87 TOXMCP_MASTER_AUDIT_REPORT.md -4961d40e58e06842c0b693aac6c8833eae5b363867ffc11270c15ff521b0afb9 ToxMCP_Performance_Resilience_Audit_Report.md 
-369ae386a5495fd7de172a49852b792a7a68b1e9db8a99889bfe57ac1cebdeb1 VALIDATION_BACKLOG.md -b9bef0619ae1e308f84b2eacb4553a85cd079ee9fdb4e7ac9684eaa47021b6b4 aop-mcp-audit/README.md -0989473fc6088c61afa9c81bbbfa32a4cd85cfdccb5597753675a3fb3d3deaaa aop-mcp-audit/REMEDIATION_CODE.md -c08ec262058f8c89d9bc7ae6df4f1535b59d3b1bdd5e72822dfc583b88c291ab cognitive_ergonomics_audit_report.md -5395278129f2822421cba26b27c41596d009fa38cea3c7d2e7d74441d48e07c0 comptox-mcp-audit/README.md -d53c1a299213a87aeb1e10f081ba1f96ad424c19da31573a249df36eb782151c comptox-mcp-audit/REMEDIATION_CODE.md -1124485197ac0672ba0205c57b175e20f6ebb1a1a6978baeb11636671198d49e oqt-mcp-audit/README.md -d95bd6fee607f7335b8b6d4ef22b94aacbc24f0f31cad038e095fd9129392f9e oqt-mcp-audit/REMEDIATION_CODE.md -79d612915067d8b3e4cd0da600a8f62cd3a1c24a0932c2506942ec9356c3b0e0 pbpk-mcp-audit/README.md -f53fae41a1baadc7709e78ec099e5de981e2d917994fe6c0c942b0029b0b1894 pbpk-mcp-audit/REMEDIATION_CODE.md -b1ff5364fd919d4f31137728127051f52e8074c7794fff4739bd7baa76eb9afd toxmcp_adversarial_audit_report.md -d61a2e3dc29bbc3c93121022621ebfa9eb0e7a31b4c6cb4e80ba96d9a71d6f8f toxmcp_contract_audit_report.md -ab50e4b3f26cb55a2363fe857360ad51031c7bc0a14cfd53d46be4e259e9b1d2 toxmcp_future_proofing_audit_report.md -d707433a13ddc6b66051bd58788779328fd18984c796b08318b1202449e94f9f toxmcp_observability_audit_report.md -f3857a6e674c91a0ff9da95e9088fb9c7d45c06c75bb43021d09779585579601 toxmcp_regulatory_audit_report.md -e4c1e7fbf7ea7f73aff21f7ebd3aeb38146f8cb176e96486cf859271846478b6 toxmcp_remediation_snippets.py -bf4effb38cff4e07960406190aa6217a68d2c7f2478c27b6a47d78c3e6fa98d7 toxmcp_security_audit_report.md diff --git a/ToxMCP_Audit_Reviewed_v2/PUBLIC_REFERENCE_NOTES.md b/ToxMCP_Audit_Reviewed_v2/PUBLIC_REFERENCE_NOTES.md deleted file mode 100644 index a19ea62..0000000 --- a/ToxMCP_Audit_Reviewed_v2/PUBLIC_REFERENCE_NOTES.md +++ /dev/null @@ -1,23 +0,0 @@ -# Public Reference Notes Used During Review - -This package review updated a small 
number of time-sensitive or standards-sensitive framing decisions. -The following public materials were consulted at review time: - -## MCP / protocol context -- Model Context Protocol specification (latest public release noted during review) -- MCP specification changelog describing the shift from HTTP+SSE to Streamable HTTP -- MCP 2026 roadmap notes on transport evolution, session handling, and server cards - -## Regulatory context -- 21 CFR Part 11 (electronic records and electronic signatures) -- FDA guidance on Part 11 scope/application and electronic systems/electronic records/electronic signatures -- EudraLex Annex 11 public materials -- OECD GLP data integrity and computerized systems guidance materials - -## How these were used -These references were used only to: -- update obviously stale protocol/future-proofing framing -- keep regulatory wording appropriately cautious -- avoid making claims that outran the public standards context - -This file is not a full compliance mapping and should not be treated as one. diff --git a/ToxMCP_Audit_Reviewed_v2/QUICK_REFERENCE.md b/ToxMCP_Audit_Reviewed_v2/QUICK_REFERENCE.md deleted file mode 100644 index ab899db..0000000 --- a/ToxMCP_Audit_Reviewed_v2/QUICK_REFERENCE.md +++ /dev/null @@ -1,95 +0,0 @@ -# ToxMCP Audit Quick Reference (Reviewed Copy) - -**Purpose:** Fast triage for engineering and leadership -**How to read this page:** It prioritizes what to fix first, not what to claim most loudly. 
- ---- - -## Top cross-suite items - -| Rank | Finding | Primary repos | Severity | Evidence basis | Confidence | First action | -|---|---|---|---|---|---|---| -| 1 | Historical reconstruction and provenance gaps | All | **Critical** | Observed + inferred | High | Define a single provenance envelope and capture code/data/runtime versions at workflow start | -| 2 | No mandatory scientific review checkpoints in high-risk flows | `oqt-mcp`, cross-suite | **Critical** | Observed + inferred | High | Add explicit pause-and-approve checkpoints before predictive and reporting steps | -| 3 | Unsafe interpolation / trust-boundary handling | `aop-mcp`, `oqt-mcp` | **Critical / High** | Observed + scenario | High / Medium | Remove structural query interpolation; isolate untrusted identifiers from prompts | -| 4 | Resource-control and resilience gaps | `pbpk-mcp`, `aop-mcp` | **High** | Observed | High | Add quotas, circuit breaker behavior, and load-test-derived defaults | -| 5 | Auditability and traceability gaps | All | **High** | Observed + inferred | High | Propagate a single trace ID and emit replayable provenance records | -| 6 | Cross-suite orchestration responsibility is documented but not implemented | All | **High** | Observed + inferred | Medium-High | Define orchestration ownership, evidence deduplication, and contradiction handling | - ---- - -## Repo-by-repo first moves - -### `comptox-mcp` -1. Capture upstream provenance in a way the provider actually supports -2. Replace audit-log fallback behavior with a tamper-evident trail design -3. Add retry jitter/backoff and document supported MCP transport/version strategy - -### `oqt-mcp` -1. Enforce applicability-domain gates, not just narrative AD summaries -2. Add mandatory human review checkpoints and stronger PDF provenance defaults -3. Treat chemical identifiers as untrusted text when crossing LLM or agent boundaries - -### `aop-mcp` -1. 
Remove arbitrary query-shape interpolation; use allow-listed query plans and safe binding -2. Add resilience controls for SPARQL upstream failure -3. Tighten draft-signature and checksum-chain semantics - -### `pbpk-mcp` -1. Enforce parameter bounds and log parameter sweeps -2. Add population and memory quotas with tested defaults -3. Improve reproducibility metadata and deterministic event hashing - ---- - -## What changed in the reviewed copy - -- absolute phrases were softened to match evidence -- remediation code is now framed as **reference code** -- future-proofing language was updated to current MCP public context -- validation gaps were moved into an explicit backlog - -See: -- `AUDIT_EVIDENCE_FRAMEWORK.md` -- `REVISION_LOG.md` -- `VALIDATION_BACKLOG.md` - ---- - -## Items to validate before external circulation - -| Finding | Why validation is needed | -|---|---| -| SPARQL injection | Need to confirm actual runtime-controlled fields and endpoint permissions | -| Prompt injection via identifiers | Need a real prompt-boundary trace, not only a scenario | -| Regulated-use compliance gaps | Need intended-use and procedural-control context | -| Upstream version pinning | Need to verify what external providers actually expose | -| Population/OOM thresholds | Need measurements on representative infrastructure | - ---- - -## Recommended sequence - -### Week 0: package hygiene -- adopt the reviewed copy -- assign owners -- turn critical findings into tracked work items -- agree on validation criteria - -### Week 1-2: hard controls -- OQT AD gating and review checkpoints -- AOP query safety and circuit breaking -- PBPK parameter/resource controls -- CompTox provenance capture and audit trail hardening - -### Week 3-4: shared architecture -- provenance envelope -- distributed tracing -- orchestration/evidence broker -- fix validation tests - ---- - -## One-line posture - -**Strong internal audit and planning pack; not yet a reproduced external assurance package.** 
diff --git a/ToxMCP_Audit_Reviewed_v2/README.md b/ToxMCP_Audit_Reviewed_v2/README.md deleted file mode 100644 index b5d29ab..0000000 --- a/ToxMCP_Audit_Reviewed_v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# ToxMCP Audit Package - Reviewed Copy - -Start here: - -1. `DELIVERY_SUMMARY.md` — what this package is and how to use it -2. `INDEX.md` — navigation by audience -3. `TOXMCP_MASTER_AUDIT_REPORT.md` — revised cross-suite synthesis -4. `AUDIT_EVIDENCE_FRAMEWORK.md` — how evidence, confidence, and severity are used -5. `VALIDATION_BACKLOG.md` — what still needs reproduction or source verification -6. `PUBLIC_REFERENCE_NOTES.md` — public standards/protocol context consulted during review - -This reviewed copy is designed to be safer to circulate internally than the original draft. It is still best treated as an internal audit and remediation planning package until live-repository validation is complete. diff --git a/ToxMCP_Audit_Reviewed_v2/REVISION_LOG.md b/ToxMCP_Audit_Reviewed_v2/REVISION_LOG.md deleted file mode 100644 index 01df148..0000000 --- a/ToxMCP_Audit_Reviewed_v2/REVISION_LOG.md +++ /dev/null @@ -1,95 +0,0 @@ -# ToxMCP Audit Package - Revision Log - -**Reviewed copy date:** 2026-04-15 - ---- - -## What changed in this reviewed copy - -This revision keeps the core findings but tightens the package in five ways: - -1. **Evidentiary discipline** - - Added an explicit evidence framework - - Separated observed facts from architecture inferences and scenario narratives - - Softened absolute language where reproduction was not shown - -2. **Internal consistency** - - Normalized date mismatches - - Corrected tone and severity inconsistencies - - Aligned top-level summaries with more defensible wording - -3. 
**Remediation quality** - - Reframed code samples as **reference implementations** - - Corrected several mitigations that were too generic or potentially misleading - - Upgraded the shared Python remediation file so it is clearer about placeholder boundaries - -4. **Future-proofing accuracy** - - Updated MCP-related language to reflect the current public specification and roadmap context - - Reframed speculative schedule statements as migration-risk statements - -5. **Delivery readiness** - - Added a validation backlog - - Added reviewed summaries for repository-specific packages - - Added package-level notes about intended use and limitations - ---- - -## Files rewritten or substantially revised - -### Top-level package docs -- `DELIVERY_SUMMARY.md` -- `INDEX.md` -- `QUICK_REFERENCE.md` -- `TOXMCP_MASTER_AUDIT_REPORT.md` - -### New governance/QA docs -- `AUDIT_EVIDENCE_FRAMEWORK.md` -- `REVISION_LOG.md` -- `VALIDATION_BACKLOG.md` - -### Specialist reports substantially revised -- `toxmcp_regulatory_audit_report.md` -- `toxmcp_security_audit_report.md` -- `toxmcp_future_proofing_audit_report.md` - -### Repository summaries substantially revised -- `comptox-mcp-audit/README.md` -- `oqt-mcp-audit/README.md` -- `aop-mcp-audit/README.md` -- `pbpk-mcp-audit/README.md` - -### Shared code revised -- `toxmcp_remediation_snippets.py` - ---- - -## Files lightly edited - -The following documents were retained but annotated or normalized: -- `ToxMCP_Performance_Resilience_Audit_Report.md` -- `cognitive_ergonomics_audit_report.md` -- `toxmcp_adversarial_audit_report.md` -- `toxmcp_contract_audit_report.md` -- `toxmcp_observability_audit_report.md` -- all `REMEDIATION_CODE.md` files - -Typical light edits: -- reviewed-copy note inserted -- date normalization -- wording updates for over-absolute claims -- short caveat added for reference code - ---- - -## What this reviewed copy still does not claim - -- It does **not** claim that all line references were revalidated against the 
live repositories -- It does **not** claim that all attack chains were reproduced -- It does **not** claim that remediation code is merge-ready without repo-specific adaptation and tests -- It does **not** upgrade the package into a formal third-party audit - ---- - -## Recommended next step - -Use this reviewed copy as the planning and stakeholder-facing basis, then execute the `VALIDATION_BACKLOG.md` items against the live repositories before external circulation. diff --git a/ToxMCP_Audit_Reviewed_v2/TOXMCP_MASTER_AUDIT_REPORT.md b/ToxMCP_Audit_Reviewed_v2/TOXMCP_MASTER_AUDIT_REPORT.md deleted file mode 100644 index 23b8a1f..0000000 --- a/ToxMCP_Audit_Reviewed_v2/TOXMCP_MASTER_AUDIT_REPORT.md +++ /dev/null @@ -1,272 +0,0 @@ -# ToxMCP Ecosystem - Comprehensive Adversarial Audit Report (Reviewed Copy) - -**Review date:** 2026-04-15 -**Scope:** `comptox-mcp`, `oqt-mcp`, `aop-mcp`, `pbpk-mcp` -**Intended use:** Internal planning, engineering prioritization, stakeholder briefing - ---- - -## Review status - -This reviewed copy preserves the original package’s core concerns while tightening: -- evidentiary language -- severity calibration -- remediation phrasing -- package-level consistency - -It should be read together with: -- `AUDIT_EVIDENCE_FRAMEWORK.md` -- `REVISION_LOG.md` -- `VALIDATION_BACKLOG.md` - ---- - -## Executive judgment - -The ToxMCP audit bundle is **strong as an internal red-team and architecture review**, especially where it identifies cross-cutting risks around provenance, uncertainty, orchestration, and scientist-facing misuse. - -The reviewed copy does **not** treat the package as a finished external audit. A number of findings remain best framed as: -- observed implementation gaps, -- architecture inferences, -- or scenario-based exploit narratives that still require reproduction. 
- -### Bottom-line rating - -| Use case | Assessment | -|---|---| -| Internal remediation planning | **Strong** | -| Cross-team prioritization | **Strong** | -| Leadership briefing | **Strong** | -| External diligence without further validation | **Limited** | -| Formal assurance / submission support | **Not yet** | - ---- - -## Why the package is still valuable - -The most important insight in the original work was correct: the main failure modes are not only classic software bugs. They are also: - -- missing time-machine reconstruction -- confidence without calibration -- outputs that look authoritative without enough provenance -- cross-tool contradictions that no component is responsible for resolving -- uncertainty that grows across the workflow but is never represented explicitly - -Those are real and important system-level risks for a toxicology workflow stack. - ---- - -## Evidence and confidence summary - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed wording | -|---|---|---|---|---|---| -| M-01 | Historical reconstruction and provenance gaps | **Critical** | Observed + inferred | High | High risk of being unable to reconstruct past outputs in a defensible way | -| M-02 | Missing or weak human review checkpoints in high-risk flows | **Critical** | Observed + inferred | High | High risk of false confidence and unreviewed downstream reporting | -| M-03 | Unsafe trust-boundary handling in query/prompt paths | **Critical / High** | Observed + scenario | High / Medium | Unsafe interpolation patterns are present; exploitability depends on actual runtime data flow | -| M-04 | No shared cross-suite orchestration / contradiction handling layer | **High** | Observed + inferred | Medium-High | Responsibility is documented but not implemented in the audited material | -| M-05 | Resource-control and resilience gaps | **High** | Observed | High | Service instability or degraded scientific throughput is plausible under stress | -| M-06 | 
Auditability, replay, and observability gaps | **High** | Observed + inferred | High | Debugging and post-hoc verification are materially harder than they should be | -| M-07 | Schema / protocol / ontology evolution risk | **High** | Observed + standards note | Medium-High | Migration cost is likely to be high without shared abstraction and versioning discipline | - ---- - -## System-level findings - -### M-01: Provenance and reconstruction are not yet first-class - -Across the package, the strongest repeated concern is not merely "missing logs." It is the absence of a single, defensible record of: - -- code version -- runtime environment -- input identity resolution -- upstream data/version context -- model/tool version -- human review state -- final signed or approved output - -This matters because toxicology workflows often need more than replay. They need a **reconstructable explanation of what happened, when, with which inputs, under which software and data conditions**. - -**Why this remains Critical:** -The reviewed copy still considers this a critical suite-level gap because it affects integrity, auditability, and the ability to defend historical outputs. - -**What changed in the wording:** -The original package sometimes implied automatic regulatory failure. The reviewed copy instead states that this gap creates a **high risk of non-conformance and defensibility failure for regulated use**, subject to intended use and any external procedural controls. - ---- - -### M-02: Human review is not reliably embedded where it matters most - -The package is persuasive when it shows how scientist-facing automation can move from: -chemical identification → predictive tooling → PDF/report artifact -without clearly enforced review checkpoints. - -The issue is not that automation exists. 
The issue is that the package shows too many places where the workflow can appear "finished" before: -- chemical identity is confirmed -- applicability-domain boundaries are accepted -- contradictory evidence is surfaced -- confidence language is reviewed by a human - -**Why this remains Critical:** -Because the scientific and regulatory risk is not just wrong output; it is **wrong output wrapped in a professional-looking artifact**. - ---- - -### M-03: Trust-boundary handling needs to be reworked, not just patched - -Two areas matter here: - -#### SPARQL/query safety -The audited material shows string-template interpolation for queries. That is a real code smell. -However, the reviewed copy avoids overstating destructive impact unless runtime permissions and update semantics are known. The safest defensible claim is: - -> unsafe interpolation is observed; query broadening, unauthorized data exposure, or result manipulation are plausible; destructive effects depend on endpoint permissions and whether update-capable operations are reachable. - -#### Prompt/instruction boundary safety -The package also reasonably flags that chemical identifiers and similar text fields may cross into LLM- or agent-facing contexts. -But the reviewed copy treats full prompt injection as **scenario-dependent** until the exact prompt boundary is demonstrated. - -**Practical implication:** -These should still be treated as near-term remediation items, because the mitigation cost is lower than the cost of being wrong later: -- bind literals safely -- allow-list structural query choices -- isolate untrusted text from system instructions -- prefer structured tool arguments over interpolated natural language - ---- - -### M-04: Cross-suite orchestration responsibility is missing - -This remains one of the most original and useful findings in the bundle. - -The issue is not merely that there is no single "orchestrator service" file. 
It is that the audited material repeatedly implies a higher layer is responsible for: - -- evidence deduplication -- contradiction detection -- cross-module narrative coherence -- schema translation and version negotiation -- final dossier assembly - -Yet that responsibility is not concretely implemented in the package. - -**Why this matters:** -Without an explicit owner for cross-tool reasoning, each repo can be locally correct while the suite-level story is inconsistent. - ---- - -### M-05: Resilience gaps are likely to surface under real load - -The package identifies several plausible service-stability issues: -- no clear circuit-breaker behavior for SPARQL-like upstream failure paths -- insufficient quotas for large PBPK workloads -- retry logic that may amplify load -- limited replay/diff tooling for diagnosing divergent results - -The reviewed copy retains these as **High** rather than inflating every one to Critical, because actual severity depends on deployment size, workload mix, and whether external infrastructure already enforces limits. - ---- - -### M-06: Observability and replayability are under-designed - -The original observability audit made a strong point: the suite is difficult to debug as a system, not just as four independent repos. - -The most important issues are: -- no single trace across tools -- insufficient replay artifacts -- limited diffability of outputs -- incomplete privacy/sensitivity handling in logs - -This is more than an operational inconvenience. It slows incident response, scientific debugging, and compliance evidence gathering. - ---- - -### M-07: Future-proofing risk is real, but should be framed as migration resilience - -The original package correctly identified fragmentation around: -- transport handling -- schema versioning -- ontology evolution -- provider coupling - -The reviewed copy updates the framing: these are not just "future features missing." They are **migration resilience risks**. 
-That is the more durable claim. - ---- - -## Cross-cutting bridge components still worth building - -The original master report recommended architectural bridge components. That remains the right direction. - -### 1. Provenance and evidence ledger -A suite-wide component that records: -- input identity resolution -- upstream retrieval metadata -- code/runtime snapshot -- tool outputs and hashes -- review checkpoints -- final artifact lineage - -### 2. Orchestration and evidence-broker layer -A single place to handle: -- schema mediation -- contradiction detection -- evidence deduplication -- confidence/uncertainty aggregation -- final narrative assembly rules - -### 3. Policy and safe-execution layer -A shared layer for: -- authorization and review policies -- prompt/query trust-boundary handling -- rate limits and quotas -- audit and trace propagation -- secure offline/controlled execution modes where required - ---- - -## Priority remediation plan - -### Wave 0 - package hygiene and governance -- adopt this reviewed copy as the working baseline -- assign repo owners for each critical item -- agree on validation criteria before external use -- stop describing snippets as production-ready code - -### Wave 1 - hard controls -- OQT: applicability-domain gating, review checkpoints, safer report defaults -- AOP: query safety redesign and resilience controls -- PBPK: bounds, quotas, and reproducibility metadata -- CompTox: provenance capture and tamper-evident audit design - -### Wave 2 - shared architecture -- provenance envelope -- trace propagation -- orchestration/evidence broker -- schema/version registry decisions - -### Wave 3 - external defensibility -- live-repo revalidation -- proof-of-concept or deterministic reasoning notes for each critical item -- fix verification tests -- commit or permalink references - ---- - -## What should not be claimed yet - -Until the validation backlog is complete, avoid saying that the package has already demonstrated: -- formal 
exploit reproduction for all security findings -- conclusive regulatory rejection outcomes -- production-ready remediation patches -- complete live-repo verification - ---- - -## Final assessment - -The original package had the right instincts and several genuinely strong insights. -The reviewed copy makes it safer and more useful by separating: -- what is directly observed, -- what is inferred, -- and what remains a scenario that should be validated. - -**Bottom line:** this is a strong internal audit and remediation planning bundle, and now a better one. It is still one validation step away from being an externally defensible assurance artifact. diff --git a/ToxMCP_Audit_Reviewed_v2/ToxMCP_Performance_Resilience_Audit_Report.md b/ToxMCP_Audit_Reviewed_v2/ToxMCP_Performance_Resilience_Audit_Report.md deleted file mode 100644 index 77e045e..0000000 --- a/ToxMCP_Audit_Reviewed_v2/ToxMCP_Performance_Resilience_Audit_Report.md +++ /dev/null @@ -1,474 +0,0 @@ -# ToxMCP Suite - Performance & Resilience Audit Report - -**Audit Date:** 2026-04-15 -**Auditor:** Performance & Resilience Engineer -**Scope:** comptox-mcp, oqt-mcp, aop-mcp, pbpk-mcp repositories - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## Executive Summary - -This audit identifies critical scaling cliffs and fault modes across the ToxMCP ecosystem. While the suite demonstrates good architectural patterns for job persistence and retry logic, significant gaps exist in **circuit breaker implementation**, **memory protection for large simulations**, and **input validation for chemical complexity**. - -**Overall Risk Rating: 🔴 HIGH** - ---- - -## 1. 
SPARQL Timeout Cascades (AOP-MCP) 🔴 Critical - -### Finding AOP-001: No Circuit Breaker Logic - -**File:** `aop-mcp/src/adapters/sparql_client.py` (lines 37-231) - -**Issue:** The SPARQL client implements failover across endpoints but **lacks circuit breaker pattern**: - -```python -# Current implementation - NO circuit breaker -async def _dispatch(self, query: str, *, timeout: float | None = None) -> dict[str, Any]: - last_error: Exception | None = None - for endpoint in self._endpoints: - attempts = self._max_retries + 1 - for attempt in range(attempts): - try: - response = await self._client.post(...) - except Exception as exc: - # Simply logs and retries - no circuit breaker - logger.warning("SPARQL request to %s failed...", endpoint.url, ...) - last_error = exc - continue -``` - -**Fault Mode:** When AOP-Wiki is down: -- System **FAILS CLOSED** - raises `SparqlUpstreamError` after all endpoints exhausted -- No graceful degradation to cached/empty results -- Each request waits full timeout (default 10s) x retries (default 2) x endpoints -- **Cascading latency** under load - -**Missing Protection:** -| Feature | Status | Risk | -|---------|--------|------| -| Circuit Breaker | Absent | 🔴 Critical | -| Exponential Backoff | Absent | 🟠 High | -| Jitter | Absent | 🟠 High | -| Half-Open State | Absent | 🔴 Critical | -| Cache-First on Failure | Absent | 🟠 High | - -**Thresholds:** -- Default timeout: **10 seconds** -- Default retries: **2 per endpoint** -- No maximum query complexity limits - -**Recommendation:** Implement circuit breaker with: -- Failure threshold: 5 errors in 60 seconds -- Open state duration: 30 seconds -- Half-open probe: 1 request -- Fallback to cache or empty results with warning - ---- - -## 2. 
Memory Exhaustion Patterns (PBPK-MCP) 🔴 Critical - -### Finding PBPK-001: No Population Size Limits - -**File:** `pbpk-mcp/src/mcp_bridge/services/job_service.py` (1392 lines) - -**Issue:** Population simulations can generate massive datasets with **no input validation**: - -```python -# From JobRecord dataclass - no population size limits -@dataclass -class JobRecord: - job_id: str - simulation_id: str - job_type: str # Can be "population_simulation" - # ... no max_population_size field -``` - -**Configuration (`.env.example`):** -```bash -JOB_TIMEOUT_SECONDS=300 # 5 minutes -JOB_MAX_RETRIES=0 -JOB_WORKER_THREADS=2 -# NO population size limit defined -``` - -**OOM Risk Assessment:** - -| Population Size | Memory Estimate | Timeout Risk | -|-----------------|-----------------|--------------| -| 100 patients | ~50 MB | Low | -| 1,000 patients | ~500 MB | Medium | -| 10,000 patients | ~5 GB | 🔴 High - Likely OOM | -| 100,000 patients | ~50 GB | 🔴 Critical - Likely OOM on many worker sizes | - -**Streaming Status:** NO streaming/chunking logic found for population results - -**File:** `pbpk-mcp/src/mcp_bridge/storage/population_store.py` (not examined but referenced) - -**Missing Protection:** -- No `max_population_size` parameter -- No memory quota enforcement -- No result pagination/streaming -- SQLite storage loads full results into memory - -**Recommendation:** -1. Add `MAX_POPULATION_SIZE=5000` environment variable -2. Implement result streaming with chunk handles -3. Add memory quota check before simulation start - ---- - -### Finding PBPK-002: Insufficient Job Timeout - -**Current:** `JOB_TIMEOUT_SECONDS=300` (5 minutes) - -**Risk:** Population simulations with 1000+ patients can exceed 5 minutes, causing: -- Job marked as `TIMEOUT` status -- Orphaned simulation processes in R/ospsuite -- Partial results lost - -**Recommendation:** -- Increase default to 1800s (30 minutes) for population jobs -- Implement job-type specific timeouts - ---- - -## 3. 
API Rate Limit Handling (CompTox-MCP) 🟠 High - -### Finding CTX-001: Basic Retry Without Jitter - -**File:** `comptox-mcp/src/epacomp_tox/settings.py` (lines 37-139) - -**Current Implementation:** -```python -class ContextSettings: - retry_attempts: int # Default: 3 - retry_base: float # Default: 0.5 seconds -``` - -**Configuration:** -```bash -CTX_RETRY_ATTEMPTS=3 -CTX_RETRY_BASE=0.5 -``` - -**Retry Pattern:** -- Attempt 1: Immediate -- Attempt 2: 0.5s delay -- Attempt 3: 0.5s delay (NOT exponential!) - -**Missing Protection:** -| Feature | Status | Risk | -|---------|--------|------| -| Exponential Backoff | Partial (fixed base) | 🟠 High | -| Jitter | Absent | 🔴 Critical | -| Rate Limit Headers | Not checked | 🟠 High | -| Quota Budgets | Absent | 🟠 High | -| 429 Retry-After | Not honored | 🔴 Critical | - -**Fault Mode:** Under EPA CompTox rate limiting: -- Multiple concurrent requests will retry simultaneously -- **Thundering herd** amplifies rate limit violations -- No `Retry-After` header parsing -- Risk of temporary API ban - -**Recommendation:** -```python -# Implement proper exponential backoff with jitter -delay = retry_base * (2 ** attempt) + random.uniform(0, 1) -``` - ---- - -## 4. 
Long-Running Job Orphans (PBPK-MCP) 🟡 Medium - -### Finding PBPK-003: SQLite Persistence with Limitations - -**File:** `pbpk-mcp/src/mcp_bridge/services/job_service.py` (lines 127-400) - -**Positive Finding:** Jobs are persisted to SQLite: -```python -class JobRegistry: - def __init__(self, db_path: str = "var/jobs/registry.json"): - self._conn = sqlite3.connect(str(self._prepare_path(db_path))) - # Creates tables: job_records, simulation_results -``` - -**Survival Scenario:** -| Scenario | Job Survival | Notes | -|----------|--------------|-------| -| API server restart | Yes | SQLite persists to disk | -| Worker crash | Partial | Job status may be "RUNNING" but actually dead | -| Full system restart | Yes | Jobs recover from SQLite | -| Celery backend crash | Depends | Redis/memory backend loses queue | - -**Orphan Risk:** -- Job status can remain `RUNNING` indefinitely if worker dies -- No heartbeat/health check from workers to verify liveness -- Cleanup only based on `retention_seconds` (default unknown) - -**Recommendation:** -1. Implement worker heartbeat (every 30s) -2. Mark jobs as `FAILED` if no heartbeat for 2x timeout -3. Add orphan detection job (runs every 5 minutes) - ---- - -## 5. Maximum Safe Chemical Complexity 🟠 High - -### Finding SUITE-001: No Complexity Limits - -**Cross-Repository Analysis:** - -| Component | Validation | Limit | -|-----------|------------|-------| -| AOP-MCP SPARQL queries | None | N/A | -| CompTox-MCP chemical search | Basic | None | -| PBPK-MCP population sims | None | N/A | -| OQT-MCP workflows | Timeout only | 300s | - -**Missing Validations:** -- **Molecular complexity:** No atom count limit -- **Pathway depth:** No AOP chain length limit -- **Query result size:** No LIMIT enforcement on SPARQL -- **Simulation granularity:** No time-step minimum - -**Risk Scenarios:** -1. **SPARQL query** with unlimited `?chemical aops:hasMIE` traversal → timeout/OOM -2. **Population simulation** with 100,000 virtual patients → OOM -3. 
**AOP network** query with 50+ key events → response size explosion - -**Recommendation:** Implement tiered limits: -```python -MAX_ATOMS = 500 # For PBPK modeling -MAX_AOP_CHAIN_DEPTH = 10 -MAX_SPARQL_RESULTS = 10000 -MAX_POPULATION_SIZE = 5000 -``` - ---- - -## 6. Cross-Component Vulnerability Matrix - -| Threat | CompTox | AOP | PBPK | OQT | Severity | -|--------|---------|-----|------|-----|----------| -| Timeout Cascade | 🟡 | 🔴 | 🟡 | 🟠 | 🔴 Critical | -| Memory Exhaustion | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 Critical | -| Rate Limit Ban | 🟠 | 🟢 | 🟢 | 🟢 | 🟠 High | -| Job Orphans | 🟢 | 🟢 | 🟡 | 🟢 | 🟡 Medium | -| Complexity Bomb | 🟠 | 🔴 | 🔴 | 🟠 | 🔴 Critical | - ---- - -## 7. Specific File References - -### Critical Files Examined: - -1. **AOP-MCP:** - - `src/adapters/sparql_client.py` (231 lines) - No circuit breaker - - `src/adapters/aop_wiki.py` - SPARQL endpoint consumer - -2. **CompTox-MCP:** - - `src/epacomp_tox/settings.py` (139 lines) - Retry config - - `src/epacomp_tox/client.py` (102 lines) - Basic client - -3. **PBPK-MCP:** - - `src/mcp_bridge/services/job_service.py` (1392 lines) - Job persistence - - `src/mcp_bridge/config.py` (543 lines) - Configuration - - `.env.example` (67 lines) - Environment defaults - -4. **OQT-MCP:** - - `TIMEOUT_FIX_SUMMARY.md` - Timeout hardening documentation - ---- - -## 8. 
Concrete Thresholds & Resource Limits - -### Current Limits: - -| Parameter | Default | Maximum | Unit | -|-----------|---------|---------|------| -| SPARQL timeout | 10 | Configurable | seconds | -| SPARQL retries | 2 | Configurable | attempts | -| Job timeout | 300 | Configurable | seconds | -| Job retries | 0 | Configurable | attempts | -| API retry attempts | 3 | Configurable | attempts | -| API retry base | 0.5 | Configurable | seconds | -| Adapter timeout | 30 | Configurable | seconds | - -### Missing Limits (Critical Gaps): - -| Parameter | Recommended | Priority | -|-----------|-------------|----------| -| Max population size | 5000 | 🔴 Critical | -| Max SPARQL results | 10000 | 🔴 Critical | -| Max AOP chain depth | 10 | 🟠 High | -| Max molecule atoms | 500 | 🟠 High | -| Circuit breaker threshold | 5 errors/60s | 🔴 Critical | -| Memory quota per job | 2 GB | 🔴 Critical | - ---- - -## 9. Recommendations Summary - -### Immediate Actions (Critical): - -1. **PBPK-MCP:** Add `MAX_POPULATION_SIZE` limit (default 5000) -2. **AOP-MCP:** Implement circuit breaker for SPARQL endpoints -3. **CompTox-MCP:** Add jitter and exponential backoff to retries -4. **PBPK-MCP:** Implement memory quota check before simulations - -### Short-term (High Priority): - -5. **PBPK-MCP:** Add worker heartbeat to prevent orphan jobs -6. **AOP-MCP:** Add `MAX_SPARQL_RESULTS` limit -7. **CompTox-MCP:** Parse and honor `Retry-After` headers -8. **PBPK-MCP:** Implement result streaming for population sims - -### Long-term (Medium Priority): - -9. **All:** Add complexity scoring for chemical inputs -10. **All:** Implement distributed rate limiter -11. 
**All:** Add Prometheus alerts for resource exhaustion - ---- - -## Appendix: Evidence Snapshots - -### SPARQL Client (No Circuit Breaker): -```python -# From aop-mcp/src/adapters/sparql_client.py -class SparqlClient: - def __init__(self, ..., max_retries: int = 2, timeout: float = 10.0): - self._max_retries = max(0, max_retries) - self._timeout = timeout -``` - -### Job Persistence (SQLite): -```python -# From pbpk-mcp/src/mcp_bridge/services/job_service.py -class JobRegistry: - def __init__(self, db_path: str = "var/jobs/registry.json"): - self._conn = sqlite3.connect(str(self._prepare_path(db_path))) -``` - -### Retry Configuration (No Jitter): -```python -# From comptox-mcp/src/epacomp_tox/settings.py -ctx_retry_attempts: int = Field(default=3, alias="CTX_RETRY_ATTEMPTS") -ctx_retry_base: float = Field(default=0.5, alias="CTX_RETRY_BASE") -``` - ---- - -## Detailed Findings by Repository - -### AOP-MCP (aop-mcp) - -**Version:** v0.8.1 -**Primary Risk:** SPARQL timeout cascades - -**Key Files:** -- `src/adapters/sparql_client.py` - Async HTTPX client with failover -- `src/adapters/aop_wiki.py` - AOP-Wiki SPARQL consumer -- `src/adapters/aop_db.py` - AOP-DB integration - -**Findings:** -1. SPARQL client has configurable timeout (default 10s) and retries (default 2) -2. No circuit breaker - sequential endpoint failover only -3. Cache support exists but no cache-first on failure mode -4. Metrics recording available but not used for health checks - -**Maximum Safe Load:** -- Query complexity: Unlimited (no validation) -- Result size: Unlimited (no LIMIT enforcement) -- Concurrent queries: Limited by HTTPX connection pool (default 100) - ---- - -### CompTox-MCP (comptox-mcp) - -**Version:** v0.2.2 -**Primary Risk:** Rate limit handling - -**Key Files:** -- `src/epacomp_tox/settings.py` - Configuration with retry settings -- `src/epacomp_tox/client.py` - MCP client wrapper - -**Findings:** -1. Retry configuration: 3 attempts with 0.5s base delay -2. 
No exponential backoff - fixed delay between retries -3. No jitter - thundering herd risk -4. No rate limit header parsing (429, Retry-After) - -**Maximum Safe Load:** -- Requests per minute: Unknown (EPA CompTox limit not documented) -- Concurrent requests: Limited by client configuration -- No quota budget per tool call - ---- - -### PBPK-MCP (pbpk-mcp) - -**Version:** v0.4.3 -**Primary Risk:** Memory exhaustion - -**Key Files:** -- `src/mcp_bridge/services/job_service.py` - Job orchestration (1392 lines) -- `src/mcp_bridge/config.py` - Application configuration -- `src/mcp_bridge/storage/population_store.py` - Result storage - -**Findings:** -1. SQLite-based job persistence survives restarts -2. No population size validation -3. Job timeout: 300s (5 minutes) - insufficient for large populations -4. Worker threads: 2 (configurable) -5. No memory quota enforcement - -**Maximum Safe Load:** -- Population size: ~1000 patients (before timeout/OOM risk) -- Simulation duration: 5 minutes max (default timeout) -- Memory per job: Unlimited (no quota) - ---- - -### OQT-MCP (oqt-mcp) - -**Version:** v0.3.0 -**Primary Risk:** Timeout on heavy operations - -**Key Files:** -- `TIMEOUT_FIX_SUMMARY.md` - Timeout hardening history -- `src/` - QSAR workflow implementation - -**Findings:** -1. Timeout increased from 120s to 300s for heavy operations -2. Better error handling for 404 responses -3. 
MCP content type standardization applied - -**Maximum Safe Load:** -- Workflow timeout: 300 seconds -- Heavy operations: Metabolism, reports, batch processing - ---- - -## Risk Severity Legend - -| Badge | Severity | Description | -|-------|----------|-------------| -| 🔴 | Critical | System failure, data loss, or security breach likely | -| 🟠 | High | Performance degradation or availability issues likely | -| 🟡 | Medium | Limited impact, workarounds available | -| 🟢 | Low | Minor issues, easily mitigated | - ---- - -**End of Audit Report** - -*Report generated by Performance & Resilience Engineer* -*ToxMCP Ecosystem Analysis - April 2026* diff --git a/ToxMCP_Audit_Reviewed_v2/VALIDATION_BACKLOG.md b/ToxMCP_Audit_Reviewed_v2/VALIDATION_BACKLOG.md deleted file mode 100644 index 9a40fcd..0000000 --- a/ToxMCP_Audit_Reviewed_v2/VALIDATION_BACKLOG.md +++ /dev/null @@ -1,48 +0,0 @@ -# ToxMCP Audit Validation Backlog - -**Purpose:** Convert the reviewed audit pack into a more externally defensible package. 
- ---- - -## Priority 0 - validation required before external sharing - -| ID | Finding | What to validate | Output needed | -|---|---|---|---| -| V0-1 | SPARQL unsafe interpolation | Confirm whether structural query fragments, `ORDER BY`, `LIMIT`, or graph patterns can be influenced by untrusted input at runtime | Minimal PoC, affected code path, safe-vs-unsafe query examples | -| V0-2 | Prompt / instruction injection via chemical identifiers | Trace whether untrusted identifiers are interpolated into model prompts or agent instructions without structured isolation | Prompt boundary diagram, example payload, before/after mitigation test | -| V0-3 | Part 11 / Annex 11 readiness gap | Confirm intended regulated use, signature requirements, and whether procedural controls already exist outside the repos | Control mapping, gap matrix, intended-use memo | -| V0-4 | Upstream provenance/version capture | Verify what the external providers actually expose for versioning, snapshots, and response metadata | Provider capability matrix, proposed internal pinning strategy | -| V0-5 | Population/OOM thresholds | Run controlled load tests on representative worker sizes | Memory/latency curves, safe defaults, enforced limits | - ---- - -## Priority 1 - should be reproduced soon - -| ID | Finding | What to validate | Output needed | -|---|---|---|---| -| V1-1 | Audit chain integrity | Recompute hashes from stored content and confirm mismatch behavior | Unit/integration tests | -| V1-2 | Deterministic hashing for PBPK events | Cross-platform serialization check for floats, NaN, infinity, and ordering | Regression test matrix | -| V1-3 | Distributed tracing gap | Run a multi-tool workflow and confirm whether a single trace can be reconstructed | Trace propagation test | -| V1-4 | Scientific review checkpoints | Confirm that high-risk workflow states can be paused, reviewed, and resumed cleanly | UX flow and test cases | -| V1-5 | Container/runtime hardening risk | Validate actual 
attack surface for file parsing, package installation, and runtime privileges | Threat model plus runtime config review | - ---- - -## Priority 2 - packaging and governance - -| ID | Task | Why it matters | -|---|---|---| -| V2-1 | Replace inherited line references with live-repo permalinks or commit hashes | External readers can verify claims | -| V2-2 | Add fix verification criteria to each critical item | Prevents “remediation theater” | -| V2-3 | Create a machine-readable finding register | Easier tracking across repos | -| V2-4 | Add sign-off owners and due dates | Turns the pack into an execution tool | - ---- - -## Suggested working rule - -Do not present a finding as externally validated until it has: -1. a code or config location in the live repository -2. stated preconditions -3. a reproduction or reasoning note -4. a test for the proposed fix diff --git a/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/README.md b/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/README.md deleted file mode 100644 index c210d55..0000000 --- a/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# AOP-MCP Audit Package (Reviewed Copy) - -**Repository:** `aop-mcp` -**Package version cited in original audit:** `v0.8.1` -**Review date:** 2026-04-15 -**Overall posture:** **High-to-critical for trust-boundary safety, draft integrity, and ontology evolution** - ---- - -## How to read this reviewed copy - -The original package correctly identified `aop-mcp` as an integration-heavy surface where: -- query safety -- upstream resilience -- draft/signature integrity -- ontology/schema drift - -all matter at once. 
- -This reviewed copy retains those concerns, but is stricter about exploit claims: -- **Observed** unsafe interpolation patterns are treated as hard findings -- destructive outcomes such as graph deletion are treated as **scenario-dependent** unless endpoint permissions are known - ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| AOP-01 | Unsafe query templating / interpolation | **Critical** | Observed + scenario | High / Medium | A trust-boundary issue is present; exact exploit impact depends on runtime-controlled fields and endpoint permissions | -| AOP-02 | Upstream query failure handling lacks mature resilience controls | **High** | Observed | High | Failure cascades and latency amplification are plausible | -| AOP-03 | Draft metadata and signature semantics are not strong enough for high-assurance review flows | **Critical** | Observed | High | Review and approval lineage is weaker than it should be | -| AOP-04 | Checksum-chain verification needs stronger content binding and write/read validation | **Critical** | Observed | High | Draft history is not yet as tamper-evident as intended | -| AOP-05 | Ontology/version drift can break cross-suite meaning over time | **High** | Observed + inferred | Medium-High | Migration and comparability risk is real | - ---- - -## Detailed findings - -### AOP-01: Query templating should be redesigned around allow-listed query plans -**Severity:** **Critical** -**Evidence basis:** Observed + scenario -**Confidence:** High for unsafe interpolation, Medium for worst-case exploit impact - -The package shows template rendering through Python string formatting. That is a legitimate trust-boundary concern. 
- -### Reviewed wording -The safest defensible statement is: - -> query construction includes unsafe interpolation patterns; query broadening, result manipulation, or unintended data exposure are plausible if structural fragments can be influenced by untrusted input. - -Avoid assuming destructive update operations unless the endpoint is confirmed to allow them. - -### Better mitigation pattern -Do **not** treat arbitrary query fragments as bindable parameters. - -Use: -- fixed query templates selected from an allow-list -- safe binding only for literals/URIs -- allow-listed sort and limit options -- separate read-only query builders from any update-capable code path - ---- - -### AOP-02: Upstream resilience controls are underdeveloped -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The original package’s concern about circuit breaking, backoff, and graceful degradation remains sound. -If the AOP upstream is unavailable or slow, repeated retries can amplify latency and user confusion. - -### Recommended control -- bounded retries with jitter -- circuit-breaker/open-state behavior -- explicit error surface to callers -- cache or partial-result policy where scientifically acceptable -- telemetry for endpoint health and fallback path usage - ---- - -### AOP-03: Draft approval semantics are not yet strong enough -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -The original package was right to highlight that draft metadata and authorship fields do not, by themselves, constitute strong review or approval lineage. - -### Recommended control -- strong actor identity linkage -- signature meaning (`authored`, `reviewed`, `approved`, `rejected`) -- UTC timestamping -- content-hash binding -- verified chain between successive draft versions - -### Reviewed wording -Use: **high risk of non-conformance for regulated or high-assurance review workflows** -Avoid: categorical claims of inevitable regulatory outcome. 
- ---- - -### AOP-04: Checksum verification should prove content integrity, not only compare stored values -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -A checksum field is helpful only when: -- the checksum is mandatory -- the algorithm is defined -- the content used to compute it is canonicalized -- the chain is verified on read -- mutations cannot silently sever lineage - -This remains a strong and useful finding from the original pack. - ---- - -### AOP-05: Ontology and schema drift need an explicit migration strategy -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -`aop-mcp` sits near an evolving ontology surface. That means long-lived interoperability requires more than normalization at read time. - -### Recommended control -- record ontology/version provenance in artifacts -- maintain deprecation and remapping tables -- define migration tests for cross-suite schemas -- avoid burying semantic version assumptions inside tool logic - ---- - -## Recommended sequence - -### Immediate -- redesign unsafe query construction -- add resilience controls around SPARQL/upstream failure -- strengthen draft metadata and checksum semantics - -### Next -- formalize ontology/version provenance -- add migration tests and compatibility policy -- align traceability with suite-wide provenance model - ---- - -## Validation backlog specific to this repo - -- confirm which query components can be influenced by untrusted input at runtime -- confirm endpoint permissions and whether any update semantics are reachable -- test checksum recomputation from draft content -- verify how ontology version changes propagate into downstream consumers - ---- - -## Related documents - -- `toxmcp_security_audit_report.md` -- `toxmcp_contract_audit_report.md` -- `toxmcp_regulatory_audit_report.md` -- `aop-mcp-audit/REMEDIATION_CODE.md` diff --git a/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/REMEDIATION_CODE.md 
b/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/REMEDIATION_CODE.md deleted file mode 100644 index d62b090..0000000 --- a/ToxMCP_Audit_Reviewed_v2/aop-mcp-audit/REMEDIATION_CODE.md +++ /dev/null @@ -1,710 +0,0 @@ -# AOP-MCP: Detailed Remediation Code - -> **Reviewed copy note:** Treat these snippets as reference patterns. Do **not** pass arbitrary structural query fragments from untrusted input; use allow-listed query plans and bind only literals/URIs. - - -## 1. Parameterized SPARQL Queries (Injection Prevention) - -**Reviewed caution:** Bind values safely, but keep query *structure* fixed. `ORDER BY`, `LIMIT`, graph patterns, and predicate choices should come from allow-lists, not directly from user input. - -**File:** `src/adapters/sparql_client.py` - -```python -from rdflib.plugins.sparql import prepareQuery -from rdflib import Literal, URIRef, Variable -from typing import Mapping, Any, Dict, Tuple -import re - -class SafeSparqlClient: - """SPARQL client with parameterized query support.""" - - def __init__(self, endpoints: List[SparqlEndpoint]): - self._endpoints = endpoints - self._client = httpx.AsyncClient() - self._template_cache: Dict[str, str] = {} - - def render(self, name: str, parameters: Mapping[str, Any] | None = None) -> Tuple[str, Dict]: - """ - Render SPARQL template with safe parameter binding. 
- - Returns: - Tuple of (query_string, bindings_dict) - """ - template = self._get_template(name) - params = parameters or {} - - # Extract parameter placeholders from template - placeholders = self._extract_placeholders(template) - - # Validate all parameters are provided - missing = placeholders - set(params.keys()) - if missing: - raise ValueError(f"Missing parameters for template {name}: {missing}") - - # Convert parameters to RDFLib types - bindings = {} - for key, value in params.items(): - bindings[key] = self._convert_to_rdf_type(value) - - # Replace placeholders in template with variable references - query_string = self._substitute_placeholders(template, placeholders) - - return query_string, bindings - - def _extract_placeholders(self, template: str) -> set: - """Extract {placeholder} patterns from template.""" - pattern = r'\{(\w+)\}' - return set(re.findall(pattern, template)) - - def _convert_to_rdf_type(self, value: Any) -> Any: - """Convert Python value to appropriate RDFLib type.""" - if isinstance(value, str): - # Check if it's a URI - if value.startswith('http://') or value.startswith('https://'): - return URIRef(value) - # Otherwise treat as literal - return Literal(value) - elif isinstance(value, (int, float)): - return Literal(value) - elif isinstance(value, bool): - return Literal(value) - else: - return Literal(str(value)) - - def _substitute_placeholders(self, template: str, placeholders: set) -> str: - """Replace {placeholder} with ?placeholder for SPARQL variable binding.""" - result = template - for placeholder in placeholders: - result = result.replace(f'{{{placeholder}}}', f'?{placeholder}') - return result - - async def query(self, name: str, parameters: Mapping[str, Any] | None = None) -> dict: - """Execute parameterized SPARQL query.""" - query_string, bindings = self.render(name, parameters) - - # Prepare the query - prepared = prepareQuery(query_string) - - # Execute with bindings - return await 
self._execute_with_bindings(prepared, bindings) - - async def _execute_with_bindings(self, prepared_query, bindings: Dict) -> dict: - """Execute prepared query with parameter bindings.""" - # Convert bindings to SPARQL BIND statements or use endpoint's binding mechanism - # This example uses string substitution for the final query (still safe due to RDFLib types) - bound_query = prepared_query.serialize() - - for var_name, value in bindings.items(): - # Replace ?var with bound value - if isinstance(value, URIRef): - bound_query = bound_query.replace(f'?{var_name}', f'<{value}>') - elif isinstance(value, Literal): - # Properly escape literal values - escaped = str(value).replace('\\', '\\\\').replace('"', '\\"') - bound_query = bound_query.replace(f'?{var_name}', f'"{escaped}"') - - return await self._dispatch(bound_query) - - -# Template example (search_aops.sparql) -SAFE_SEARCH_AOPS_TEMPLATE = """ -SELECT DISTINCT ?aop ?title ?shortName -WHERE {{ - ?aop a aopo:AdverseOutcomePathway ; - dc:title ?title . - - # Safe parameter binding with ?variable syntax - {search_bindings} - - FILTER ({search_filter}) -}} -ORDER BY {order_by} -LIMIT {limit} -""" - -# Usage example -async def search_aops_safe(chemical_name: str): - client = SafeSparqlClient(endpoints) - - # Parameters are only safe if structural query parts are fixed or allow-listed; do not treat arbitrary graph fragments as bindable user input - result = await client.query("search_aops", { - "search_bindings": "?aop aopo:hasMIE ?mie . ?mie dc:title ?chemicalName .", - "search_filter": "CONTAINS(LCASE(?chemicalName), LCASE(?chemicalNameParam))", - "order_by": "?title", - "limit": "100", - "chemicalNameParam": chemical_name # This is safely bound as Literal - }) - - return result -``` - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. 
-> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## 2. Circuit Breaker for SPARQL Endpoints - -**File:** `src/adapters/sparql_client.py` - -```python -import asyncio -import random -from enum import Enum -from dataclasses import dataclass -from typing import Optional -import time - -class CircuitState(Enum): - CLOSED = "closed" # Normal operation - OPEN = "open" # Failing, reject requests - HALF_OPEN = "half_open" # Testing if recovered - -@dataclass -class CircuitBreakerConfig: - failure_threshold: int = 5 - recovery_timeout: float = 30.0 - half_open_max_calls: int = 1 - success_threshold: int = 2 - -class SparqlCircuitBreaker: - """Circuit breaker for SPARQL endpoint protection.""" - - def __init__(self, config: CircuitBreakerConfig = None): - self.config = config or CircuitBreakerConfig() - self.state = CircuitState.CLOSED - self.failure_count = 0 - self.success_count = 0 - self.last_failure_time: Optional[float] = None - self.half_open_calls = 0 - self._lock = asyncio.Lock() - - async def call(self, func, *args, **kwargs): - """Execute function with circuit breaker protection.""" - async with self._lock: - if self.state == CircuitState.OPEN: - if self._should_attempt_reset(): - self.state = CircuitState.HALF_OPEN - self.half_open_calls = 0 - else: - raise CircuitBreakerOpen("SPARQL endpoint circuit breaker is OPEN") - - if self.state == CircuitState.HALF_OPEN: - if self.half_open_calls >= self.config.half_open_max_calls: - raise CircuitBreakerOpen("Circuit breaker half-open limit reached") - self.half_open_calls += 1 - - # Execute the call - try: - result = await func(*args, **kwargs) - await self._on_success() - return result - except Exception as e: - await self._on_failure() - raise - - def _should_attempt_reset(self) -> bool: - """Check if enough time has passed to try reset.""" - if self.last_failure_time is 
None: - return True - elapsed = time.time() - self.last_failure_time - return elapsed >= self.config.recovery_timeout - - async def _on_success(self): - """Handle successful call.""" - async with self._lock: - if self.state == CircuitState.HALF_OPEN: - self.success_count += 1 - if self.success_count >= self.config.success_threshold: - self.state = CircuitState.CLOSED - self.failure_count = 0 - self.success_count = 0 - else: - self.failure_count = max(0, self.failure_count - 1) - - async def _on_failure(self): - """Handle failed call.""" - async with self._lock: - self.failure_count += 1 - self.last_failure_time = time.time() - - if self.state == CircuitState.HALF_OPEN: - self.state = CircuitState.OPEN - elif self.failure_count >= self.config.failure_threshold: - self.state = CircuitState.OPEN - -class CircuitBreakerOpen(Exception): - """Exception raised when circuit breaker is open.""" - pass - - -# Integration with SPARQL client -class ResilientSparqlClient(SafeSparqlClient): - """SPARQL client with circuit breaker and retry logic.""" - - def __init__(self, endpoints: List[SparqlEndpoint]): - super().__init__(endpoints) - self.circuit_breakers = { - endpoint.url: SparqlCircuitBreaker() - for endpoint in endpoints - } - - async def _dispatch( - self, - query: str, - *, - timeout: float | None = None, - max_retries: int = 3 - ) -> dict[str, Any]: - """Dispatch with circuit breaker and exponential backoff.""" - last_error: Exception | None = None - - for endpoint in self._endpoints: - circuit_breaker = self.circuit_breakers[endpoint.url] - - for attempt in range(max_retries): - try: - # Use circuit breaker - result = await circuit_breaker.call( - self._execute_single, - endpoint, - query, - timeout - ) - return result - - except CircuitBreakerOpen: - # Skip to next endpoint - break - except Exception as exc: - last_error = exc - - # Exponential backoff with jitter - if attempt < max_retries - 1: - delay = (2 ** attempt) + random.uniform(0, 1) - await 
asyncio.sleep(delay) - - raise SparqlUpstreamError(f"All endpoints failed: {last_error}") - - -# Fallback mechanism -class SparqlClientWithFallback(ResilientSparqlClient): - """SPARQL client with fallback to cache on failure.""" - - def __init__(self, endpoints: List[SparqlEndpoint], cache: Cache): - super().__init__(endpoints) - self.cache = cache - - async def query_with_fallback( - self, - name: str, - parameters: Mapping[str, Any] | None = None, - use_cache_on_failure: bool = True - ) -> dict: - """Query with fallback to cache on failure.""" - cache_key = f"{name}:{hash(str(parameters))}" - - try: - # Try live query - result = await self.query(name, parameters) - - # Cache successful result - await self.cache.set(cache_key, result, ttl=3600) - - return result - - except SparqlUpstreamError as e: - if not use_cache_on_failure: - raise - - # Try cache fallback - cached = await self.cache.get(cache_key) - if cached: - return { - "results": cached, - "source": "cache", - "warning": "Results from cache due to upstream failure" - } - - # Return empty result with warning - return { - "results": [], - "source": "fallback", - "warning": f"Upstream failure: {e}. No cached data available." - } -``` - ---- - -## 3. 
Electronic Signatures (21 CFR Part 11) - -**File:** `src/services/draft_store/signing.py` - -```python -from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.primitives.asymmetric import padding, rsa -from cryptography.hazmat.backends import default_backend -from cryptography.exceptions import InvalidSignature -from datetime import datetime -from typing import List, Optional, Literal -from pydantic import BaseModel -import base64 -import hashlib - -class ElectronicSignature(BaseModel): - """Electronic signature per 21 CFR Part 11.""" - - signer_user_id: str - signature_meaning: Literal["authored", "reviewed", "approved"] - timestamp_utc: str - content_hash: str # SHA-256 of signed content - signature_value: str # Base64-encoded signature - cert_chain: List[str] # PEM-encoded certificates - - def verify(self, content: bytes, trusted_certs: List[str]) -> bool: - """Verify signature against content.""" - # Verify content hash - computed_hash = hashlib.sha256(content).hexdigest() - if computed_hash != self.content_hash: - return False - - # Verify signature - try: - public_key = self._extract_public_key() - signature_bytes = base64.b64decode(self.signature_value) - - public_key.verify( - signature_bytes, - self.content_hash.encode(), - padding.PSS( - mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() - ) - return True - except InvalidSignature: - return False - - def _extract_public_key(self): - """Extract public key from certificate chain.""" - if not self.cert_chain: - raise ValueError("No certificate chain provided") - - cert_pem = self.cert_chain[0] - cert = serialization.load_pem_x509_certificate( - cert_pem.encode(), - default_backend() - ) - return cert.public_key() - -class SignatureService: - """Service for creating and verifying electronic signatures.""" - - def __init__(self, private_key_path: str, cert_path: str): - self.private_key = self._load_private_key(private_key_path) - 
self.certificate = self._load_certificate(cert_path) - - def sign_content( - self, - content: bytes, - signer_user_id: str, - meaning: Literal["authored", "reviewed", "approved"] - ) -> ElectronicSignature: - """Sign content electronically.""" - # Compute content hash - content_hash = hashlib.sha256(content).hexdigest() - - # Create signature - signature = self.private_key.sign( - content_hash.encode(), - padding.PSS( - mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() - ) - - return ElectronicSignature( - signer_user_id=signer_user_id, - signature_meaning=meaning, - timestamp_utc=datetime.utcnow().isoformat(), - content_hash=content_hash, - signature_value=base64.b64encode(signature).decode(), - cert_chain=[self.certificate] - ) - - def _load_private_key(self, path: str): - """Load private key from file.""" - with open(path, "rb") as f: - return serialization.load_pem_private_key( - f.read(), - password=None, - backend=default_backend() - ) - - def _load_certificate(self, path: str) -> str: - """Load certificate from file.""" - with open(path, "r") as f: - return f.read() - - -# Integration with draft store -from dataclasses import dataclass, field -from typing import List - -@dataclass -class VersionMetadata: - """Version metadata with electronic signatures.""" - - author: str - signatures: List[ElectronicSignature] = field(default_factory=list) - checksum: str = "" # REQUIRED - previous_checksum: str = "" # REQUIRED - created_at: str = field(default_factory=lambda: datetime.utcnow().isoformat()) - - def add_signature(self, signature: ElectronicSignature): - """Add electronic signature.""" - self.signatures.append(signature) - - def verify_signatures(self, content: bytes, trusted_certs: List[str]) -> bool: - """Verify all signatures.""" - if not self.signatures: - return False - - for sig in self.signatures: - if not sig.verify(content, trusted_certs): - return False - - return True - -class SignedDraftStore: - 
"""Draft store with electronic signature support.""" - - def __init__(self, signature_service: SignatureService): - self.signature_service = signature_service - - async def sign_draft( - self, - draft_id: str, - user_id: str, - meaning: Literal["authored", "reviewed", "approved"], - content: bytes - ): - """Sign a draft electronically.""" - signature = self.signature_service.sign_content( - content=content, - signer_user_id=user_id, - meaning=meaning - ) - - draft = await self.get_draft(draft_id) - draft.metadata.add_signature(signature) - - await self.save_draft(draft) - - async def verify_draft(self, draft_id: str, trusted_certs: List[str]) -> bool: - """Verify all signatures on a draft.""" - draft = await self.get_draft(draft_id) - content = await self.get_draft_content(draft_id) - - return draft.metadata.verify_signatures(content, trusted_certs) -``` - ---- - -## 4. Ontology Migration Framework - -**File:** `src/semantic/migration.py` - -```python -from typing import Dict, List, Callable, Any -from pydantic import BaseModel -import json - -class OntologyVersion(BaseModel): - """Ontology version identifier.""" - name: str - version: str # Semantic version - -class MigrationRule(BaseModel): - """Single migration rule.""" - source_version: str - target_version: str - transformer: Callable[[Any], Any] - description: str - -class OntologyMigrator: - """Migrate data between ontology versions.""" - - def __init__(self): - self.migrations: Dict[str, List[MigrationRule]] = {} - self.term_mappings: Dict[str, Dict[str, str]] = {} - - def register_migration( - self, - source: str, - target: str, - transformer: Callable[[Any], Any], - description: str = "" - ): - """Register a migration rule.""" - key = f"{source}->{target}" - if key not in self.migrations: - self.migrations[key] = [] - - self.migrations[key].append(MigrationRule( - source_version=source, - target_version=target, - transformer=transformer, - description=description - )) - - def register_term_mapping(self, 
version: str, mappings: Dict[str, str]): - """Register term mappings for a version transition.""" - self.term_mappings[version] = mappings - - def migrate(self, data: Any, from_version: str, to_version: str) -> Any: - """Migrate data from one version to another.""" - if from_version == to_version: - return data - - # Find migration path - path = self._find_migration_path(from_version, to_version) - if not path: - raise UnsupportedMigration( - f"No migration path from {from_version} to {to_version}" - ) - - # Apply migrations in sequence - result = data - for step in path: - result = self._apply_migration(result, step) - - return result - - def _find_migration_path(self, from_version: str, to_version: str) -> List[str]: - """Find shortest migration path using BFS.""" - # Simplified BFS - production would use proper graph algorithm - visited = {from_version} - queue = [(from_version, [])] - - while queue: - current, path = queue.pop(0) - - if current == to_version: - return path - - # Find all possible next versions - for key in self.migrations: - if key.startswith(f"{current}->"): - next_version = key.split("->")[1] - if next_version not in visited: - visited.add(next_version) - queue.append((next_version, path + [key])) - - return None - - def _apply_migration(self, data: Any, migration_key: str) -> Any: - """Apply a single migration step.""" - rules = self.migrations.get(migration_key, []) - - for rule in rules: - data = rule.transformer(data) - - # Apply term mappings - version = migration_key.split("->")[1] - if version in self.term_mappings: - data = self._apply_term_mappings(data, self.term_mappings[version]) - - return data - - def _apply_term_mappings(self, data: Any, mappings: Dict[str, str]) -> Any: - """Apply term mappings to data.""" - if isinstance(data, dict): - return { - mappings.get(k, k): self._apply_term_mappings(v, mappings) - for k, v in data.items() - } - elif isinstance(data, list): - return [self._apply_term_mappings(item, mappings) for item 
in data] - elif isinstance(data, str): - return mappings.get(data, data) - return data - - -# Predefined migrations -migrator = OntologyMigrator() - -# AOP ontology v1 to v2 migration -migrator.register_term_mapping("aop-ontology-v2", { - "AOP:123": "AOP:123v2", - "KE:456": "KE:456v2", - "KER:789": "KER:789v2", -}) - -def migrate_aop_structure_v1_to_v2(data: dict) -> dict: - """Migrate AOP structure from v1 to v2.""" - if "key_events" in data: - # v2 uses 'key_event_relationships' instead of 'key_events' - data["key_event_relationships"] = data.pop("key_events") - - if "molecular_initiating_event" in data: - # v2 nests MIE under 'events' - data["events"] = { - "molecular_initiating_event": data.pop("molecular_initiating_event") - } - - return data - -migrator.register_migration( - source="aop-ontology-v1", - target="aop-ontology-v2", - transformer=migrate_aop_structure_v1_to_v2, - description="Migrate AOP structure to v2 format" -) - - -# Usage in CURIE service -class MigratingCurieService: - """CURIE service with migration support.""" - - def __init__(self, migrator: OntologyMigrator): - self.migrator = migrator - self.current_version = "aop-ontology-v2" - - def normalize(self, value: str, target_version: str = None) -> str: - """Normalize CURIE with optional version migration.""" - # Extract version from CURIE if present - curie_version = self._extract_version(value) - - if curie_version and curie_version != (target_version or self.current_version): - # Need to migrate - data = {"curie": value} - migrated = self.migrator.migrate( - data, - from_version=curie_version, - to_version=target_version or self.current_version - ) - return migrated["curie"] - - return value - - def _extract_version(self, curie: str) -> Optional[str]: - """Extract version from CURIE if present.""" - # Example: AOP:123v2 -> aop-ontology-v2 - if "v" in curie: - parts = curie.split(":") - if len(parts) == 2: - id_part = parts[1] - if "v" in id_part: - version = id_part.split("v")[-1] - return 
f"aop-ontology-v{version}" - return None -``` - ---- - -*These remediation code snippets address the critical issues identified in the AOP-MCP audit.* diff --git a/ToxMCP_Audit_Reviewed_v2/cognitive_ergonomics_audit_report.md b/ToxMCP_Audit_Reviewed_v2/cognitive_ergonomics_audit_report.md deleted file mode 100644 index ae11fbf..0000000 --- a/ToxMCP_Audit_Reviewed_v2/cognitive_ergonomics_audit_report.md +++ /dev/null @@ -1,413 +0,0 @@ -# ToxMCP Suite - Cognitive Ergonomics Audit Report - -**Auditor:** Cognitive Ergonomics Designer -**Date:** April 2026 -**Scope:** comptox-mcp, oqt-mcp, aop-mcp, pbpk-mcp - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## Executive Summary - -This audit identifies **critical cognitive ergonomics failures** in the ToxMCP ecosystem that could lead scientists to erroneous conclusions. The suite enables rapid "audit-ready" PDF generation without adequate safeguards, creating a dangerous illusion of scientific rigor. - -### Key Finding: The "Foot-Gun" Pattern - -The ToxMCP suite provides powerful automation for toxicology workflows but lacks critical guardrails that prevent non-programmer scientists from: -1. Accepting ambiguous chemical identifications -2. Trusting unverified PDF outputs as "audit-ready" -3. Proceeding without human verification of critical assumptions -4. 
Conflating units across different measurement contexts - ---- - -## 🔴 CRITICAL FINDINGS - -### CR-001: No Mandatory Scientific Review Mode for Autonomous Chains - -**Severity:** 🔴 CRITICAL -**File:** `oqt-mcp/src/tools/implementations/workflow_runner.py` (lines 60-98) -**Cross-ref:** CR-002, CR-003 - -**Issue:** The workflow runner can fully automate a safety assessment from chemical search to PDF generation without requiring human verification of critical assumptions at any point. - -```python -# From workflow_runner.py - WorkflowParams class -class WorkflowParams(BaseModel): - identifier: str = Field(..., description="Chemical identifier") - search_type: str = Field("auto", description="How to interpret the identifier") - # ... no mandatory review checkpoint parameter - qsar_mode: str = Field("recommended", description="QSAR execution preset") -``` - -**How Scientists Could Be Misled:** -- A non-programmer scientist could run `run_workflow` with a chemical name -- The system could resolve to the wrong chemical (e.g., wrong isomer) -- QSAR predictions would run on the wrong substance -- A PDF would be generated with "audit-ready" claims -- The scientist would have no indication that verification was needed - -**Missing Safeguard:** There is no `require_human_review: true` parameter that forces a pause for verification before proceeding to predictive modeling. - ---- - -### CR-002: PDF Generator Lacks Provenance Tables by Default - -**Severity:** 🔴 CRITICAL -**File:** `oqt-mcp/src/utils/pdf_generator.py` (lines 1-104) -**Cross-ref:** CR-001, HG-001 - -**Issue:** The PDF generator creates "audit-ready" reports without mandatory provenance tables showing data sources, versions, and confidence levels. - -```python -# From pdf_generator.py - _build_content function -lines = [ - "O-QT MCP Workflow Report", - "", - f"Generated: {datetime.utcnow().isoformat(timespec='seconds')}Z", - "", -] -lines.append("Summary") -# ... 
NO provenance table included by default -``` - -**How Scientists Could Be Misled:** -- PDF appears professional and complete -- No visible indication of which QSAR models were used -- No version information for the OECD QSAR Toolbox -- No confidence intervals or applicability domain warnings visible -- Scientist presents PDF to regulators as "audit-ready" evidence - -**Missing Safeguard:** No `include_provenance_table: true` default parameter. - ---- - -### CR-003: Confirmation Bias Amplification Through Rapid PDF Generation - -**Severity:** 🔴 CRITICAL -**File:** `oqt-mcp/src/tools/implementations/workflow_runner.py` (lines 330-392) -**Cross-ref:** CR-001, MD-001 - -**Issue:** The system generates PDFs quickly without any "red team" analysis that would surface contradictory evidence or alternative hypotheses. - -```python -# From workflow_runner.py - artifact generation -artifacts = { - "json": _build_artifact_entry(...), - "markdown": _build_artifact_entry(...), - "pdf": _build_artifact_entry(...), # Always generates PDF -} -``` - -**How Scientists Could Be Misled:** -- First result is presented as "the" result -- No automatic generation of alternative interpretations -- No highlighting of data gaps or conflicting evidence -- PDF format creates false sense of finality -- Scientist stops investigating after seeing first "positive" result - -**Missing Safeguard:** No `generate_alternative_hypotheses: true` option or `include_contradictory_evidence: true` parameter. - ---- - -## 🟠 HIGH SEVERITY FINDINGS - -### HG-001: Chemical Search Defaults to "auto" Without Warning - -**Severity:** 🟠 HIGH -**File:** `oqt-mcp/src/tools/implementations/o_qt_qsar_tools.py` (lines 61-67) -**Cross-ref:** CR-001 - -**Issue:** The `search_chemicals` tool defaults to `search_type: "auto"` which may silently match the wrong chemical. 
- -```python -class ChemicalSearchParams(BaseModel): - query: str = Field(..., description="The search term") - search_type: str = Field( - "auto", # DEFAULT DANGER: Auto-detection can be wrong - description="Type of search (e.g., 'auto', 'name', 'cas', 'smiles')." - ) -``` - -**How Scientists Could Be Misled:** -- Scientist searches for "benzene" with default "auto" mode -- System might interpret as SMILES instead of name -- Returns wrong chemical or no results -- Scientist concludes chemical not in database -- Or worse: proceeds with incorrect chemical identification - -**Concrete Example:** -```python -# User searches for CAS "50-00-0" (formaldehyde) -# search_type="auto" might interpret as SMILES "50-00-0" -# Returns no results or wrong chemical -search_chemicals(query="50-00-0", search_type="auto") # DANGEROUS -``` - -**Missing Safeguard:** No warning when "auto" detection is uncertain; no explicit confirmation of chemical identity before proceeding. - ---- - -### HG-002: AOP Version Not Captured in get_aop Output - -**Severity:** 🟠 HIGH -**File:** `aop-mcp/src/server/tools/aop.py` (lines 52-70) -**Cross-ref:** MD-001 - -**Issue:** The `get_aop` tool fetches current AOP-Wiki data without capturing the specific version or timestamp, making reproducibility impossible. 
- -```python -class GetAopInput(BaseModel): - aop_id: str # No version parameter - -async def get_aop(params: GetAopInput) -> dict[str, Any]: - wiki_adapter = get_aop_wiki_adapter() - db_adapter = get_aop_db_adapter() - core_record, assessment_record, stressor_records = await asyncio.gather( - wiki_adapter.get_aop(params.aop_id), # No version specified - wiki_adapter.get_aop_assessment(params.aop_id), - db_adapter.list_stressor_chemicals_for_aop(params.aop_id), - ) -``` - -**How Scientists Could Be Misled:** -- Scientist runs assessment in January, AOP has 3 key events -- AOP is updated in March with new key event -- Scientist re-runs same query in April -- Results are different but no warning is given -- Scientist doesn't realize conclusions have changed -- Regulatory submission contains inconsistent assessments - -**Missing Safeguard:** No `version` parameter; no `retrieved_at` timestamp in output; no warning when AOP has been modified since last retrieval. - ---- - -### HG-003: Unit Fields Present But Not Validated - -**Severity:** 🟠 HIGH -**File:** `pbpk-mcp/src/mcp_bridge/routes/simulation.py` (lines 200-218) -**Cross-ref:** MD-002 - -**Issue:** Unit fields exist in the schema but there's no validation to prevent unit confusion errors. - -```python -class SetParameterValueRequest(GetParameterValueRequest): - value: float - unit: Optional[str] = None # Present but not validated - update_mode: Optional[str] = Field(default="absolute", alias="updateMode") - comment: Optional[str] = None - confirm: Optional[bool] = None -``` - -**How Scientists Could Be Misled:** -- Scientist sets liver volume to "1.5" with unit "L" (liters) -- System expects "mL" (milliliters) -- Simulation runs with 1000x wrong volume -- PK parameters are calculated incorrectly -- No error is raised; results appear valid - -**Missing Safeguard:** No unit validation against expected units; no conversion warnings; no dimensional analysis. 
- ---- - -### HG-004: Confirmation System Can Be Bypassed - -**Severity:** 🟠 HIGH -**File:** `pbpk-mcp/src/mcp_bridge/security/confirmation.py` (lines 1-38) -**Cross-ref:** CR-001 - -**Issue:** The confirmation system for critical operations relies on a simple header check that can be easily bypassed by automated agents. - -```python -_TRUE_VALUES = {"true", "1", "yes", "y", "confirmed"} - -def is_confirmed(request: Request) -> bool: - header_value = request.headers.get(CONFIRMATION_HEADER) - if not header_value: - return False - return header_value.split(",")[0].strip().lower() in _TRUE_VALUES -``` - -**How Scientists Could Be Misled:** -- Agent chain includes `confirm: true` in all requests -- Critical operations proceed without actual human review -- Scientist believes system has "guardrails" -- In reality, guardrails are cosmetic only - -**Missing Safeguard:** No out-of-band confirmation (e.g., email, separate UI); no rate limiting on confirmations; no audit of who confirmed. - ---- - -## 🟡 MEDIUM SEVERITY FINDINGS - -### MD-001: Temporal Confusion in AOP Assessment - -**Severity:** 🟡 MEDIUM -**File:** `aop-mcp/src/server/tools/aop.py` (lines 152-291) -**Cross-ref:** HG-002 - -**Issue:** The `assess_aop_confidence` tool aggregates evidence without tracking when each piece of evidence was added or modified. - -```python -async def assess_aop_confidence(params: AssessAopConfidenceInput) -> dict[str, Any]: - # ... fetches current data - confidence_dimensions = _build_confidence_dimensions(aop, key_event_details, ker_details) - # No temporal metadata about when evidence was added -``` - -**How Scientists Could Be Misled:** -- Assessment shows "strong" empirical support -- Scientist doesn't realize evidence was added last week -- Previous assessment from 3 months ago showed "moderate" -- No way to track when confidence changed or why - -**Missing Safeguard:** No `evidence_timestamp` field; no `assessment_version` tracking. 
- ---- - -### MD-002: Unit Ambiguity in PK Parameter Output - -**Severity:** 🟡 MEDIUM -**File:** `pbpk-mcp/src/mcp_bridge/routes/simulation.py` (lines 314-326) -**Cross-ref:** HG-003 - -**Issue:** PK parameter units are returned as strings without standardized formatting, risking misinterpretation. - -```python -class PkMetricModel(CamelModel): - parameter: str - unit: Optional[str] = None # Free text, not validated - cmax: Optional[float] = Field(default=None, alias="cmax") - tmax: Optional[float] = Field(default=None, alias="tmax") - auc: Optional[float] = Field(default=None, alias="auc") -``` - -**How Scientists Could Be Misled:** -- AUC returned as "10" with unit "mg/L*h" -- Scientist interprets as "10 mg/(L*h)" when it's "(10 mg/L)*h" -- Dosing calculations are off by orders of magnitude - -**Missing Safeguard:** No standardized unit format (e.g., UCUM); no unit validation; no dimensional analysis. - ---- - -### MD-003: Fallback Search Mode Silently Changes Results - -**Severity:** 🟡 MEDIUM -**File:** `comptox-mcp/src/epacomp_tox/resources/chemical.py` (lines 450-547) -**Cross-ref:** HG-001 - -**Issue:** The `resolve_chemical_identifier` tool uses fallback search modes without requiring explicit user acknowledgment. - -```python -def resolve_chemical_identifier( - self, - *, - identifier: str, - identifier_type: Optional[str] = None, - allow_fallback: bool = False, # Must be explicitly set to True - max_candidates: int = 5, -) -> Dict[str, Any]: -``` - -**How Scientists Could Be Misled:** -- Scientist sets `allow_fallback=True` to handle edge cases -- Exact match fails, fallback to "contains" returns multiple candidates -- System returns "ambiguous" status but scientist's script ignores it -- First candidate is used without verification -- Wrong chemical proceeds through workflow - -**Missing Safeguard:** No mandatory pause when fallback is used; no requirement to explicitly select from candidates. 
- ---- - -### MD-004: QSAR Mode "recommended" Is Opaque - -**Severity:** 🟡 MEDIUM -**File:** `oqt-mcp/src/tools/implementations/workflow_runner.py` (lines 75-78) -**Cross-ref:** CR-001 - -**Issue:** The default `qsar_mode: "recommended"` doesn't explain which models are selected or why. - -```python -qsar_mode: str = Field( - "recommended", # What does "recommended" mean? - description="QSAR execution preset (`recommended`, `all`, or `none`).", -) -``` - -**How Scientists Could Be Misled:** -- Scientist uses default "recommended" mode -- Doesn't realize only 3 of 15 available models were run -- Reports "QSAR analysis complete" when it was partial -- Regulator assumes comprehensive analysis was performed - -**Missing Safeguard:** No transparency about which models are in "recommended" set; no warning when models are excluded. - ---- - -## CROSS-REFERENCE MATRIX - -| Finding | CR-001 | CR-002 | CR-003 | HG-001 | HG-002 | HG-003 | HG-004 | MD-001 | MD-002 | MD-003 | MD-004 | -|---------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------| -| CR-001 | - | X | X | X | | | X | | | | X | -| CR-002 | X | - | X | | | | | | | | | -| CR-003 | X | X | - | | | | | X | | | | -| HG-001 | X | | | - | | | | | | X | | -| HG-002 | | | | | - | | | X | | | | -| HG-003 | | | | | | - | | | X | | | -| HG-004 | X | | | | | | - | | | | | -| MD-001 | | | X | | X | | | - | | | | -| MD-002 | | | | | | X | | | - | | | -| MD-003 | | | | X | | | | | | - | | -| MD-004 | X | | | | | | | | | | - | - ---- - -## RECOMMENDATIONS - -### Immediate Actions Required - -1. **Implement Mandatory Scientific Review Mode** - - Add `require_human_review: true` parameter to all workflow tools - - Require explicit acknowledgment before proceeding to predictive modeling - - Log reviewer identity and timestamp - -2. 
**Add Provenance Tables to All PDFs** - - Include data sources, versions, retrieval timestamps - - List all models used with confidence intervals - - Show applicability domain warnings prominently - -3. **Implement Red Team Analysis** - - Generate alternative hypotheses automatically - - Surface contradictory evidence - - Include confidence intervals and uncertainty quantification - -4. **Add Version Tracking to AOP Tools** - - Include `retrieved_at` timestamp in all outputs - - Warn when AOP has been modified since last retrieval - - Support explicit version selection - -5. **Implement Unit Validation** - - Use standardized unit formats (UCUM) - - Validate units against expected dimensions - - Require explicit unit confirmation for critical parameters - ---- - -## CONCLUSION - -The ToxMCP suite provides powerful automation capabilities but currently prioritizes convenience over scientific rigor. The lack of mandatory verification steps, combined with rapid PDF generation, creates a dangerous "foot-gun" pattern where well-intentioned scientists can unknowingly produce erroneous assessments. - -**The most critical issue is the absence of a mandatory scientific review mode.** An autonomous agent can currently execute a complete safety assessment workflow—from ambiguous chemical search to "audit-ready" PDF—without any human verification of critical assumptions. - -Without these safeguards, the ToxMCP suite risks becoming a tool for generating convincing-looking but potentially erroneous toxicology assessments. 
- ---- - -*Report generated by Cognitive Ergonomics Designer* -*For the ToxMCP Ecosystem Orchestrator* diff --git a/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/README.md b/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/README.md deleted file mode 100644 index 4c3007e..0000000 --- a/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/README.md +++ /dev/null @@ -1,173 +0,0 @@ -# CompTox-MCP Audit Package (Reviewed Copy) - -**Repository:** `comptox-mcp` -**Package version cited in original audit:** `v0.2.2` -**Review date:** 2026-04-15 -**Overall posture:** **High risk for defensibility and provenance**, more than for classic appsec - ---- - -## How to read this reviewed copy - -This summary is designed to be safer to circulate internally than the original draft. - -- **Observed** means the claim is grounded in the supplied audit material. -- **Observed + inferred** means the material supports a broader architecture conclusion. -- **Scenario** means the issue is threat-model relevant but still needs runtime validation. - -This is **not** a live-repo re-audit. Line references were inherited from the supplied package. 
- ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| CTX-01 | Upstream provenance / version capture is not first-class | **Critical** | Observed + inferred | Medium-High | Historical outputs may be hard to defend if provider versions or snapshots are not recorded | -| CTX-02 | Audit trail can fall back to ordinary logging semantics | **Critical** | Observed | High | Tamper evidence and reconstruction are weaker than they should be | -| CTX-03 | Retry strategy lacks mature backoff/jitter guidance | **High** | Observed | Medium | Could amplify upstream instability under load | -| CTX-04 | Transport and protocol handling is locally implemented | **High** | Observed | Medium-High | Migration cost and consistency risk increase as MCP evolves | -| CTX-05 | Upstream data integrity relies heavily on external providers | **High** | Scenario | Medium | Provenance and consistency controls should not depend on unsupported supplier features | - ---- - -## Why this repo matters in the suite - -`comptox-mcp` is a provenance-sensitive edge of the ToxMCP system because it often sits near: -- upstream evidence retrieval -- identity resolution and hazard context -- hand-off into downstream reasoning - -That means small omissions here can cascade into larger suite-level defensibility gaps later. 
- ---- - -## Detailed findings - -### CTX-01: Upstream provenance / version capture is incomplete -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -The original audit correctly flagged that the package does not clearly show a robust mechanism to record: -- upstream provider version or release identifier -- data snapshot or retrieval timestamp -- request parameters used -- response hash or cache key -- how that metadata is persisted into downstream workflow records - -### Reviewed wording -The strongest defensible claim is **not** that every upstream supports strict version pinning. -It is that the current package does not show a reliable suite-level way to **capture and replay upstream provenance**. - -### Recommended control -Use the strongest control the provider actually supports: -1. if the provider exposes a version/snapshot selector, record and enforce it -2. if not, capture request URL, query params, retrieval time, response hash, and cache identity -3. persist that metadata into the workflow/provenance envelope -4. prefer an internal retrieval proxy if deterministic replay is a requirement - -> Do **not** assume that custom headers like `X-API-Version` or `X-Data-Snapshot` are supported unless the upstream provider documents them. - ---- - -### CTX-02: Audit trail design is weaker than required for defensibility -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -The original audit’s concern about fallback-to-logging behavior remains strong. If audit events can devolve into ordinary logs without: -- chain validation -- content-addressed records -- user/session context -- immutable or append-controlled storage semantics - -then the resulting trail is unlikely to support strong post-hoc reconstruction. - -### Reviewed wording -Use: **high risk of non-conformance for regulated or high-assurance use** -Avoid: automatic claims of guaranteed regulatory rejection. 
- -### Recommended control -- define a canonical audit-event envelope -- include prior hash / content hash -- bind event to actor, session, tool, input identity, and upstream provenance -- verify the chain on read, not only on write - ---- - -### CTX-03: Retry behavior can worsen upstream instability -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** Medium - -This is a classic operational risk rather than a unique toxicology issue. Without jitter, bounded retries, and explicit failure-mode policy, a stressed upstream can trigger synchronized retries and unpredictable latency. - -### Recommended control -- exponential backoff with jitter -- hard retry caps -- surface upstream instability in provenance and alerts -- decide explicitly whether failures should be cached, retried later, or returned as partial results - ---- - -### CTX-04: Transport/protocol logic is fragmented -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** Medium-High - -The original package was directionally correct: local protocol handling increases long-term migration and consistency cost. - -### Recommended control -- centralize transport/version handling in a shared package or shared adapter layer -- keep server logic separate from transport concerns -- make capability/version negotiation testable at the boundary - ---- - -### CTX-05: Upstream integrity should not rely on unsupported supplier-side signing -**Severity:** **High** -**Evidence basis:** Scenario -**Confidence:** Medium - -The original audit’s concern about supplier dependence is valid, but the reviewed copy tightens the mitigation guidance. - -### Better control pattern -Prefer this order of controls: -1. TLS and authenticated transport where available -2. request/response provenance capture -3. cached response hashing -4. consistency checks across time or across sources for high-value conclusions -5. 
provider-side signatures **only if the provider actually supports them** - ---- - -## Recommended sequence - -### Immediate -- define the provenance fields that downstream repos must receive from `comptox-mcp` -- harden audit-event structure -- add retry jitter/backoff - -### Next -- align transport/version handling with the suite -- define provider capability matrix for versioning/snapshots -- add fix verification tests for audit chain and provenance persistence - ---- - -## Validation backlog specific to this repo - -- verify what upstream services actually expose for version or snapshot control -- confirm where provenance fields are persisted and consumed downstream -- test audit chain recomputation from stored content -- load-test retry behavior against realistic upstream failures - ---- - -## Related documents - -- `TOXMCP_MASTER_AUDIT_REPORT.md` -- `toxmcp_regulatory_audit_report.md` -- `toxmcp_future_proofing_audit_report.md` -- `comptox-mcp-audit/REMEDIATION_CODE.md` diff --git a/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/REMEDIATION_CODE.md b/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/REMEDIATION_CODE.md deleted file mode 100644 index 01c6457..0000000 --- a/ToxMCP_Audit_Reviewed_v2/comptox-mcp-audit/REMEDIATION_CODE.md +++ /dev/null @@ -1,649 +0,0 @@ -# CompTox-MCP: Detailed Remediation Code - -> **Reviewed copy note:** Treat these snippets as reference patterns. Do not assume upstream providers support custom version headers or response signing unless those features are documented by the provider. - - -## 1. Version Pinning for Upstream APIs - -**Reviewed caution:** If an upstream provider does not expose explicit version or snapshot selectors, capture request/response provenance internally instead of inventing unsupported protocol features. 
- -**File:** `src/epacomp_tox/client.py` - -```python -from typing import Dict, Optional -from pydantic import BaseModel -import httpx -import hashlib - -class APIVersionConfig(BaseModel): - """Configuration for API version pinning.""" - api_version: str # e.g., "2024-01-15" - data_snapshot_id: str # e.g., "ds_2024_q1_v3" - require_version_header: bool = True - -class VersionedCompToxClient: - """CompTox API client with version pinning.""" - - def __init__( - self, - base_url: str = "https://comptox.epa.gov/ctx-api", - version_config: Optional[APIVersionConfig] = None - ): - self.base_url = base_url - self.version_config = version_config or APIVersionConfig( - api_version="2024-01-15", - data_snapshot_id="latest" - ) - self.client = httpx.AsyncClient() - self.response_cache: Dict[str, dict] = {} - - def _get_version_headers(self) -> Dict[str, str]: - """Get version pinning headers.""" - headers = {} - if self.version_config.require_version_header: - headers["X-API-Version"] = self.version_config.api_version - headers["X-Data-Snapshot"] = self.version_config.data_snapshot_id - return headers - - async def get_chemical_detail( - self, - dtxsid: str, - use_cache: bool = True - ) -> dict: - """Get chemical details with version pinning.""" - cache_key = f"{dtxsid}:{self.version_config.api_version}:{self.version_config.data_snapshot_id}" - - if use_cache and cache_key in self.response_cache: - return self.response_cache[cache_key] - - url = f"{self.base_url}/chemical/detail/{dtxsid}" - headers = self._get_version_headers() - - response = await self.client.get(url, headers=headers) - response.raise_for_status() - - data = response.json() - - # Add version metadata - data["_api_metadata"] = { - "api_version": self.version_config.api_version, - "data_snapshot_id": self.version_config.data_snapshot_id, - "retrieved_at": datetime.utcnow().isoformat(), - "response_hash": hashlib.sha256(response.content).hexdigest()[:16] - } - - if use_cache: - self.response_cache[cache_key] = 
data - - return data - - async def get_qsar_predictions( - self, - dtxsid: str, - model_id: str - ) -> dict: - """Get QSAR predictions with model version tracking.""" - url = f"{self.base_url}/qsar/predictions/{dtxsid}" - headers = self._get_version_headers() - headers["X-QSAR-Model-ID"] = model_id - - response = await self.client.get(url, headers=headers) - response.raise_for_status() - - data = response.json() - - # Add model version metadata - data["_model_metadata"] = { - "model_id": model_id, - "model_version": response.headers.get("X-QSAR-Model-Version", "unknown"), - "api_version": self.version_config.api_version, - "retrieved_at": datetime.utcnow().isoformat() - } - - return data - - -# Integration with workflow -class VersionedWorkflow: - """Workflow with complete version tracking.""" - - def __init__(self, client: VersionedCompToxClient): - self.client = client - - async def run_assessment(self, dtxsid: str) -> dict: - """Run chemical assessment with full version tracking.""" - # Get chemical details - chemical = await self.client.get_chemical_detail(dtxsid) - - # Get QSAR predictions - predictions = await self.client.get_qsar_predictions( - dtxsid, - model_id="TEST_4.2" - ) - - # Compile evidence with version metadata - evidence = { - "chemical": chemical, - "predictions": predictions, - "assessment_metadata": { - "comptox_api_version": chemical["_api_metadata"]["api_version"], - "data_snapshot_id": chemical["_api_metadata"]["data_snapshot_id"], - "qsar_model_version": predictions["_model_metadata"]["model_version"], - "assessment_timestamp": datetime.utcnow().isoformat() - } - } - - return evidence -``` - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## 2. 
Cryptographic Audit Chain - -**File:** `src/epacomp_tox/audit.py` - -```python -import hashlib -import json -import base64 -from datetime import datetime -from typing import Dict, List, Optional, Callable -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives.asymmetric import padding, rsa -from cryptography.hazmat.backends import default_backend -import os - -class AuditEvent(BaseModel): - """Single audit event with cryptographic verification.""" - event_type: str - timestamp: str - user_id: str - session_id: str - action: str - resource: str - details: Dict - content_hash: str - previous_hash: str - signature: Optional[str] = None - -class CryptographicAuditChain: - """Tamper-evident audit chain.""" - - def __init__(self, private_key_path: Optional[str] = None): - self.previous_hash = "0" * 64 - self.events: List[AuditEvent] = [] - self.sinks: List[Callable[[AuditEvent], None]] = [] - - # Load or generate signing key - if private_key_path and os.path.exists(private_key_path): - self.private_key = self._load_private_key(private_key_path) - else: - self.private_key = self._generate_key() - if private_key_path: - self._save_private_key(private_key_path) - - def emit(self, event_data: Dict, user_id: str, session_id: str) -> AuditEvent: - """Emit audit event with cryptographic chaining.""" - # Compute content hash - content = json.dumps(event_data, sort_keys=True) - content_hash = hashlib.sha256(content.encode()).hexdigest() - - # Create event - event = AuditEvent( - event_type=event_data.get("type", "unknown"), - timestamp=datetime.utcnow().isoformat(), - user_id=user_id, - session_id=session_id, - action=event_data.get("action", "unknown"), - resource=event_data.get("resource", "unknown"), - details=event_data, - content_hash=content_hash, - previous_hash=self.previous_hash - ) - - # Sign event - event.signature = self._sign_event(event) - - # Update chain - self.previous_hash = content_hash - self.events.append(event) - - # Emit to 
sinks - for sink in self.sinks: - sink(event) - - return event - - def _sign_event(self, event: AuditEvent) -> str: - """Cryptographically sign event.""" - payload = f"{event.content_hash}:{event.previous_hash}:{event.timestamp}" - signature = self.private_key.sign( - payload.encode(), - padding.PSS( - mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() - ) - return base64.b64encode(signature).decode() - - def verify_chain(self) -> bool: - """Verify integrity of entire audit chain.""" - previous_hash = "0" * 64 - - for event in self.events: - # Verify previous hash linkage - if event.previous_hash != previous_hash: - return False - - # Verify content hash - content = json.dumps(event.details, sort_keys=True) - computed_hash = hashlib.sha256(content.encode()).hexdigest() - if computed_hash != event.content_hash: - return False - - # Verify signature - if not self._verify_signature(event): - return False - - previous_hash = event.content_hash - - return True - - def _verify_signature(self, event: AuditEvent) -> bool: - """Verify event signature.""" - try: - payload = f"{event.content_hash}:{event.previous_hash}:{event.timestamp}" - signature = base64.b64decode(event.signature) - - self.private_key.public_key().verify( - signature, - payload.encode(), - padding.PSS( - mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() - ) - return True - except Exception: - return False - - def add_sink(self, sink: Callable[[AuditEvent], None]): - """Add audit sink (e.g., file, database, external service).""" - self.sinks.append(sink) - - def _generate_key(self): - """Generate RSA key pair.""" - return rsa.generate_private_key( - public_exponent=65537, - key_size=2048, - backend=default_backend() - ) - - def _load_private_key(self, path: str): - """Load private key from file.""" - with open(path, "rb") as f: - return serialization.load_pem_private_key( - f.read(), - password=None, - 
backend=default_backend() - ) - - def _save_private_key(self, path: str): - """Save private key to file.""" - pem = self.private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption() - ) - with open(path, "wb") as f: - f.write(pem) - - -# File-based audit sink with WORM properties -class WORMAuditSink: - """Write-Once-Read-Many audit log sink.""" - - def __init__(self, log_dir: str): - self.log_dir = Path(log_dir) - self.log_dir.mkdir(parents=True, exist_ok=True) - - # Set immutable flag on log directory (Unix) - self._set_immutable() - - def __call__(self, event: AuditEvent): - """Write event to WORM log.""" - date_str = datetime.utcnow().strftime("%Y-%m-%d") - log_file = self.log_dir / f"audit_{date_str}.jsonl" - - # Append-only mode - with open(log_file, "a") as f: - f.write(json.dumps(event.dict(), default=str) + "\n") - f.flush() - os.fsync(f.fileno()) # Ensure write to disk - - # Set immutable flag on file (Unix) - self._set_file_immutable(log_file) - - def _set_immutable(self): - """Set immutable flag on log directory.""" - try: - # Linux: chattr +i - import subprocess - subprocess.run(["chattr", "+a", str(self.log_dir)], check=False) - except Exception: - pass # Not supported on all systems - - def _set_file_immutable(self, file_path: Path): - """Set immutable flag on log file.""" - try: - import subprocess - subprocess.run(["chattr", "+i", str(file_path)], check=False) - except Exception: - pass - - -# Usage -audit_chain = CryptographicAuditChain(private_key_path="/secure/audit_key.pem") -audit_chain.add_sink(WORMAuditSink("/var/log/comptox-mcp/audit")) - -# In API endpoint -async def chemical_search_endpoint(request: Request): - user = authenticate(request) - - audit_chain.emit( - event_data={ - "type": "chemical_search", - "action": "search", - "resource": "chemical", - "query": request.query_params.get("q"), - "results_count": len(results) - }, - 
user_id=user.id, - session_id=request.session_id - ) -``` - ---- - -## 3. Retry with Exponential Backoff and Jitter - -**File:** `src/epacomp_tox/client.py` - -```python -import random -import asyncio -from typing import TypeVar, Callable -import httpx - -T = TypeVar('T') - -class RetryConfig: - """Configuration for retry behavior.""" - max_retries: int = 3 - base_delay: float = 0.5 - max_delay: float = 60.0 - exponential_base: float = 2.0 - jitter: bool = True - retryable_status_codes: set = {429, 500, 502, 503, 504} - -async def retry_with_backoff( - func: Callable[[], T], - config: RetryConfig = None, - is_retryable: Callable[[Exception], bool] = None -) -> T: - """ - Execute function with exponential backoff and jitter. - - Args: - func: Async function to execute - config: Retry configuration - is_retryable: Function to determine if exception is retryable - - Returns: - Result of func() - - Raises: - Last exception if all retries exhausted - """ - config = config or RetryConfig() - is_retryable = is_retryable or (lambda e: True) - - last_exception = None - - for attempt in range(config.max_retries + 1): - try: - return await func() - except Exception as e: - last_exception = e - - # Check if we should retry - if attempt >= config.max_retries: - raise - - if not is_retryable(e): - raise - - # Calculate delay - delay = config.base_delay * (config.exponential_base ** attempt) - delay = min(delay, config.max_delay) - - # Add jitter - if config.jitter: - delay = delay * (0.5 + random.random()) - - await asyncio.sleep(delay) - - raise last_exception - - -# HTTP-specific retry -async def http_request_with_retry( - client: httpx.AsyncClient, - method: str, - url: str, - **kwargs -) -> httpx.Response: - """Make HTTP request with retry logic.""" - config = RetryConfig() - - def is_retryable_error(e: Exception) -> bool: - """Determine if error is retryable.""" - if isinstance(e, httpx.HTTPStatusError): - return e.response.status_code in config.retryable_status_codes - if 
isinstance(e, (httpx.ConnectError, httpx.TimeoutException)): - return True - return False - - async def make_request(): - response = await client.request(method, url, **kwargs) - response.raise_for_status() - return response - - return await retry_with_backoff( - make_request, - config=config, - is_retryable=is_retryable_error - ) - - -# Rate limit handling -async def handle_rate_limit(response: httpx.Response) -> float: - """ - Extract retry delay from rate limit response. - - Returns: - Delay in seconds - """ - if response.status_code != 429: - return 0 - - # Check Retry-After header - retry_after = response.headers.get("Retry-After") - if retry_after: - try: - return float(retry_after) - except ValueError: - # Could be HTTP date, parse it - pass - - # Check X-RateLimit-Reset header - reset_timestamp = response.headers.get("X-RateLimit-Reset") - if reset_timestamp: - try: - reset_time = datetime.fromtimestamp(int(reset_timestamp)) - delay = (reset_time - datetime.utcnow()).total_seconds() - return max(delay, 1) - except (ValueError, OSError): - pass - - # Default backoff - return 60.0 -``` - ---- - -## 4. 
Distributed Tracing - -**File:** `src/epacomp_tox/middleware.py` - -```python -from contextvars import ContextVar -from typing import Optional, Dict -import uuid - -# Context variable for trace ID -trace_id_var: ContextVar[str] = ContextVar('trace_id', default=None) -span_id_var: ContextVar[str] = ContextVar('span_id', default=None) - -class TraceContext: - """W3C Trace Context propagation.""" - - TRACEPARENT_HEADER = "traceparent" - TRACESTATE_HEADER = "tracestate" - - def __init__(self, trace_id: str = None, span_id: str = None): - self.trace_id = trace_id or self._generate_trace_id() - self.span_id = span_id or self._generate_span_id() - self.parent_span_id = None - - @classmethod - def from_headers(cls, headers: Dict[str, str]) -> "TraceContext": - """Parse trace context from HTTP headers.""" - traceparent = headers.get(cls.TRACEPARENT_HEADER) - if traceparent: - # Parse W3C traceparent format: 00-{trace_id}-{span_id}-{flags} - parts = traceparent.split("-") - if len(parts) == 4: - return cls(trace_id=parts[1], span_id=parts[2]) - - return cls() # Generate new context - - def to_headers(self) -> Dict[str, str]: - """Convert to HTTP headers.""" - traceparent = f"00-{self.trace_id}-{self.span_id}-01" - return { - self.TRACEPARENT_HEADER: traceparent - } - - def create_child_span(self) -> "TraceContext": - """Create child span context.""" - child = TraceContext(trace_id=self.trace_id) - child.parent_span_id = self.span_id - return child - - def _generate_trace_id(self) -> str: - """Generate 16-byte hex trace ID.""" - return uuid.uuid4().hex + uuid.uuid4().hex[:16] - - def _generate_span_id(self) -> str: - """Generate 8-byte hex span ID.""" - return uuid.uuid4().hex[:16] - - -# FastAPI middleware -from fastapi import Request, Response -from starlette.middleware.base import BaseHTTPMiddleware - -class TracingMiddleware(BaseHTTPMiddleware): - """Middleware to handle distributed tracing.""" - - async def dispatch(self, request: Request, call_next): - # Extract trace 
context from incoming request - trace_context = TraceContext.from_headers(dict(request.headers)) - - # Set context variables - trace_id_var.set(trace_context.trace_id) - span_id_var.set(trace_context.span_id) - - # Add trace context to request state - request.state.trace_context = trace_context - - # Process request - response = await call_next(request) - - # Add trace context to response headers - for key, value in trace_context.to_headers().items(): - response.headers[key] = value - - return response - - -# Traced HTTP client -class TracedHTTPClient: - """HTTP client that propagates trace context.""" - - def __init__(self, base_url: str): - self.base_url = base_url - self.client = httpx.AsyncClient() - - async def request( - self, - method: str, - path: str, - **kwargs - ) -> httpx.Response: - """Make request with trace context propagation.""" - # Get current trace context - trace_id = trace_id_var.get() - span_id = span_id_var.get() - - if trace_id and span_id: - trace_context = TraceContext(trace_id, span_id) - child_context = trace_context.create_child_span() - - # Add trace headers - headers = kwargs.get("headers", {}) - headers.update(child_context.to_headers()) - kwargs["headers"] = headers - - url = f"{self.base_url}{path}" - return await self.client.request(method, url, **kwargs) - - -# Usage in service calls -async def call_oqt_service(chemical_id: str) -> dict: - """Call O-QT service with trace propagation.""" - client = TracedHTTPClient("http://oqt-mcp:8000") - - response = await client.request( - "POST", - "/mcp", - json={ - "tool": "run_qsar_prediction", - "params": {"chemical_id": chemical_id} - } - ) - - return response.json() -``` - ---- - -*These remediation code snippets address the critical issues identified in the CompTox-MCP audit.* diff --git a/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/README.md b/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/README.md deleted file mode 100644 index 7fecf65..0000000 --- 
a/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# OQT-MCP Audit Package (Reviewed Copy) - -**Repository:** `oqt-mcp` -**Package version cited in original audit:** `v0.3.0` -**Review date:** 2026-04-15 -**Overall posture:** **Critical for scientific review governance and output framing** - ---- - -## How to read this reviewed copy - -The strongest findings in `oqt-mcp` are not generic appsec findings. They are about: -- scientific review workflow design -- applicability-domain enforcement -- how confidence and provenance are communicated to users -- whether untrusted identifiers cross into agent or LLM contexts safely - -This reviewed copy keeps those concerns, but distinguishes between: -- **Observed** implementation gaps -- **Observed + inferred** user-risk conclusions -- **Scenario** exploit narratives that still need prompt-boundary validation - ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| OQT-01 | Applicability-domain checks are too easy to treat as narrative metadata | **Critical** | Observed + inferred | High | Out-of-domain predictions may be surfaced without hard workflow friction | -| OQT-02 | High-risk flows do not appear to require human review by default | **Critical** | Observed + inferred | High | Wrong identity or weak evidence can propagate into polished outputs | -| OQT-03 | PDF/report defaults do not foreground provenance and uncertainty strongly enough | **Critical** | Observed + inferred | High | Artifacts may look more final than they are | -| OQT-04 | Untrusted identifiers may cross into prompt/agent contexts without enough isolation | **High** | Observed + scenario | Medium | Needs runtime prompt-boundary validation, but deserves near-term mitigation | -| OQT-05 | Logs may capture sensitive identifiers too directly | **High** | Observed | High | Privacy/confidentiality controls need strengthening | -| OQT-06 
| Workflow permissions and escalation paths deserve review | **Medium / High** | Observed + inferred | Medium | Important, but needs live-repo validation before stronger claims | - ---- - -## Why this repo is central - -`oqt-mcp` is where scientific judgment can become visually convincing very quickly. -That makes it the most important place to embed: -- review checkpoints -- explicit uncertainty language -- provenance defaults -- safe handling of user-supplied identifiers - ---- - -## Detailed findings - -### OQT-01: Applicability-domain logic should gate decisions, not merely decorate them -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** High - -The original package persuasively showed that AD information exists, but can still be treated as a side note rather than a decision gate. - -### Why this matters -A user can be shown: -- a prediction value -- a confidence-ish narrative -- a professional artifact - -without a strong enough system-level interruption when the chemical is poorly represented by the model domain. - -### Recommended control -- introduce an explicit AD decision object -- separate `inside_domain`, `outside_domain`, and `unknown` -- require acknowledgement or manual approval before downstream reporting when outside or unknown -- carry AD status into every artifact header and summary - ---- - -### OQT-02: Human review checkpoints should be first-class -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** High - -The package’s original finding remains strong: a workflow that can proceed from search to output artifact with minimal user intervention is a governance risk in scientific settings. - -### Minimum checkpoints worth enforcing -1. identity resolution / substance confirmation -2. applicability-domain assessment -3. final narrative/report approval - -### Reviewed wording -The issue is not "automation is bad." 
-The issue is that **automation without explicit review-state transitions can create false confidence**. - ---- - -### OQT-03: Output defaults over-signal finality -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** High - -The original audit’s criticism of “audit-ready” style outputs remains valid. Even when technically true that a PDF was generated, the user experience can imply: -- completeness -- validated provenance -- reviewed interpretation -- stable confidence - -before those conditions are satisfied. - -### Recommended control -Make the artifact itself carry its uncertainty: -- provenance table in the first page or header section -- model/tool versions -- AD status and warnings -- explicit human-review state -- draft / reviewed / approved marker -- unresolved evidence gaps section - ---- - -### OQT-04: Treat chemical identifiers as untrusted text at LLM boundaries -**Severity:** **High** -**Evidence basis:** Observed + scenario -**Confidence:** Medium - -The original package may have overstated exploit certainty, but it identified the right boundary. -If chemical names, aliases, notes, or free-text identifiers are interpolated into prompts or agent instructions without structure, instruction confusion becomes plausible. - -### Better mitigation than simple keyword blocking -- normalize Unicode -- remove control characters for LLM-facing contexts, including newlines unless explicitly needed -- pass identifiers as structured data, not concatenated prose -- visually and logically separate system instructions from user-supplied fields -- add regression tests with adversarial identifiers - -### Important nuance -This should be treated as **high priority** even before full exploitation is demonstrated, because the cost of safer prompt construction is modest. 
- ---- - -### OQT-05: Logging needs a stronger privacy model -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The package’s privacy concern remains well supported. If identifiers, SMILES, or other sensitive fields are logged directly, confidentiality can be compromised even when the core workflow is correct. - -### Recommended control -- classify fields by sensitivity -- hash or tokenize where operationally acceptable -- separate immutable audit records from developer/debug logs -- define retention and access boundaries - ---- - -## Recommended sequence - -### Immediate -- AD gating with explicit workflow consequences -- mandatory review checkpoints -- stronger artifact provenance and review state labeling -- prompt-boundary hardening for untrusted identifiers - -### Next -- privacy-aware logging -- clearer permission model review -- validation tests covering wrong-identity and out-of-domain paths - ---- - -## Validation backlog specific to this repo - -- confirm prompt/agent boundary for all identifier-bearing fields -- test AD gating with representative in-domain / out-of-domain / ambiguous compounds -- validate PDF/report UX with scientist users -- review permission and escalation paths in the live repository - ---- - -## Related documents - -- `cognitive_ergonomics_audit_report.md` -- `toxmcp_adversarial_audit_report.md` -- `toxmcp_security_audit_report.md` -- `oqt-mcp-audit/REMEDIATION_CODE.md` diff --git a/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/REMEDIATION_CODE.md b/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/REMEDIATION_CODE.md deleted file mode 100644 index 4fb3fa0..0000000 --- a/ToxMCP_Audit_Reviewed_v2/oqt-mcp-audit/REMEDIATION_CODE.md +++ /dev/null @@ -1,1061 +0,0 @@ -# OQT-MCP: Detailed Remediation Code - -> **Reviewed copy note:** Treat these snippets as reference patterns. 
For LLM-facing contexts, prefer removing control characters **and** newlines from untrusted identifiers unless a well-tested structured representation is used. - - -## 1. Applicability Domain Index (ADI) Calculation - -**File:** `src/tools/implementations/o_qt_qsar_tools.py` - -```python -from pydantic import BaseModel -from typing import List, Dict, Tuple -import numpy as np -from rdkit import Chem -from rdkit.Chem import Descriptors, AllChem - -class ApplicabilityDomainResult(BaseModel): - """Quantitative applicability domain assessment.""" - adi_score: float # 0-1, higher is better - is_within_domain: bool # Hard gate - chemical_class_alerts: List[str] - training_set_overlap: float # Tanimoto similarity to nearest neighbor - domain_boundaries: Dict[str, Tuple[float, float]] - descriptor_values: Dict[str, float] - warnings: List[str] - -class ADICalculator: - """Calculate Applicability Domain Index for QSAR predictions.""" - - def __init__(self, model_id: str): - self.model_id = model_id - self.training_set = self.load_training_set(model_id) - self.domain_boundaries = self.calculate_domain_boundaries() - - def calculate_adi(self, smiles: str) -> ApplicabilityDomainResult: - """Calculate comprehensive ADI for a chemical.""" - mol = Chem.MolFromSmiles(smiles) - if not mol: - return ApplicabilityDomainResult( - adi_score=0.0, - is_within_domain=False, - chemical_class_alerts=["Invalid SMILES"], - training_set_overlap=0.0, - domain_boundaries={}, - descriptor_values={}, - warnings=["Cannot parse chemical structure"] - ) - - # 1. Calculate molecular descriptors - descriptors = self.calculate_descriptors(mol) - - # 2. Check domain boundaries - boundary_violations = self.check_boundaries(descriptors) - - # 3. Calculate training set similarity - similarity = self.calculate_training_set_similarity(mol) - - # 4. Check chemical class alerts - alerts = self.check_chemical_class_alerts(mol) - - # 5. 
Calculate overall ADI - adi_score = self.compute_adi_score( - descriptors, boundary_violations, similarity, alerts - ) - - # 6. Determine if within domain (hard gate) - is_within_domain = ( - adi_score >= 0.7 and # Minimum ADI threshold - similarity >= 0.5 and # Must have some training set similarity - len(boundary_violations) <= 2 # Limited boundary violations - ) - - return ApplicabilityDomainResult( - adi_score=adi_score, - is_within_domain=is_within_domain, - chemical_class_alerts=alerts, - training_set_overlap=similarity, - domain_boundaries=self.domain_boundaries, - descriptor_values=descriptors, - warnings=self.generate_warnings(boundary_violations, alerts) - ) - - def calculate_descriptors(self, mol: Chem.Mol) -> Dict[str, float]: - """Calculate key molecular descriptors.""" - return { - "molecular_weight": Descriptors.MolWt(mol), - "logp": Descriptors.MolLogP(mol), - "hbd": Descriptors.NumHDonors(mol), - "hba": Descriptors.NumHAcceptors(mol), - "tpsa": Descriptors.TPSA(mol), - "rotatable_bonds": Descriptors.NumRotatableBonds(mol), - "aromatic_rings": Descriptors.NumAromaticRings(mol), - "heavy_atoms": mol.GetNumHeavyAtoms(), - } - - def calculate_domain_boundaries(self) -> Dict[str, Tuple[float, float]]: - """Calculate domain boundaries from training set.""" - if not self.training_set: - return {} - - boundaries = {} - for descriptor in ["molecular_weight", "logp", "hbd", "hba", "tpsa"]: - values = [chem["descriptors"][descriptor] for chem in self.training_set] - q1, q3 = np.percentile(values, [25, 75]) - iqr = q3 - q1 - # Use IQR method with 1.5x expansion - boundaries[descriptor] = (q1 - 1.5 * iqr, q3 + 1.5 * iqr) - - return boundaries - - def check_boundaries(self, descriptors: Dict[str, float]) -> List[str]: - """Check if descriptors are within domain boundaries.""" - violations = [] - for desc, value in descriptors.items(): - if desc in self.domain_boundaries: - min_val, max_val = self.domain_boundaries[desc] - if not (min_val <= value <= max_val): - 
violations.append( - f"{desc}: {value:.2f} outside [{min_val:.2f}, {max_val:.2f}]" - ) - return violations - - def calculate_training_set_similarity(self, mol: Chem.Mol) -> float: - """Calculate Tanimoto similarity to nearest neighbor in training set.""" - if not self.training_set: - return 0.0 - - fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048) - max_similarity = 0.0 - - for train_chem in self.training_set: - train_fp = train_chem["fingerprint"] - similarity = DataStructs.TanimotoSimilarity(fp, train_fp) - max_similarity = max(max_similarity, similarity) - - return max_similarity - - def check_chemical_class_alerts(self, mol: Chem.Mol) -> List[str]: - """Check for chemical class-specific alerts.""" - alerts = [] - - # Check for reactive groups - if self.has_reactive_group(mol): - alerts.append("Reactive functional group detected") - - # Check for known problematic scaffolds - if self.has_problematic_scaffold(mol): - alerts.append("Known problematic scaffold") - - # Check for model-specific alerts - alerts.extend(self.model_specific_alerts(mol)) - - return alerts - - def compute_adi_score( - self, - descriptors: Dict[str, float], - boundary_violations: List[str], - similarity: float, - alerts: List[str] - ) -> float: - """Compute overall ADI score (0-1).""" - # Base score from similarity - score = similarity * 0.4 - - # Penalty for boundary violations - violation_penalty = len(boundary_violations) * 0.1 - score -= violation_penalty - - # Penalty for alerts - alert_penalty = len(alerts) * 0.15 - score -= alert_penalty - - # Bonus for being well within boundaries - if len(boundary_violations) == 0: - score += 0.2 - - return max(0.0, min(1.0, score)) - - def generate_warnings( - self, - boundary_violations: List[str], - alerts: List[str] - ) -> List[str]: - """Generate human-readable warnings.""" - warnings = [] - - if boundary_violations: - warnings.append(f"Descriptor boundary violations: {len(boundary_violations)}") - 
warnings.extend(boundary_violations[:3]) # Show first 3 - - if alerts: - warnings.append(f"Chemical class alerts: {len(alerts)}") - warnings.extend(alerts) - - return warnings - - -# Integration with run_qsar_prediction -async def run_qsar_prediction(smiles: str, model_id: str) -> dict: - """Run QSAR prediction with ADI enforcement.""" - # Calculate ADI - adi_calculator = ADICalculator(model_id) - ad_result = adi_calculator.calculate_adi(smiles) - - # Hard gate: reject if outside domain - if not ad_result.is_within_domain: - return { - "prediction": None, - "status": "REJECTED", - "reason": "Outside applicability domain", - "ad_result": ad_result.dict(), - "requires_human_review": True, - "recommendation": "Consider read-across or experimental testing" - } - - # Fetch prediction from QSAR Toolbox - prediction = await fetch_prediction_from_toolbox(smiles, model_id) - - # Combine with ADI - return { - "prediction": prediction, - "ad_result": ad_result.dict(), - "confidence": ad_result.adi_score * prediction.get("confidence", 0.5), - "status": "SUCCESS", - "requires_human_review": ad_result.adi_score < 0.8 # Review if borderline - } -``` - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## 2. 
Chemical Name Sanitization (Prompt Injection Prevention) - -**File:** `src/schemas/workflow_record.py` - -```python -import re -import unicodedata -from typing import Optional - -class ChemicalNameSanitizer: - """Sanitize chemical names to prevent prompt injection.""" - - # Blocked patterns that could be used for prompt injection - BLOCKED_PATTERNS = [ - r'ignore\s+(previous\s+)?instructions', - r'override\s+(all\s+)?(safety|guidelines|constraints)', - r'debug\s+mode', - r'system\s+(test|prompt|instruction)', - r'you\s+are\s+now', - r'new\s+instruction', - r'forget\s+(previous|everything)', - r'disregard\s+(all|previous)', - r'act\s+as\s+(if|though)', - r'pretend\s+to\s+be', - r'roleplay\s+as', - ] - - # Maximum allowed length - MAX_LENGTH = 1000 - - @classmethod - def sanitize(cls, name: str, context: str = "general") -> str: - """ - Sanitize chemical name to prevent prompt injection. - - Args: - name: Raw chemical name input - context: Context where name will be used ("general", "llm_prompt", "search") - - Returns: - Sanitized chemical name - - Raises: - ValueError: If potentially malicious input detected - """ - if not name: - return name - - # Check length - if len(name) > cls.MAX_LENGTH: - raise ValueError(f"Chemical name exceeds maximum length of {cls.MAX_LENGTH}") - - # Normalize Unicode - normalized = unicodedata.normalize('NFKC', name) - - # Remove zero-width and control characters - sanitized = cls._remove_control_chars(normalized) - - # Check for blocked patterns - cls._check_blocked_patterns(sanitized) - - # Context-specific sanitization - if context == "llm_prompt": - sanitized = sanitized.replace('\n', ' ').replace('\r', ' ') - sanitized = cls._sanitize_for_llm(sanitized) - - return sanitized.strip() - - @classmethod - def _remove_control_chars(cls, text: str) -> str: - """Remove control and zero-width characters.""" - # Remove zero-width characters - zero_width = [ - '\u200B', # Zero Width Space - '\u200C', # Zero Width Non-Joiner - '\u200D', # Zero 
Width Joiner - '\uFEFF', # Zero Width No-Break Space - '\u2060', # Word Joiner - '\u180E', # Mongolian Vowel Separator - ] - - for zw in zero_width: - text = text.replace(zw, '') - - # Remove control characters; if the value is destined for an LLM context, prefer removing newlines too - cleaned = [] - for char in text: - cat = unicodedata.category(char) - if cat.startswith('C') and char not in '\n\t': - continue - cleaned.append(char) - - return ''.join(cleaned) - - @classmethod - def _check_blocked_patterns(cls, text: str): - """Check for blocked instruction patterns.""" - text_lower = text.lower() - - for pattern in cls.BLOCKED_PATTERNS: - if re.search(pattern, text_lower, re.IGNORECASE): - raise ValueError( - f"Potentially malicious chemical name detected. " - f"Pattern matched: {pattern}" - ) - - @classmethod - def _sanitize_for_llm(cls, text: str) -> str: - """Additional sanitization for LLM prompts.""" - # Escape special characters that could be interpreted as formatting - text = text.replace('`', '') # Remove backticks - text = text.replace('$', '') # Remove dollar signs (LaTeX) - - # Limit consecutive newlines - text = re.sub(r'\n{3,}', '\n\n', text) - - return text - - @classmethod - def validate_smiles(cls, smiles: str) -> bool: - """Validate SMILES string format.""" - from rdkit import Chem - - try: - mol = Chem.MolFromSmiles(smiles) - return mol is not None - except: - return False - - -# Usage in workflow processing -from pydantic import validator - -class WorkflowInput(BaseModel): - chemical_name: str - - @validator('chemical_name') - def sanitize_chemical_name(cls, v): - return ChemicalNameSanitizer.sanitize(v, context="llm_prompt") - - -class ChemicalSearchParams(BaseModel): - query: str - search_type: str = "name" # Changed from "auto" to safer default - - @validator('query') - def sanitize_query(cls, v): - return ChemicalNameSanitizer.sanitize(v, context="search") -``` - ---- - -## 3. 
PII Scrubbing for Logs - -**File:** `src/tools/registry.py` - -```python -import hashlib -import json -import re -from typing import Any, Dict, List, Optional - -class PrivacyScrubber: - """Scrub PII/PSI (Proprietary Substance Information) from logs.""" - - # Sensitive field patterns - SENSITIVE_PATTERNS = [ - r'(?i)smiles?', # Case-insensitive match for "smiles" or "SMILES" - r'(?i)inchi(key)?', - r'(?i)cas(_number)?', - r'(?i)chemical_name', - r'(?i)preferred_name', - r'(?i)iupac_name', - r'(?i)structure', - r'(?i)molecule', - r'(?i)compound', - r'(?i)substance', - r'(?i)formula', - ] - - # SMILES detection pattern (simplified) - SMILES_PATTERN = re.compile(r'^[A-Za-z0-9@+\-\[\]\\\(\)=#$:.]+$') - - # CAS number pattern - CAS_PATTERN = re.compile(r'^\d{1,7}\-\d{2}\-\d$') - - def __init__(self, salt: Optional[str] = None): - """ - Initialize scrubber with optional salt for hashing. - - Args: - salt: Salt for hashing (should be consistent across services) - """ - self.salt = salt or "toxmcp_default_salt" - - def scrub(self, data: Any, path: str = "") -> Any: - """ - Recursively scrub sensitive data. 
- - Args: - data: Data to scrub - path: Current path in nested structure (for debugging) - - Returns: - Scrubbed data with sensitive fields hashed - """ - if isinstance(data, dict): - return self._scrub_dict(data, path) - elif isinstance(data, list): - return [self.scrub(item, f"{path}[]") for item in data] - elif isinstance(data, str): - return self._scrub_string(data, path) - else: - return data - - def _scrub_dict(self, data: Dict, path: str) -> Dict: - """Scrub dictionary values.""" - scrubbed = {} - for key, value in data.items(): - current_path = f"{path}.{key}" if path else key - - if self._is_sensitive_key(key): - # Hash the value - scrubbed[key] = self._hash_value(value) - else: - # Recursively scrub - scrubbed[key] = self.scrub(value, current_path) - - return scrubbed - - def _scrub_string(self, value: str, path: str) -> str: - """Scrub string value, detecting embedded sensitive data.""" - # Check if entire string is a SMILES - if self._is_smiles(value): - return self._hash_value(value) - - # Check if entire string is a CAS number - if self._is_cas_number(value): - return self._hash_value(value) - - # Check for embedded SMILES in text (more complex) - # This is a simplified check - production would need more sophisticated detection - words = value.split() - scrubbed_words = [] - for word in words: - if self._is_smiles(word) or self._is_cas_number(word): - scrubbed_words.append(self._hash_value(word)) - else: - scrubbed_words.append(word) - - return ' '.join(scrubbed_words) - - def _is_sensitive_key(self, key: str) -> bool: - """Check if key name indicates sensitive data.""" - key_lower = key.lower() - return any(re.match(pattern, key_lower) for pattern in self.SENSITIVE_PATTERNS) - - def _is_smiles(self, value: str) -> bool: - """Check if value looks like a SMILES string.""" - # Basic heuristic: contains typical SMILES characters and minimum length - if len(value) < 3: - return False - - # Check for SMILES-specific characters - smiles_chars = 
set('CNO[]()=@+-#$.1234567890') - value_chars = set(value.upper()) - - # If most characters are SMILES-specific, likely a SMILES - if len(value_chars - smiles_chars) <= 2: - return True - - return False - - def _is_cas_number(self, value: str) -> bool: - """Check if value is a CAS registry number.""" - return bool(self.CAS_PATTERN.match(value)) - - def _hash_value(self, value: Any) -> str: - """Hash a value for logging.""" - if value is None: - return None - - value_str = str(value) - - # Create deterministic hash with salt - hash_input = f"{self.salt}:{value_str}" - hash_value = hashlib.sha256(hash_input.encode()).hexdigest()[:16] - - return f"[HASH:{hash_value}]" - - def create_correlation_id(self, identifier: str) -> str: - """ - Create a correlation ID that can link events without revealing the identifier. - - This allows debugging across services without exposing sensitive data. - """ - return self._hash_value(identifier) - - -# Integration with audit logging -class AuditLogger: - """Audit logger with built-in PII scrubbing.""" - - def __init__(self, scrubber: Optional[PrivacyScrubber] = None): - self.scrubber = scrubber or PrivacyScrubber() - - def log_tool_execution( - self, - tool_name: str, - params: Dict[str, Any], - result: Any, - user_id: str, - correlation_id: str - ): - """Log tool execution with PII scrubbing.""" - scrubbed_params = self.scrubber.scrub(params) - scrubbed_result = self.scrubber.scrub(result) - - event = { - "type": "tool_execution", - "tool": tool_name, - "params": scrubbed_params, - "result_summary": self._summarize_result(scrubbed_result), - "user_id": user_id, - "correlation_id": correlation_id, - "timestamp": datetime.utcnow().isoformat(), - } - - self._emit(event) - - def _summarize_result(self, result: Any) -> Dict: - """Create a summary of result without sensitive data.""" - if isinstance(result, dict): - return { - "status": result.get("status"), - "has_prediction": "prediction" in result, - "has_warnings": "warnings" in 
result and len(result["warnings"]) > 0, - } - return {"type": type(result).__name__} - - -# Usage example -scrubber = PrivacyScrubber(salt="oqt_mcp_production_salt") -audit_logger = AuditLogger(scrubber) - -# In tool execution -async def run_qsar_prediction(smiles: str, model_id: str) -> dict: - correlation_id = scrubber.create_correlation_id(smiles) - - try: - result = await fetch_prediction(smiles, model_id) - - audit_logger.log_tool_execution( - tool_name="run_qsar_prediction", - params={"smiles": smiles, "model_id": model_id}, - result=result, - user_id=current_user.id, - correlation_id=correlation_id - ) - - return result - except Exception as e: - audit_logger.log_tool_execution( - tool_name="run_qsar_prediction", - params={"smiles": smiles, "model_id": model_id}, - result={"error": str(e)}, - user_id=current_user.id, - correlation_id=correlation_id - ) - raise -``` - ---- - -## 4. Mandatory Scientific Review Mode - -**File:** `src/tools/implementations/workflow_runner.py` - -```python -from enum import Enum -from typing import Optional, List, Dict, Any -from pydantic import BaseModel -import asyncio - -class ReviewStatus(str, Enum): - PENDING = "pending" - APPROVED = "approved" - REJECTED = "rejected" - EXPIRED = "expired" - -class ReviewCheckpoint(BaseModel): - """A checkpoint requiring human review.""" - checkpoint_id: str - step: str # e.g., "chemical_id", "ad_assessment", "final_report" - status: ReviewStatus - data: Dict[str, Any] # Data to review - reviewer_id: Optional[str] = None - reviewed_at: Optional[str] = None - comments: Optional[str] = None - expires_at: Optional[str] = None - -class ReviewOrchestrator: - """Orchestrate mandatory human review checkpoints.""" - - REVIEW_TIMEOUT = 3600 # 1 hour timeout for review - - def __init__(self): - self.pending_reviews: Dict[str, ReviewCheckpoint] = {} - self.review_callbacks: Dict[str, asyncio.Event] = {} - - async def create_checkpoint( - self, - workflow_id: str, - step: str, - data: Dict[str, Any], - 
require_approval: bool = True - ) -> ReviewCheckpoint: - """Create a review checkpoint and wait for human approval.""" - checkpoint_id = f"{workflow_id}_{step}_{uuid.uuid4().hex[:8]}" - - checkpoint = ReviewCheckpoint( - checkpoint_id=checkpoint_id, - step=step, - status=ReviewStatus.PENDING, - data=data, - expires_at=(datetime.utcnow() + timedelta(seconds=self.REVIEW_TIMEOUT)).isoformat() - ) - - self.pending_reviews[checkpoint_id] = checkpoint - self.review_callbacks[checkpoint_id] = asyncio.Event() - - # Notify reviewers (e.g., via email, Slack, or UI) - await self.notify_reviewers(checkpoint) - - if require_approval: - # Wait for review with timeout - try: - await asyncio.wait_for( - self.review_callbacks[checkpoint_id].wait(), - timeout=self.REVIEW_TIMEOUT - ) - except asyncio.TimeoutError: - checkpoint.status = ReviewStatus.EXPIRED - raise ReviewTimeout(f"Review checkpoint {checkpoint_id} timed out") - - return self.pending_reviews[checkpoint_id] - - async def submit_review( - self, - checkpoint_id: str, - reviewer_id: str, - decision: ReviewStatus, - comments: Optional[str] = None - ): - """Submit a review decision.""" - if checkpoint_id not in self.pending_reviews: - raise ValueError(f"Unknown checkpoint: {checkpoint_id}") - - checkpoint = self.pending_reviews[checkpoint_id] - - if checkpoint.status != ReviewStatus.PENDING: - raise ValueError(f"Checkpoint already reviewed: {checkpoint.status}") - - checkpoint.status = decision - checkpoint.reviewer_id = reviewer_id - checkpoint.reviewed_at = datetime.utcnow().isoformat() - checkpoint.comments = comments - - # Signal completion - self.review_callbacks[checkpoint_id].set() - - # Audit log - await self.log_review(checkpoint) - - async def notify_reviewers(self, checkpoint: ReviewCheckpoint): - """Notify available reviewers.""" - # Implementation depends on notification system - # Could be: email, Slack, WebSocket, etc. 
- notification = { - "type": "review_required", - "checkpoint_id": checkpoint.checkpoint_id, - "step": checkpoint.step, - "workflow_id": checkpoint.checkpoint_id.split("_")[0], - "data_summary": self.summarize_for_notification(checkpoint.data), - "review_url": f"/review/{checkpoint.checkpoint_id}" - } - - await send_notification(notification) - - def summarize_for_notification(self, data: Dict) -> str: - """Create human-readable summary for notification.""" - if "chemical_name" in data: - return f"Chemical: {data['chemical_name']}" - elif "prediction" in data: - return f"Prediction: {data['prediction']}" - return "Review required" - - -# Integration with workflow runner -class WorkflowRunner: - def __init__(self): - self.review_orchestrator = ReviewOrchestrator() - - async def run_workflow(self, params: WorkflowParams) -> WorkflowResult: - """Run workflow with mandatory review checkpoints.""" - workflow_id = str(uuid.uuid4()) - - # Step 1: Chemical identification - chemical = await self.identify_chemical(params.identifier) - - if params.require_human_review: - checkpoint = await self.review_orchestrator.create_checkpoint( - workflow_id=workflow_id, - step="chemical_id", - data={ - "input_identifier": params.identifier, - "resolved_chemical": chemical.dict(), - "search_type_used": params.search_type - }, - require_approval=True - ) - - if checkpoint.status == ReviewStatus.REJECTED: - return WorkflowResult( - status="REJECTED", - reason=f"Chemical identification rejected: {checkpoint.comments}", - checkpoint=checkpoint - ) - - # Step 2: QSAR predictions - predictions = await self.run_qsar_predictions(chemical, params.qsar_mode) - - # Check for AD warnings - ad_warnings = [p for p in predictions if not p.ad_result.is_within_domain] - - if params.require_human_review and ad_warnings: - checkpoint = await self.review_orchestrator.create_checkpoint( - workflow_id=workflow_id, - step="ad_assessment", - data={ - "chemical": chemical.dict(), - "ad_warnings": [w.dict() for w 
in ad_warnings], - "predictions": [p.dict() for p in predictions] - }, - require_approval=True - ) - - if checkpoint.status == ReviewStatus.REJECTED: - return WorkflowResult( - status="REJECTED", - reason=f"AD assessment rejected: {checkpoint.comments}", - checkpoint=checkpoint - ) - - # Step 3: Generate report - report = await self.generate_report(chemical, predictions) - - # Final review before PDF generation - if params.require_human_review: - checkpoint = await self.review_orchestrator.create_checkpoint( - workflow_id=workflow_id, - step="final_report", - data={ - "report_preview": report.summary(), - "chemical": chemical.dict(), - "predictions_count": len(predictions), - "warnings_count": len(ad_warnings) - }, - require_approval=True - ) - - if checkpoint.status == ReviewStatus.REJECTED: - return WorkflowResult( - status="REJECTED", - reason=f"Final report rejected: {checkpoint.comments}", - checkpoint=checkpoint - ) - - # Generate final PDF - pdf = await self.generate_pdf(report) - - return WorkflowResult( - status="SUCCESS", - workflow_id=workflow_id, - chemical=chemical, - predictions=predictions, - report=report, - pdf=pdf, - review_checkpoints=self.review_orchestrator.get_workflow_reviews(workflow_id) - ) -``` - ---- - -## 5. 
Provenance Tables for PDF Generation - -**File:** `src/utils/pdf_generator.py` - -```python -from datetime import datetime -from typing import Dict, List, Any - -class ProvenanceTableGenerator: - """Generate provenance tables for PDF reports.""" - - def generate_provenance_section(self, workflow_record: Dict) -> str: - """Generate complete provenance section for PDF.""" - sections = [ - self._generate_header(), - self._generate_data_sources_table(workflow_record), - self._generate_models_table(workflow_record), - self._generate_applicability_domain_section(workflow_record), - self._generate_signatures_table(workflow_record), - self._generate_audit_trail(workflow_record), - ] - - return "\n\n".join(sections) - - def _generate_header(self) -> str: - """Generate section header.""" - return """ -## Provenance and Data Quality Information - -This section provides complete traceability for the hazard assessment -contained in this report, including data sources, model versions, and -applicability domain status. 
- -""" - - def _generate_data_sources_table(self, workflow_record: Dict) -> str: - """Generate data sources table.""" - provenance = workflow_record.get("provenance", {}) - - table = """ -### Data Sources and Versions - -| Component | Version | Timestamp | Source | -|-----------|---------|-----------|--------| -""" - - # O-QT MCP version - table += f"| O-QT MCP | {provenance.get('oqt_version', 'N/A')} | {provenance.get('generated_at', 'N/A')} | Internal |\n" - - # QSAR Toolbox version - table += f"| QSAR Toolbox | {provenance.get('toolbox_version', 'N/A')} | {provenance.get('toolbox_timestamp', 'N/A')} | OECD |\n" - - # Data snapshot - table += f"| Data Snapshot | {provenance.get('data_snapshot_id', 'N/A')} | {provenance.get('snapshot_date', 'N/A')} | EPA/OECD |\n" - - # API versions - for api_name, api_info in provenance.get('upstream_apis', {}).items(): - table += f"| {api_name} | {api_info.get('version', 'N/A')} | {api_info.get('called_at', 'N/A')} | External |\n" - - return table - - def _generate_models_table(self, workflow_record: Dict) -> str: - """Generate QSAR models table.""" - predictions = workflow_record.get('predictions', []) - - table = """ -### QSAR Models Used - -| Model | Version | Prediction | Confidence | AD Status | -|-------|---------|------------|------------|-----------| -""" - - for pred in predictions: - model = pred.get('model', {}) - ad_result = pred.get('ad_result', {}) - - model_name = model.get('name', 'Unknown') - model_version = model.get('version', 'N/A') - prediction = pred.get('prediction', 'N/A') - confidence = pred.get('confidence', 'N/A') - ad_status = "✓ In Domain" if ad_result.get('is_within_domain') else "✗ Outside Domain" - - table += f"| {model_name} | {model_version} | {prediction} | {confidence:.2f if isinstance(confidence, float) else confidence} | {ad_status} |\n" - - return table - - def _generate_applicability_domain_section(self, workflow_record: Dict) -> str: - """Generate applicability domain warnings section.""" 
- predictions = workflow_record.get('predictions', []) - - # Collect all AD warnings - all_warnings = [] - for pred in predictions: - ad_result = pred.get('ad_result', {}) - if not ad_result.get('is_within_domain'): - warnings = ad_result.get('warnings', []) - all_warnings.extend(warnings) - - if not all_warnings: - return """ -### Applicability Domain Assessment - -✓ All predictions are within the applicability domain of their respective models. - -""" - - section = """ -### ⚠️ Applicability Domain Warnings - -**WARNING:** The following predictions were made outside the strict applicability domain -of the QSAR models. These predictions should be treated with caution and may require -additional experimental validation. - -**Warnings:** - -""" - for warning in set(all_warnings): # Deduplicate - section += f"- {warning}\n" - - section += """ -**Recommendations:** -1. Consider read-across from structurally similar compounds with experimental data -2. Conduct in vitro testing for critical endpoints -3. 
Consult with a QSAR expert before using these predictions for regulatory decisions - -""" - return section - - def _generate_signatures_table(self, workflow_record: Dict) -> str: - """Generate electronic signatures table.""" - signatures = workflow_record.get('signatures', []) - - if not signatures: - return """ -### Electronic Signatures - -*No electronic signatures have been applied to this report.* - -""" - - table = """ -### Electronic Signatures - -| Role | Signer | Date | Meaning | Verification | -|------|--------|------|---------|--------------| -""" - - for sig in signatures: - role = sig.get('role', 'Unknown') - signer = sig.get('signer_user_id', 'Unknown') - date = sig.get('timestamp', 'N/A') - meaning = sig.get('meaning', 'N/A') - verified = "✓ Verified" if sig.get('verified') else "✗ Failed" - - table += f"| {role} | {signer} | {date} | {meaning} | {verified} |\n" - - return table - - def _generate_audit_trail(self, workflow_record: Dict) -> str: - """Generate audit trail section.""" - audit_events = workflow_record.get('audit_trail', []) - - if not audit_events: - return """ -### Audit Trail - -*No audit events recorded.* - -""" - - section = """ -### Audit Trail - -| Timestamp | Event | User | Details | -|-----------|-------|------|---------| -""" - - for event in audit_events[-10:]: # Show last 10 events - timestamp = event.get('timestamp', 'N/A') - event_type = event.get('type', 'Unknown') - user = event.get('user_id', 'System') - details = event.get('details', '') - - section += f"| {timestamp} | {event_type} | {user} | {details} |\n" - - if len(audit_events) > 10: - section += f"\n*... 
and {len(audit_events) - 10} more events*\n" - - return section - - -# Integration with PDF generator -class PDFGenerator: - def __init__(self): - self.provenance_generator = ProvenanceTableGenerator() - - async def generate_pdf(self, workflow_record: Dict) -> bytes: - """Generate PDF with complete provenance.""" - # Generate main content - content = self.generate_main_content(workflow_record) - - # Generate provenance section - provenance = self.provenance_generator.generate_provenance_section(workflow_record) - - # Combine - full_content = f""" -{content} - ---- - -{provenance} - ---- - -## Disclaimer - -This report was generated automatically using the O-QT MCP system. -The predictions contained herein are based on QSAR models and should -be reviewed by a qualified toxicologist before use in regulatory submissions. - -Report ID: {workflow_record.get('workflow_id', 'N/A')} -Generated: {datetime.utcnow().isoformat()}Z - """ - - # Convert to PDF (using existing PDF library) - return await self.render_to_pdf(full_content) -``` - ---- - -*These remediation code snippets address the critical issues identified in the OQT-MCP audit.* diff --git a/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/README.md b/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/README.md deleted file mode 100644 index 3fea41b..0000000 --- a/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/README.md +++ /dev/null @@ -1,164 +0,0 @@ -# PBPK-MCP Audit Package (Reviewed Copy) - -**Repository:** `pbpk-mcp` -**Package version cited in original audit:** `v0.4.3` -**Review date:** 2026-04-15 -**Overall posture:** **High risk for scientific guardrails and runtime stability** - ---- - -## How to read this reviewed copy - -The original package’s best PBPK findings were about: -- scientifically meaningful parameter control -- resource limits for large simulations -- reproducibility metadata -- runtime isolation and operational hardening - -This reviewed copy keeps those findings, but reduces overstatement where exploitability or 
infrastructure specifics were not validated. - ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| PBPK-01 | Parameter changes need stronger physiological bounds and sweep governance | **Critical** | Observed + inferred | High | Unreviewed parameter exploration can bias conclusions | -| PBPK-02 | Population-size and memory controls are insufficiently explicit | **Critical** | Observed | High | Large jobs can plausibly destabilize workers without enforced limits | -| PBPK-03 | Reproducibility metadata and deterministic hashing need improvement | **High** | Observed | High | Historical comparability and event integrity are weaker than intended | -| PBPK-04 | Container/runtime hardening needs a clearer threat model and stronger controls | **High** | Observed + scenario | Medium | Important, but severity depends on actual deployment/runtime permissions | -| PBPK-05 | Queueing and availability protections deserve explicit load-test validation | **High** | Observed + inferred | Medium-High | Failure under stress is plausible and should be measured | - ---- - -## Detailed findings - -### PBPK-01: Parameter editing needs governance, not only validation -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** High - -The original package correctly identified a domain-specific risk that many generic software audits would miss: -a parameter-editing API can become a vehicle for selective tuning until a preferred outcome appears. 
- -### Why this matters -Even if each individual parameter change is syntactically valid, the workflow still needs: -- physiological plausibility bounds -- actor/reason capture -- sweep detection -- explicit review requirements when repeated tuning occurs - -### Recommended control -- bounds database curated with domain-owner review -- change audit trail with before/after values and rationale -- heuristic or rule-based sweep detection -- stronger review requirements when model outputs change materially after repeated edits - ---- - -### PBPK-02: Resource controls should be measured and enforced -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -The package’s central concern is sound: large population simulations can exhaust memory or queue capacity without explicit control points. - -### Reviewed wording -The exact OOM threshold depends on: -- model complexity -- output retention strategy -- worker memory size -- parallelism settings - -So the reviewed copy avoids wording like "certain OOM" unless infrastructure measurements support it. - -### Recommended control -- hard upper bounds on population size -- memory/CPU quotas -- per-job estimates before execution -- streaming or chunked result handling where feasible -- defaults based on benchmarked infrastructure, not only estimates - ---- - -### PBPK-03: Reproducibility needs a fuller provenance envelope -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The original package was right to emphasize that reproducibility is not just about input values. 
It also depends on: -- model version -- runtime environment -- floating-point serialization -- seeds and stochastic settings -- artifact generation behavior - -### Recommended control -- canonical serialization for hashed events -- explicit handling for float edge cases -- runtime snapshot capture -- clear distinction between scientific result hash and audit-event hash - ---- - -### PBPK-04: Runtime isolation should be threat-model driven -**Severity:** **High** -**Evidence basis:** Observed + scenario -**Confidence:** Medium - -Container hardening and file/runtime isolation matter here, especially if the system ingests untrusted files or executes complex scientific tooling. -The original package likely overstated certainty for some escape scenarios, but it was directionally right to treat runtime hardening as important. - -### Recommended control -- confirm actual trust boundaries for uploaded files and model assets -- run with least privilege -- document seccomp/AppArmor/SELinux or equivalent controls where used -- separate build-time privilege needs from runtime privilege needs - ---- - -### PBPK-05: Availability and queue behavior need explicit validation -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -The original package’s DoS and queue-flooding concerns are plausible. The right next step is not stronger rhetoric; it is measurement. 
- -### Recommended control -- representative load tests -- queue depth and age limits -- cancellation/timeout policy -- clear partial-failure behavior -- telemetry for memory, queue delay, retries, and worker saturation - ---- - -## Recommended sequence - -### Immediate -- parameter bounds and sweep governance -- population and memory limits -- deterministic hashing improvements - -### Next -- runtime hardening review -- load tests and quota tuning -- provenance envelope alignment with the rest of the suite - ---- - -## Validation backlog specific to this repo - -- benchmark population-size vs memory/latency on representative workers -- validate deterministic hashing across platforms and Python versions -- review runtime/file ingestion threat model -- confirm how repeated parameter changes are surfaced to reviewers - ---- - -## Related documents - -- `ToxMCP_Performance_Resilience_Audit_Report.md` -- `toxmcp_regulatory_audit_report.md` -- `toxmcp_adversarial_audit_report.md` -- `pbpk-mcp-audit/REMEDIATION_CODE.md` diff --git a/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/REMEDIATION_CODE.md b/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/REMEDIATION_CODE.md deleted file mode 100644 index b819d40..0000000 --- a/ToxMCP_Audit_Reviewed_v2/pbpk-mcp-audit/REMEDIATION_CODE.md +++ /dev/null @@ -1,902 +0,0 @@ -# PBPK-MCP: Detailed Remediation Code - -> **Reviewed copy note:** Treat these snippets as reference patterns. Physiological bounds, workload limits, and runtime hardening values should be validated against representative models and infrastructure. - - -## 1. 
Parameter Bounds Validation (Physiological Plausibility) - -**File:** `src/mcp/tools/parameter_bounds.py` - -```python -from typing import Dict, Tuple, Optional -from pydantic import BaseModel, validator -from enum import Enum -import numpy as np - -class ParameterCategory(str, Enum): - """Categories of PBPK parameters.""" - PHYSICOCHEMICAL = "physicochemical" - ANATOMICAL = "anatomical" - PHYSIOLOGICAL = "physiological" - ENZYME_KINETICS = "enzyme_kinetics" - -class ParameterBounds(BaseModel): - """Bounds for a single parameter.""" - min_value: float - max_value: float - default_value: float - unit: str - category: ParameterCategory - description: str - references: list = [] # Literature references - - def validate_value(self, value: float) -> bool: - """Check if value is within bounds.""" - return self.min_value <= value <= self.max_value - -class PBPKParameterDatabase: - """Database of physiologically plausible parameter bounds.""" - - # Organ volumes (L) - based on literature - ORGAN_VOLUMES = { - "Liver": ParameterBounds( - min_value=0.5, - max_value=3.0, - default_value=1.5, - unit="L", - category=ParameterCategory.ANATOMICAL, - description="Liver volume", - references=["ICRP 89", "PK-Sim defaults"] - ), - "Kidney": ParameterBounds( - min_value=0.2, - max_value=0.6, - default_value=0.31, - unit="L", - category=ParameterCategory.ANATOMICAL, - description="Kidney volume (both kidneys)", - references=["ICRP 89"] - ), - "Brain": ParameterBounds( - min_value=1.0, - max_value=1.8, - default_value=1.4, - unit="L", - category=ParameterCategory.ANATOMICAL, - description="Brain volume", - references=["ICRP 89"] - ), - "Muscle": ParameterBounds( - min_value=15.0, - max_value=35.0, - default_value=24.0, - unit="L", - category=ParameterCategory.ANATOMICAL, - description="Muscle volume", - references=["ICRP 89"] - ), - "Adipose": ParameterBounds( - min_value=5.0, - max_value=30.0, - default_value=15.0, - unit="L", - category=ParameterCategory.ANATOMICAL, - 
description="Adipose tissue volume", - references=["ICRP 89"] - ), - } - - # Blood flows (L/min) - must sum to cardiac output - BLOOD_FLOWS = { - "Liver": ParameterBounds( - min_value=0.5, - max_value=2.0, - default_value=1.0, - unit="L/min", - category=ParameterCategory.PHYSIOLOGICAL, - description="Hepatic blood flow", - references=["Davies 1993"] - ), - "Kidney": ParameterBounds( - min_value=0.5, - max_value=1.5, - default_value=1.0, - unit="L/min", - category=ParameterCategory.PHYSIOLOGICAL, - description="Renal blood flow", - references=["Davies 1993"] - ), - "Brain": ParameterBounds( - min_value=0.3, - max_value=1.0, - default_value=0.7, - unit="L/min", - category=ParameterCategory.PHYSIOLOGICAL, - description="Cerebral blood flow", - references=["Davies 1993"] - ), - } - - # Clearance parameters - CLEARANCE = { - "Liver|Clearance": ParameterBounds( - min_value=0.0, - max_value=100.0, # Cannot exceed hepatic blood flow - default_value=1.0, - unit="L/h", - category=ParameterCategory.ENZYME_KINETICS, - description="Hepatic clearance", - references=["Rowland 1973"] - ), - "Kidney|Clearance": ParameterBounds( - min_value=0.0, - max_value=50.0, # Cannot exceed renal blood flow - default_value=1.0, - unit="L/h", - category=ParameterCategory.ENZYME_KINETICS, - description="Renal clearance", - references=["Rowland 1973"] - ), - } - - # Physicochemical properties - PHYSICOCHEMICAL = { - "Lipophilicity": ParameterBounds( - min_value=-5.0, - max_value=10.0, - default_value=1.0, - unit="logP", - category=ParameterCategory.PHYSICOCHEMICAL, - description="Octanol-water partition coefficient", - references=["Leo 1971"] - ), - "MolecularWeight": ParameterBounds( - min_value=50.0, - max_value=1000.0, - default_value=300.0, - unit="g/mol", - category=ParameterCategory.PHYSICOCHEMICAL, - description="Molecular weight", - references=[] - ), - "FractionUnbound": ParameterBounds( - min_value=0.0, - max_value=1.0, - default_value=0.1, - unit="dimensionless", - 
category=ParameterCategory.PHYSICOCHEMICAL, - description="Fraction unbound in plasma", - references=[] - ), - } - - @classmethod - def get_bounds(cls, parameter_path: str) -> Optional[ParameterBounds]: - """Get bounds for a parameter by path.""" - # Search in all categories - for category in [cls.ORGAN_VOLUMES, cls.BLOOD_FLOWS, cls.CLEARANCE, cls.PHYSICOCHEMICAL]: - if parameter_path in category: - return category[parameter_path] - - # Try partial matching - for category in [cls.ORGAN_VOLUMES, cls.BLOOD_FLOWS, cls.CLEARANCE, cls.PHYSICOCHEMICAL]: - for key, bounds in category.items(): - if key in parameter_path or parameter_path in key: - return bounds - - return None - - @classmethod - def validate_parameter(cls, parameter_path: str, value: float) -> tuple: - """ - Validate parameter value against bounds. - - Returns: - (is_valid, bounds, message) - """ - bounds = cls.get_bounds(parameter_path) - - if bounds is None: - return (True, None, f"No bounds defined for {parameter_path}") - - if not bounds.validate_value(value): - return ( - False, - bounds, - f"Value {value} for {parameter_path} outside plausible range " - f"[{bounds.min_value}, {bounds.max_value}] {bounds.unit}" - ) - - return (True, bounds, "Valid") - - @classmethod - def get_all_parameters(cls) -> Dict[str, ParameterBounds]: - """Get all defined parameters.""" - all_params = {} - for category in [cls.ORGAN_VOLUMES, cls.BLOOD_FLOWS, cls.CLEARANCE, cls.PHYSICOCHEMICAL]: - all_params.update(category) - return all_params - - -# Integration with set_parameter_value -class ValidatedSetParameterValueRequest(BaseModel): - """Parameter value request with validation.""" - - simulation_id: str - parameter_path: str - value: float - unit: Optional[str] = None - update_mode: Optional[str] = "absolute" - comment: Optional[str] = None - - @validator('value') - def validate_physiological_bounds(cls, v, values): - """Validate against physiological bounds.""" - if 'parameter_path' not in values: - return v - - 
parameter_path = values['parameter_path'] - is_valid, bounds, message = PBPKParameterDatabase.validate_parameter( - parameter_path, v - ) - - if not is_valid: - raise ValueError(message) - - return v - - @validator('parameter_path') - def validate_parameter_exists(cls, v): - """Warn if parameter not in database.""" - bounds = PBPKParameterDatabase.get_bounds(v) - if bounds is None: - # Log warning but allow (might be custom parameter) - logger.warning(f"Parameter {v} not in database - no bounds validation") - return v - - -# Parameter change audit trail -class ParameterChangeAudit: - """Audit trail for parameter changes.""" - - def __init__(self): - self.changes: list = [] - - def log_change( - self, - simulation_id: str, - parameter_path: str, - old_value: float, - new_value: float, - user_id: str, - reason: str = None - ): - """Log a parameter change.""" - change = { - "timestamp": datetime.utcnow().isoformat(), - "simulation_id": simulation_id, - "parameter_path": parameter_path, - "old_value": old_value, - "new_value": new_value, - "change_magnitude": abs(new_value - old_value) / old_value if old_value != 0 else float('inf'), - "user_id": user_id, - "reason": reason - } - self.changes.append(change) - - def detect_p_hacking(self, simulation_id: str) -> list: - """Detect systematic parameter exploration (p-hacking).""" - sim_changes = [c for c in self.changes if c["simulation_id"] == simulation_id] - - alerts = [] - - # Group by parameter - param_changes = {} - for change in sim_changes: - param = change["parameter_path"] - if param not in param_changes: - param_changes[param] = [] - param_changes[param].append(change) - - # Detect patterns - for param, changes in param_changes.items(): - # Pattern 1: Many small changes to same parameter - if len(changes) > 5: - alerts.append({ - "type": "frequent_changes", - "parameter": param, - "count": len(changes), - "recommendation": "Frequent parameter changes detected - possible optimization bias" - }) - - # Pattern 2: 
Oscillating values (searching for target) - if len(changes) >= 3: - values = [c["new_value"] for c in changes] - # Check for oscillation (up-down-up or down-up-down) - diffs = [values[i+1] - values[i] for i in range(len(values)-1)] - sign_changes = sum(1 for i in range(len(diffs)-1) if diffs[i] * diffs[i+1] < 0) - - if sign_changes >= 2: - alerts.append({ - "type": "oscillating_values", - "parameter": param, - "changes": len(changes), - "recommendation": "Oscillating parameter values - possible target-seeking behavior" - }) - - # Pattern 3: Large magnitude changes - large_changes = [c for c in changes if c["change_magnitude"] > 0.5] - if len(large_changes) > 2: - alerts.append({ - "type": "large_changes", - "parameter": param, - "count": len(large_changes), - "recommendation": "Large parameter changes detected - review physiological plausibility" - }) - - return alerts -``` - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## 2. 
Population Size Limits and Memory Quotas - -**File:** `src/mcp_bridge/services/job_service.py` - -```python -from pydantic import BaseModel, validator -import psutil -import os - -class JobResourceConfig(BaseModel): - """Resource limits for jobs.""" - - max_population_size: int = 5000 - max_memory_per_job_mb: int = 2048 # 2 GB - max_simulation_duration_seconds: int = 1800 # 30 minutes - max_concurrent_jobs_per_user: int = 5 - max_daily_jobs_per_user: int = 100 - - @validator('max_population_size') - def validate_population_size(cls, v): - if v > 10000: - raise ValueError("Population size cannot exceed 10000") - return v - -class ResourceQuotaEnforcer: - """Enforce resource quotas for jobs.""" - - def __init__(self, config: JobResourceConfig = None): - self.config = config or JobResourceConfig() - self.user_job_counts: Dict[str, Dict[str, int]] = {} - - def check_population_size(self, population_size: int) -> tuple: - """ - Check if population size is within quota. - - Returns: - (is_allowed, message) - """ - if population_size > self.config.max_population_size: - return ( - False, - f"Population size {population_size} exceeds maximum {self.config.max_population_size}. " - f"Contact administrator for large population simulations." - ) - - return (True, "Valid") - - def check_memory_quota(self, requested_memory_mb: int) -> tuple: - """ - Check if memory request is within quota. - - Returns: - (is_allowed, message) - """ - if requested_memory_mb > self.config.max_memory_per_job_mb: - return ( - False, - f"Memory request {requested_memory_mb} MB exceeds quota {self.config.max_memory_per_job_mb} MB" - ) - - # Check system memory - available_mb = psutil.virtual_memory().available / (1024 * 1024) - if requested_memory_mb > available_mb * 0.8: - return ( - False, - f"Insufficient system memory. 
Requested: {requested_memory_mb} MB, " - f"Available: {available_mb:.0f} MB" - ) - - return (True, "Valid") - - def check_user_quotas(self, user_id: str) -> tuple: - """ - Check if user is within daily and concurrent job quotas. - - Returns: - (is_allowed, message) - """ - user_counts = self.user_job_counts.get(user_id, { - "concurrent": 0, - "daily": 0, - "last_reset": datetime.utcnow() - }) - - # Reset daily count if new day - last_reset = user_counts["last_reset"] - if (datetime.utcnow() - last_reset).days >= 1: - user_counts["daily"] = 0 - user_counts["last_reset"] = datetime.utcnow() - - if user_counts["concurrent"] >= self.config.max_concurrent_jobs_per_user: - return ( - False, - f"Concurrent job limit reached ({self.config.max_concurrent_jobs_per_user}). " - f"Wait for existing jobs to complete." - ) - - if user_counts["daily"] >= self.config.max_daily_jobs_per_user: - return ( - False, - f"Daily job limit reached ({self.config.max_daily_jobs_per_user}). " - f"Try again tomorrow." - ) - - return (True, "Valid") - - def estimate_memory_requirement(self, population_size: int) -> int: - """ - Estimate memory requirement for population simulation. - - Returns: - Estimated memory in MB - """ - # Base memory for simulation - base_memory = 100 # MB - - # Per-patient memory (empirical estimate) - memory_per_patient = 0.5 # MB - - # Safety factor - safety_factor = 1.5 - - estimated = (base_memory + population_size * memory_per_patient) * safety_factor - - return int(estimated) - - def validate_job_request( - self, - user_id: str, - population_size: int - ) -> tuple: - """ - Validate complete job request against all quotas. 
- - Returns: - (is_valid, errors) - """ - errors = [] - - # Check population size - allowed, message = self.check_population_size(population_size) - if not allowed: - errors.append(message) - - # Check memory - memory_required = self.estimate_memory_requirement(population_size) - allowed, message = self.check_memory_quota(memory_required) - if not allowed: - errors.append(message) - - # Check user quotas - allowed, message = self.check_user_quotas(user_id) - if not allowed: - errors.append(message) - - return (len(errors) == 0, errors) - - def record_job_start(self, user_id: str, job_id: str): - """Record job start for quota tracking.""" - if user_id not in self.user_job_counts: - self.user_job_counts[user_id] = { - "concurrent": 0, - "daily": 0, - "last_reset": datetime.utcnow() - } - - self.user_job_counts[user_id]["concurrent"] += 1 - self.user_job_counts[user_id]["daily"] += 1 - - def record_job_end(self, user_id: str, job_id: str): - """Record job completion.""" - if user_id in self.user_job_counts: - self.user_job_counts[user_id]["concurrent"] = max( - 0, - self.user_job_counts[user_id]["concurrent"] - 1 - ) - - -# Integration with job submission -class ResourceConstrainedJobService: - """Job service with resource quota enforcement.""" - - def __init__(self): - self.quota_enforcer = ResourceQuotaEnforcer() - - async def submit_population_simulation( - self, - user_id: str, - simulation_id: str, - population_size: int, - **kwargs - ) -> JobRecord: - """Submit population simulation with quota checks.""" - # Validate against quotas - is_valid, errors = self.quota_enforcer.validate_job_request( - user_id, population_size - ) - - if not is_valid: - raise QuotaExceeded(f"Job validation failed: {'; '.join(errors)}") - - # Record job start - self.quota_enforcer.record_job_start(user_id, simulation_id) - - try: - # Create job - job = JobRecord( - job_id=str(uuid.uuid4()), - simulation_id=simulation_id, - job_type="population_simulation", - 
population_size=population_size, - user_id=user_id, - estimated_memory_mb=self.quota_enforcer.estimate_memory_requirement( - population_size - ), - submitted_at=datetime.utcnow() - ) - - # Submit to queue - await self._submit_to_queue(job) - - return job - - except Exception: - # Rollback quota on failure - self.quota_enforcer.record_job_end(user_id, simulation_id) - raise -``` - ---- - -## 3. Container Security Hardening - -**File:** `Dockerfile` (Secure Multi-Stage Build) - -```dockerfile -# ============================================================================= -# PBPK-MCP Secure Dockerfile -# Multi-stage build with security hardening -# ============================================================================= - -# Stage 1: Build environment (not used in final image) -FROM r-base:4.3.0 AS builder - -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential \ - libxml2-dev \ - libcurl4-openssl-dev \ - libssl-dev \ - && rm -rf /var/lib/apt/lists/* - -# Install R packages -RUN R -e "install.packages('ospsuite', repos='https://...')" \ - && R -e "install.packages('rxode2', repos='https://...')" - -# Stage 2: Runtime environment (minimal, secure) -FROM gcr.io/distroless/cc-debian11:nonroot - -# Copy R installation from builder -COPY --from=builder /usr/lib/R /usr/lib/R -COPY --from=builder /usr/local/lib/R /usr/local/lib/R -COPY --from=builder /usr/share/R /usr/share/R - -# Copy application code -COPY --chown=nonroot:nonroot ./src /app/src -COPY --chown=nonroot:nonroot ./requirements.txt /app/ - -# Set working directory -WORKDIR /app - -# Switch to non-root user -USER nonroot:nonroot - -# Environment variables -ENV R_HOME=/usr/lib/R -ENV R_LIBS_USER=/usr/local/lib/R/site-library -ENV TOXMCP_CONTAINER_DIGEST=${CONTAINER_DIGEST} -ENV TOXMCP_GIT_COMMIT=${GIT_COMMIT} - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD ["/app/src/health_check"] - -# Expose port 
-EXPOSE 8080 - -# Run application -ENTRYPOINT ["Rscript", "/app/src/main.R"] -``` - -**Seccomp Profile:** `pbpk-seccomp.json` - -```json -{ - "defaultAction": "SCMP_ACT_ERRNO", - "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86"], - "syscalls": [ - { - "names": [ - "accept", - "accept4", - "bind", - "clone", - "close", - "connect", - "epoll_create1", - "epoll_ctl", - "epoll_pwait", - "exit", - "exit_group", - "fcntl", - "fstat", - "futex", - "getpid", - "getrandom", - "getsockname", - "getsockopt", - "listen", - "mmap", - "mprotect", - "munmap", - "openat", - "read", - "recvfrom", - "recvmsg", - "rt_sigaction", - "rt_sigprocmask", - "rt_sigreturn", - "select", - "sendmsg", - "sendto", - "setitimer", - "setsockopt", - "socket", - "write", - "writev" - ], - "action": "SCMP_ACT_ALLOW" - }, - { - "names": [ - "execve", - "execveat", - "fork", - "vfork", - "ptrace", - "mount", - "umount", - "umount2", - "reboot", - "open_by_handle_at" - ], - "action": "SCMP_ACT_ERRNO" - } - ] -} -``` - -**Docker Compose Secure Configuration:** `docker-compose.secure.yml` - -```yaml -version: '3.8' - -services: - pbpk-mcp: - build: - context: . 
- dockerfile: Dockerfile.secure - args: - CONTAINER_DIGEST: ${CONTAINER_DIGEST} - GIT_COMMIT: ${GIT_COMMIT} - - # Security options - security_opt: - - no-new-privileges:true - - seccomp:pbpk-seccomp.json - - apparmor:pbpk-profile - - # Capabilities - cap_drop: - - ALL - cap_add: - - CHOWN - - SETGID - - SETUID - - # Read-only root filesystem - read_only: true - tmpfs: - - /tmp:noexec,nosuid,size=100m - - /var/tmp:noexec,nosuid,size=100m - - # Resource limits - deploy: - resources: - limits: - cpus: '4.0' - memory: 8G - reservations: - cpus: '1.0' - memory: 2G - - # Network - networks: - - toxmcp-internal - - # Environment - environment: - - TOXMCP_ENVIRONMENT=production - - TOXMCP_SECURE_MODE=true - - MAX_POPULATION_SIZE=5000 - - MAX_MEMORY_PER_JOB_MB=2048 - - # Health check - healthcheck: - test: ["CMD", "/app/health_check"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - -networks: - toxmcp-internal: - internal: true # No external access -``` - ---- - -## 4. Floating-Point Determinism - -**File:** `src/mcp_bridge/audit/trail.py` - -```python -import json -import decimal -from decimal import Decimal, ROUND_HALF_UP -from typing import Any - -class CanonicalJsonEncoder(json.JSONEncoder): - """ - JSON encoder with canonical floating-point representation. - - Ensures that the same scientific results produce identical - audit hashes across different hardware and Python versions. 
- """ - - # Precision for IEEE 754 double precision - FLOAT_PRECISION = 15 - - def encode(self, obj: Any) -> str: - return json.dumps( - self._canonicalize(obj), - separators=(",", ":"), - sort_keys=True, - ensure_ascii=True - ) - - def _canonicalize(self, obj: Any) -> Any: - """Convert object to canonical form.""" - if isinstance(obj, float): - # Handle special values - if obj != obj: # NaN - return "NaN" - if obj == float('inf'): - return "Infinity" - if obj == float('-inf'): - return "-Infinity" - - # Round to fixed precision - d = Decimal(obj) - quantized = d.quantize( - Decimal('0.00000000000000'), # 14 decimal places - rounding=ROUND_HALF_UP - ) - return float(quantized) - - elif isinstance(obj, dict): - # Sort keys recursively - return { - k: self._canonicalize(v) - for k, v in sorted(obj.items()) - } - - elif isinstance(obj, list): - return [self._canonicalize(item) for item in obj] - - elif isinstance(obj, str): - # Normalize Unicode - return obj.encode('utf-8', 'ignore').decode('utf-8') - - elif isinstance(obj, (int, bool, type(None))): - return obj - - else: - # Convert unknown types to string - return str(obj) - - -def compute_deterministic_hash(event: dict) -> str: - """ - Compute deterministic hash for audit event. - - This ensures that identical scientific results produce - identical hashes regardless of hardware or Python version. 
- """ - # Remove hash field if present - temp = dict(event) - temp.pop("hash", None) - temp.pop("signature", None) - - # Use canonical JSON encoding - encoder = CanonicalJsonEncoder() - payload = encoder.encode(temp) - - # Compute hash - return hashlib.sha256(payload.encode('utf-8')).hexdigest() - - -# Test determinism -def test_hash_determinism(): - """Test that hash is deterministic across calls.""" - event = { - "prediction": 0.1 + 0.2, # 0.30000000000000004 - "confidence": 0.95, - "nested": { - "value": 1.234567890123456789 - } - } - - hash1 = compute_deterministic_hash(event) - hash2 = compute_deterministic_hash(event) - - assert hash1 == hash2, "Hash should be deterministic" - - # Test with equivalent values - event2 = { - "prediction": 0.3, # Mathematically equivalent - "confidence": 0.95, - "nested": { - "value": 1.234567890123456789 - } - } - - hash3 = compute_deterministic_hash(event2) - - # Note: These may differ due to floating-point representation - # but should be consistent within the same Python session - print(f"Hash 1: {hash1}") - print(f"Hash 2: {hash2}") - print(f"Hash 3: {hash3}") -``` - ---- - -*These remediation code snippets address the critical issues identified in the PBPK-MCP audit.* diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_adversarial_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_adversarial_audit_report.md deleted file mode 100644 index 4efeaeb..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_adversarial_audit_report.md +++ /dev/null @@ -1,386 +0,0 @@ -# ToxMCP Scientific Adversary Audit Report - -**Audit Date:** 2026-04-15 -**Auditor:** Scientific Adversary Agent -**Scope:** comptox-mcp, oqt-mcp, aop-mcp, pbpk-mcp -**Mission:** Identify attack surfaces for generating false confidence and misleading toxicological conclusions - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. 
-> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## Executive Summary - -The ToxMCP ecosystem, while architecturally sophisticated, contains **multiple critical attack surfaces** that an AI agent (or malicious user) could exploit to generate misleading toxicological conclusions with false confidence. The most severe vulnerabilities exist in: - -1. **Applicability Domain (AD) enforcement gaps** in O-QT-MCP QSAR predictions -2. **Confidence interpolation without calibration** across AOP-MCP assessment tools -3. **Parameter manipulation without physiological plausibility guardrails** in PBPK-MCP -4. **Missing epistemic uncertainty propagation** across the tool chain - ---- - -## 🔴 CRITICAL VULNERABILITIES - -### V-001: Missing Applicability Domain Enforcement (O-QT-MCP) - -**Severity:** 🔴 Critical -**Location:** `oqt-mcp/src/tools/implementations/o_qt_qsar_tools.py` -**Attack Surface:** QSAR prediction workflow - -**Description:** -The O-QT-MCP mentions "applicability domain review" in its documentation and schemas, but the actual enforcement is **qualitative and post-hoc**. 
The `build_hazard_applicability_domain()` function (line 61 in hazard_contracts.py) creates a summary but does NOT: - -- Calculate a quantitative Applicability Domain Index (ADI) -- Enforce chemical class boundary checks -- Block predictions for out-of-domain chemicals -- Require explicit user acknowledgment for extrapolation - -**Attack Example:** -```python -# An agent can obtain QSAR predictions for a chemical completely outside -# the training set without receiving a clear UNRELIABLE flag -{ - "tool": "run_qsar_prediction", - "arguments": { - "chem_id": "EXOTIC_CHEM_001", # Novel scaffold not in training data - "model_guid": "skin_sensitization_model" - } -} -# Returns: prediction with "medium" confidence and AD notes buried in metadata -``` - -**False Confidence Generation:** -- The `oqtHazardEvidenceSummary.v1.json` schema includes `applicabilityDomain` as a required field, but it's a **narrative summary**, not a quantitative gate -- An AI agent can chain predictions → ignore AD warnings → present conclusions as reliable - -**Cross-Reference:** V-005 (Confirmation Bias Accumulation) - ---- - -### V-002: Confidence Score False Precision (AOP-MCP) - -**Severity:** 🔴 Critical -**Location:** `aop-mcp/src/tools/semantic/` (confidence assessment) -**Attack Surface:** `assess_aop_confidence` tool - -**Description:** -The `assess_aop_confidence` tool returns heuristic confidence assessments that **appear quantitative but lack calibration**. From the README documentation: - -> "`assess_aop_confidence` is OECD-aligned, not OECD-complete... 
confidence outputs often remain partial even when the tool is behaving correctly" - -The tool returns confidence dimensions as text ("high", "medium", "low") but these are: -- **Not probabilistic** - no confidence intervals or uncertainty quantification -- **Not calibrated** - "high" confidence doesn't map to a specific accuracy rate -- **Text-mining derived** - based on evidence text presence, not mechanistic validation - -**Attack Example:** -```python -# Agent chains multiple AOP assessments, each with "medium" confidence -# The aggregate appears to support a conclusion, but confidence is not additive -{ - "aop_1_confidence": "medium", # Based on sparse KE evidence - "aop_2_confidence": "medium", # Based on different sparse evidence - "aop_3_confidence": "medium", # Based on yet different sparse evidence -} -# Agent reports: "Multiple AOPs show consistent medium-to-high confidence" -``` - -**False Precision Pattern:** -- The schema allows `confidence_dimensions` to be reported without accompanying `uncertainty_quantification` -- No warning when confidence is inferred from path structure alone (without text evidence) - -**Cross-Reference:** V-005 (Confirmation Bias Accumulation) - ---- - -### V-003: PBPK Parameter P-Hacking (PBPK-MCP) - -**Severity:** 🔴 Critical -**Location:** `pbpk-mcp/src/mcp/tools/set_parameter_value.py` -**Attack Surface:** Parameter editing and sensitivity analysis - -**Description:** -The `set_parameter_value` tool allows direct manipulation of PBPK parameters with **minimal physiological plausibility guardrails**: - -```python -class SetParameterValueRequest(BaseModel): - simulation_id: str - parameter_path: str # No validation against physiological bounds - value: float # No range validation - unit: Optional[str] # Unit conversion but no sanity checks - update_mode: Optional[str] = "absolute" # "relative" mode compounds errors -``` - -**Attack Example - Parameter Inflation:** -```python -# Agent systematically tweaks clearance parameters 
until desired outcome -for clearance_factor in [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - set_parameter_value( - parameter_path="Liver|Clearance", - value=baseline * clearance_factor - ) - result = run_simulation() - if result.auc < safety_threshold: - return f"Model shows safe clearance at factor {clearance_factor}" -# No audit trail of parameter exploration; best result reported -``` - -**Missing Guardrails:** -- No physiological bounds checking (e.g., liver blood flow cannot exceed cardiac output) -- No parameter correlation enforcement (changing one parameter should affect correlated ones) -- No "p-hacking detection" for systematic parameter sweeps -- The `run_sensitivity_analysis` tool doesn't flag when results are cherry-picked - -**Cross-Reference:** V-005 (Confirmation Bias Accumulation) - ---- - -## 🟠 HIGH SEVERITY VULNERABILITIES - -### V-004: Read-Across Analogue Bias (O-QT-MCP) - -**Severity:** 🟠 High -**Location:** `oqt-mcp/schemas/oqtReadAcrossSummary.v1.json` -**Attack Surface:** Grouping and read-across justification - -**Description:** -The `build_grouping_justification` tool can suggest read-across from chemicals that are **structurally similar but toxicologically divergent**. 
The schema requires: - -- `structure_comparison` - structural similarity assessment -- `physicochemical_comparison` - physchem property comparison -- BUT: **No mechanistic justification gate** for Mode of Action (MOA) alignment - -**Attack Example - Analogue Bias:** -```python -# Agent groups chemicals by structural similarity alone -{ - "tool": "build_grouping_justification", - "arguments": { - "identifier": "Target_Chemical", - "analogue_identifiers": ["Analogue_A", "Analogue_B"], - "profiler_guids": ["structural_profiler_only"] # No MOA profiler - } -} -# Returns: grouping justification showing high structural similarity -# Problem: Target has genotoxic MOA, analogues have non-genotoxic MOA -``` - -**Schema Weakness:** -The `oqtReadAcrossSummary.v1.json` schema includes `applicabilityDomain` but it's a **qualitative field** without: -- MOA concordance scoring -- Toxicodynamic similarity metrics -- Mechanistic alert flags - -**Cross-Reference:** V-001 (Missing AD Enforcement) - ---- - -### V-005: Confirmation Bias Accumulation Across Tool Chain - -**Severity:** 🟠 High -**Location:** Cross-suite (comptox → oqt → aop → pbpk) -**Attack Surface:** Multi-tool chaining workflows - -**Description:** -There is **NO meta-assessment tool** that tracks epistemic uncertainty propagation across the tool chain. 
When an AI agent chains: - -``` -search_chemical → profile_chemical → run_qsar → assess_aop → run_pbpk -``` - -Each step can: -- Generate predictions with unquantified uncertainty -- Pass "confidence" forward without uncertainty accumulation -- Filter evidence that doesn't support the emerging conclusion - -**Attack Example - Confirmation Bias Chain:** -```python -# Step 1: Search finds chemical -search_result = search_chemicals("mystery_compound") - -# Step 2: Profiling shows some alerts (but agent focuses on benign ones) -profile = run_profiler(profiler_guid="safe_profiler") - -# Step 3: QSAR prediction with AD warning (agent ignores warning) -qsar = run_qsar_prediction(chem_id, model_guid="safe_model") -# AD note: "Chemical outside training domain" buried in metadata - -# Step 4: AOP assessment finds supportive pathway (ignores contradictory ones) -aop = assess_aop_confidence(aop_id="supportive_aop") - -# Step 5: PBPK with tweaked parameters shows favorable kinetics -set_parameter_value(parameter_path="clearance", value=high_value) -pbpk = run_simulation() - -# Final conclusion: "Multiple lines of evidence support safety" -# Reality: Each step had warnings that were filtered out -``` - -**Missing Safeguard:** -- No `uncertainty_propagation` tool -- No `evidence_contradiction_detection` across modules -- No `confidence_calibration` across the chain - -**Cross-Reference:** All other vulnerabilities - ---- - -## 🟡 MEDIUM SEVERITY VULNERABILITIES - -### V-006: CompTox Evidence Federation Gaps (CompTox-MCP) - -**Severity:** 🟡 Medium -**Location:** `comptox-mcp` (evidence federation) -**Attack Surface:** Multi-source evidence aggregation - -**Description:** -The CompTox-MCP federates evidence from multiple EPA sources but: -- **No source conflict resolution** - when sources disagree, all are presented equally -- **No evidence quality weighting** - high-quality studies not distinguished from preliminary data -- **No temporal decay** - older studies not flagged as 
potentially superseded - -**Attack Example:** -```python -# Agent can selectively cite evidence from conflicting sources -{ - "bioactivity_assays": [ - {"source": "ToxCast", "result": "inactive", "quality": "high"}, - {"source": "legacy_study", "result": "active", "quality": "low"} - ] -} -# Agent reports: "Study shows activity" (citing only legacy_study) -``` - ---- - -### V-007: Qualitative Uncertainty Masking (All Modules) - -**Severity:** 🟡 Medium -**Location:** Cross-suite schemas -**Attack Surface:** Uncertainty reporting - -**Description:** -All ToxMCP modules use **qualitative uncertainty descriptors** that mask underlying quantitative uncertainty: - -| Module | Uncertainty Field | Values | Problem | -|--------|------------------|--------|---------| -| O-QT | `accepted_uncertainty_level` | "low", "medium", "high" | No probabilistic meaning | -| AOP | `confidence_dimensions` | "high", "medium", "low" | Not calibrated | -| PBPK | `qualificationLevel` | "qualified", "unqualified" | Binary when continuous needed | -| CompTox | `evidence_quality` | "high", "medium", "low" | Subjective | - -**Attack Example:** -```python -# Agent can interpret "medium" uncertainty differently based on desired conclusion -if supporting_conclusion: - interpret("medium") = "acceptable for decision-making" -else: - interpret("medium") = "requires further study" -``` - ---- - -## Attack Surface Summary Matrix - -| Attack Vector | O-QT-MCP | AOP-MCP | PBPK-MCP | CompTox-MCP | Severity | -|--------------|----------|---------|----------|-------------|----------| -| False confidence from out-of-domain predictions | ✅ | ❌ | ❌ | ❌ | 🔴 | -| Confidence interpolation without calibration | ❌ | ✅ | ❌ | ❌ | 🔴 | -| Parameter p-hacking | ❌ | ❌ | ✅ | ❌ | 🔴 | -| Read-across analogue bias | ✅ | ❌ | ❌ | ❌ | 🟠 | -| Confirmation bias accumulation | ✅ | ✅ | ✅ | ✅ | 🟠 | -| Evidence selection bias | ✅ | ✅ | ❌ | ✅ | 🟡 | -| Qualitative uncertainty masking | ✅ | ✅ | ✅ | ✅ | 🟡 | - ---- - -## Concrete Attack 
Scenarios - -### Scenario 1: The "Safe by Design" Deception - -**Goal:** Convince stakeholders a hazardous chemical is safe - -**Attack Chain:** -1. Use O-QT-MCP to run QSAR models, selecting only those with favorable predictions -2. Ignore applicability domain warnings (buried in metadata) -3. Use AOP-MCP to find pathways where the chemical doesn't trigger key events -4. Use PBPK-MCP with inflated clearance parameters to show rapid elimination -5. Present conclusion: "Multiple independent lines of evidence support safety" - -**Vulnerabilities Exploited:** V-001, V-002, V-003, V-005 - ---- - -### Scenario 2: The "Toxic by Association" Smear - -**Goal:** Falsely associate a competitor's chemical with toxicity - -**Attack Chain:** -1. Use O-QT-MCP grouping to find structurally similar analogues with known toxicity -2. Ignore MOA differences (no mechanistic gate) -3. Build read-across dossier showing "consistent toxicity pattern" -4. Use AOP-MCP to construct speculative pathway linking chemical to adverse outcome -5. Present conclusion: "Read-across and AOP analysis indicate significant concern" - -**Vulnerabilities Exploited:** V-004, V-002, V-005 - ---- - -### Scenario 3: The "Confidence Inflation" Report - -**Goal:** Generate a report with inflated confidence metrics - -**Attack Chain:** -1. Run multiple QSAR predictions (O-QT-MCP) - each returns "medium" confidence -2. Run AOP assessments (AOP-MCP) - each returns "medium" confidence -3. Run PBPK simulations (PBPK-MCP) with favorable parameter sets -4. Aggregate results without uncertainty propagation -5. Present conclusion: "Consistent medium-to-high confidence across all assessments" - -**Vulnerabilities Exploited:** V-002, V-003, V-005, V-007 - ---- - -## Recommendations - -### Immediate (Critical) - -1. **Implement quantitative ADI calculation** in O-QT-MCP with hard gates for out-of-domain predictions -2. **Add confidence calibration** to AOP-MCP with explicit uncertainty quantification -3. 
**Implement physiological plausibility checks** in PBPK-MCP parameter editing -4. **Create uncertainty propagation tool** for cross-suite workflows - -### Short-term (High) - -5. **Add MOA concordance scoring** to O-QT-MCP read-across -6. **Implement evidence contradiction detection** across modules -7. **Add p-hacking detection** for systematic parameter exploration - -### Medium-term (Medium) - -8. **Standardize uncertainty representation** across all modules (probabilistic where possible) -9. **Implement evidence quality weighting** in CompTox-MCP -10. **Add temporal decay flags** for older studies - ---- - -## Conclusion - -The ToxMCP ecosystem, while innovative, contains significant attack surfaces that could be exploited to generate misleading toxicological conclusions. The most critical vulnerabilities are: - -1. **Missing AD enforcement** allowing out-of-domain predictions -2. **False precision** in confidence scores without calibration -3. **Parameter manipulation** without physiological guardrails -4. **No uncertainty propagation** across tool chains - -An AI agent with access to these tools could systematically exploit these vulnerabilities to build a case for virtually any predetermined conclusion, while appearing to follow rigorous scientific protocols. 
- -**The appearance of rigor is the most dangerous vulnerability of all.** - ---- - -*Report generated by Scientific Adversary Agent for ToxMCP Security Audit* diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_contract_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_contract_audit_report.md deleted file mode 100644 index 5c98c97..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_contract_audit_report.md +++ /dev/null @@ -1,399 +0,0 @@ -# ToxMCP Suite: Contract Layer Architecture Audit Report -## Cross-Suite Orchestration Analysis - -**Audit Date:** 2026-04-15 -**Auditor:** Cross-Suite Orchestration Architect -**Scope:** comptox-mcp, oqt-mcp, aop-mcp, pbpk-mcp - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. - - - -## Executive Summary - -The ToxMCP suite demonstrates sophisticated modular architecture with clear domain boundaries, but critical gaps exist in the **Contract Layer** that prevent coherent cross-suite workflows. The "Swiss Army Knife" problem is real: each module is sharp individually, but they lack the integration mechanisms to form a *coherent argument*. - -### Key Finding: Orchestrator Responsibility Is Documented but Not Implemented -The documentation repeatedly references a "downstream orchestrator" and "future ToxClaw orchestration layer" but **no such orchestrator exists** in the codebase. This is the single most critical architectural gap. - ---- - -## 1. 
Contract Drift Analysis - -### 🔴 CRITICAL: Evidence Block Structural Incompatibility - -| Module | Evidence Block Structure | Incompatibility | -|--------|-------------------------|-----------------| -| **CompTox-MCP** | `hazardEvidenceSummary.v1.json` - Flat structure with `datasets[]`, `keyFindings[]` | No `evidenceBlocks` wrapper | -| **O-QT-MCP** | `oqtHazardEvidenceSummary.v1.json` - Nested `evidenceBlocks{endpointData, profiling, metabolism, qsar}` | Uses `evidenceBlock` with `status`, `basis`, `keyEvidence[]` | -| **AOP-MCP** | `get_ker.response.schema.json` - `evidence_blocks{biological_plausibility, empirical_support, quantitative_understanding}` | Uses `evidenceBlock` with `text`, `heuristic_call`, `basis` | - -### Specific Contract Drift Examples - -#### 1.1 Field Name Inconsistencies (camelCase vs snake_case) - -``` -CompTox: "chemicalRef", "keyFindings", "sourceDataset" -O-QT: "chemicalIdentity", "endpointSummaries", "evidenceBlocks" -AOP: "overall_applicability", "evidence_blocks", "heuristic_call" -``` - -**File References:** -- `comptox-mcp/schemas/hazardEvidenceSummary.v1.json` (lines 16-45) -- `oqt-mcp/schemas/oqtHazardEvidenceSummary.v1.json` (lines 40-70) -- `aop-mcp/docs/contracts/schemas/read/get_ker.response.schema.json` (lines 125-134) - -#### 1.2 Evidence Block Schema Mismatch - -**O-QT `evidenceBlock` (lines 460-499):** -```json -{ - "summary": "string|null", - "status": "coverageState", - "basis": "string", - "keyEvidence": ["string"], - "references": ["referenceRecord"], - "provenanceRecords": ["provenanceRecord"] -} -``` - -**AOP `evidenceBlock` (lines 160-171):** -```json -{ - "text": "string|null", - "heuristic_call": "string", - "basis": "string", - "references": ["object"], - "provenance": ["provenanceRecord"] -} -``` - -**Transformation Loss:** A CompTox hazard evidence block CANNOT be directly consumed by AOP-MCP draft authoring without field mapping: -- `keyFindings[]` -> `evidence_blocks` requires manual transformation -- 
`confidence` (0-1 float in CompTox) -> `heuristic_call` (string in AOP) -- No shared `provenanceRecord` structure - -### 🟠 HIGH: Unit Mismatches - -**CompTox hazard evidence:** -- Uses `"unit": "log_mg_kg"` (line 667 in interop.py) -- ToxValDB: `mg/kg`, `uM`, `ppm` (mixed) - -**O-QT QSAR findings:** -- `"unit": "string"` (line 267-268 in oqtHazardEvidenceSummary.v1.json) -- No standardization enforced - -**PBPK context:** -- HTTK: `L/h/kg`, `1/hr` -- ADME/IVIVE: `L/h/kg` - -**Risk:** Downstream orchestrator must handle unit conversion without explicit metadata about unit systems. - -### 🟠 HIGH: Ontology Versioning Conflicts - -**AOP-MCP:** -- Uses AOP-Wiki RDF/SPARQL with OECD AOP-KB -- `assess_aop_confidence.response.schema.json` includes `oecd_alignment` field -- References "OECD-aligned" contracts throughout - -**CompTox-MCP:** -- Uses internal AOP crosswalk (bioactivity_aop mappings) -- `aopLinkageSummary.v1.json` has different `mapping` structure - -**Gap:** No shared ontology registry or version negotiation mechanism exists. - ---- - -## 2. The Orchestrator Gap - -### 🔴 CRITICAL: Missing Meta-Reasoning Layer - -**Finding:** The "downstream orchestrator" is referenced 20+ times across documentation but **DOES NOT EXIST** in the codebase. - -**Documentation References:** -- `oqt-mcp/docs/architecture.md` (line 47-56): "A downstream orchestrator sits above O-QT MCP" -- `oqt-mcp/docs/integration_orchestrators.md` (line 57): "Final suite-level evidence synthesis belongs in a downstream orchestrator" -- `comptox-mcp/docs/architecture_overview.md` (line 105): "future ToxClaw orchestration layer" - -**What the Orchestrator Should Do (but doesn't exist):** -1. **Evidence Deduplication:** Prevent double-counting when CompTox and O-QT both report similar hazard findings -2. **Contradiction Detection:** Flag when CompTox says "non-toxic" but AOP suggests "liver injury via different pathway" -3. 
**Cross-Module Consistency:** Ensure PBPK simulation results align with hazard evidence -4. **Narrative Coherence:** Verify PDF report from O-QT doesn't contradict PBPK results - -### 🟠 HIGH: No Narrative Consistency Checker - -**Example Scenario:** -``` -CompTox-MCP: "No genotoxicity signal detected in ToxCast assays" -AOP-MCP: "AOP 42: Liver steatosis via PPARG activation" -O-QT-MCP: "Profiler alert: potential DNA binding mechanism" -PBPK-MCP: "High hepatic concentration predicted" -``` - -**Question:** Where is the component that detects the tension between "no genotoxicity" and "DNA binding mechanism"? - -**Answer:** Nowhere. Each module operates in isolation. - -### 🟡 MEDIUM: GenRA Orchestrator is Experimental-Only - -**File:** `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` - -The `GenRAOrchestrator` class exists but: -- Is marked as **experimental** in architecture docs -- Only handles CompTox-internal workflows -- Does NOT integrate with O-QT, AOP, or PBPK modules -- Has no cross-module transaction management - ---- - -## 3. Transaction Boundaries - -### 🔴 CRITICAL: No Cross-Module Rollback Mechanism - -**Scenario Analysis:** - -``` -1. O-QT-MCP successfully generates grouping dossier -2. AOP-MCP fails to retrieve AOP (SPARQL timeout) -3. PBPK-MCP simulation completes -4. CompTox-MCP evidence pack assembly fails (API error) -``` - -**Current Behavior:** -- Each module operates independently -- No distributed transaction coordinator -- Partial results can be returned without context - -**Risk:** System can produce **partial, misleading safety reports** with missing context. 
- -**Evidence from Code:** -- `oqt-mcp/docs/architecture.md` (line 66): "Async queue and persistence layer remain roadmap work" -- `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` (lines 91-114): Error handling only within single workflow, no cross-module coordination - -### 🟠 HIGH: PBPK Has Session Registry, Others Don't - -**PBPK-MCP:** -- Has `mcp.session_registry` for simulation handles -- Supports job queue with Redis -- Has rollback via snapshot mechanism - -**Other Modules:** -- No session registry -- No job persistence -- No rollback capability - -**Gap:** Inconsistent state management across suite. - ---- - -## 4. Schema Evolution Strategy - -### 🔴 CRITICAL: No Schema Registry or Version Negotiation - -**Current State:** - -| Schema | Version | Version Detection | -|--------|---------|-------------------| -| `oqtWorkflowRecord.v1.json` | v1 | Hardcoded `const: "v1"` | -| `oqtHazardEvidenceSummary.v1.json` | v1 | Hardcoded `const: "v1"` | -| `hazardEvidenceSummary.v1.json` | v1 | In filename only | -| `aopLinkageSummary.v1.json` | v1 | In filename only | - -**Problems:** -1. **No schema registry** - Consumers cannot discover available versions -2. **No version negotiation** - Cannot request `v1` vs `v2` at runtime -3. **Breaking changes undefined** - No migration path documented - -**File References:** -- `oqt-mcp/schemas/oqtWorkflowRecord.v1.json` (lines 26-28): Hardcoded version -- `comptox-mcp/schemas/README.md`: "Portable schema versions are intentionally independent from package patch releases" - -### 🟠 HIGH: Inconsistent Version Declaration Patterns - -**O-QT Pattern (explicit):** -```json -"schemaName": { "const": "oqtWorkflowRecord" }, -"schemaVersion": { "const": "v1" } -``` - -**CompTox Pattern (implicit):** -```json -"$id": "https://epa.gov/comptox/schemas/hazardEvidenceSummary.v1.json" -``` - -**AOP Pattern (none):** -```json -"$schema": "https://json-schema.org/draft/2020-12/schema" -// No version in schema itself -``` - ---- - -## 5. 
Integration Anti-Patterns Catalog - -### Anti-Pattern 1: "Hope for the Best" Integration -**Evidence:** `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` (lines 388-411) -```python -try: - evidence_pack = self.interop_resource.assemble_comptox_evidence_pack(...) - aop_summary = self.interop_resource.build_aop_linkage_summary(...) - pbpk_bundle = self.interop_resource.build_pbpk_context_bundle(...) -except Exception as exc: - guardrails.append(...) - return None -``` - -**Problem:** Interop attachments can fail silently; no retry or compensation logic. - -### Anti-Pattern 2: "Every Module for Itself" Provenance -**CompTox Provenance:** -```json -{ - "sourceMcp": "epacomp-tox-mcp", - "generatedAt": "timestamp", - "sources": [...] -} -``` - -**O-QT Provenance:** -```json -{ - "workflowId": "string", - "sourceSystem": "string", - "generatedBy": "string", - "generatedAt": "timestamp" -} -``` - -**AOP Provenance:** -```json -{ - "source": "string", - "field": "string", - "transformation": "string|null", - "confidence": "string|null" -} -``` - -**Problem:** Three different provenance structures; no unified audit trail. - -### Anti-Pattern 3: Ambiguous Orchestrator Ownership -The orchestrator is simultaneously: -- Essential for final synthesis (per docs) -- Non-existent in code -- Referenced as "future ToxClaw layer" - ---- - -## 6. Swiss Army Knife Problem Assessment - -### Can the Tools Form a Coherent Argument? 
- -| Capability | Status | Gap | -|------------|--------|-----| -| Individual hazard assessment | Working | - | -| Individual AOP discovery | Working | - | -| Individual QSAR prediction | Working | - | -| Individual PBPK simulation | Working | - | -| Cross-module evidence fusion | Missing | No orchestrator | -| Contradiction detection | Missing | No meta-reasoning | -| Narrative consistency | Missing | No validation layer | -| Decision recommendation | Missing | Out of scope per design | - -### The Core Issue - -Each module correctly declares: -- `decisionBoundary.supportedDecisions` -- `decisionBoundary.prohibitedDecisions` -- `decisionOwner` - -But there's **no consumer** of these declarations. The orchestrator that should read these boundaries and make cross-module decisions doesn't exist. - ---- - -## 7. Recommendations - -### Immediate (High Priority) - -1. **Define the Orchestrator Interface** - - Create `toxmcp-orchestrator` repository - - Define contract for cross-module evidence fusion - - Implement contradiction detection engine - -2. **Standardize Evidence Blocks** - - Create `toxmcp-evidence-schema` shared package - - Unify `evidenceBlock` structure across all modules - - Version all schemas with explicit negotiation - -3. **Implement Transaction Coordination** - - Add saga pattern for cross-module workflows - - Define compensation actions for each module - - Create unified session registry - -### Medium Term - -4. **Build Meta-Reasoning Layer** - - Implement confidence aggregation across modules - - Create ontology alignment service - - Build narrative consistency validator - -5. 
**Schema Registry** - - Deploy central schema registry - - Implement version negotiation protocol - - Add schema compatibility testing - ---- - -## Appendix: File Reference Index - -### Schema Files Analyzed -- `comptox-mcp/schemas/hazardEvidenceSummary.v1.json` -- `comptox-mcp/schemas/aopLinkageSummary.v1.json` -- `comptox-mcp/schemas/comptoxEvidencePack.v1.json` -- `oqt-mcp/schemas/oqtHazardEvidenceSummary.v1.json` -- `oqt-mcp/schemas/oqtReadAcrossSummary.v1.json` -- `oqt-mcp/schemas/oqtWorkflowRecord.v1.json` -- `aop-mcp/docs/contracts/schemas/read/get_ker.response.schema.json` -- `aop-mcp/docs/contracts/schemas/read/assess_aop_confidence.response.schema.json` - -### Documentation Files Analyzed -- `oqt-mcp/docs/architecture.md` -- `oqt-mcp/docs/integration_orchestrators.md` -- `oqt-mcp/docs/cross_suite_alignment_2026.md` -- `comptox-mcp/docs/architecture_overview.md` -- `aop-mcp/docs/architecture.md` -- `pbpk-mcp/docs/mcp-bridge/architecture.md` - -### Code Files Analyzed -- `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` -- `comptox-mcp/src/epacomp_tox/orchestrator/offline.py` -- `comptox-mcp/src/epacomp_tox/resources/interop.py` - ---- - -## Summary of Findings by Severity - -### 🔴 Critical (4) -1. **Missing Orchestrator:** The downstream orchestrator referenced throughout docs does not exist -2. **Evidence Block Incompatibility:** CompTox, O-QT, and AOP use incompatible evidence block structures -3. **No Cross-Module Rollback:** Partial failures can produce misleading safety reports -4. **No Schema Registry:** No version negotiation or discovery mechanism - -### 🟠 High (5) -1. **No Narrative Consistency Checker:** No component validates coherence across module outputs -2. **Unit Mismatches:** Different unit systems without conversion metadata -3. **Inconsistent Version Patterns:** Each module uses different version declaration -4. **Inconsistent State Management:** PBPK has session registry; others don't -5. 
**Ontology Versioning Conflicts:** No shared ontology registry - -### 🟡 Medium (2) -1. **GenRA Orchestrator is Experimental:** Internal-only, not cross-module -2. **Provenance Structure Divergence:** Three different provenance formats - ---- - -**Audit Complete** diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_future_proofing_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_future_proofing_audit_report.md deleted file mode 100644 index a0c3eb3..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_future_proofing_audit_report.md +++ /dev/null @@ -1,209 +0,0 @@ -# ToxMCP Suite - Future-Proofing & Standards Audit Report (Reviewed Copy) - -**Review date:** 2026-04-15 -**Scope:** `comptox-mcp`, `oqt-mcp`, `aop-mcp`, `pbpk-mcp` -**Focus:** Migration resilience for MCP, schema evolution, ontology drift, and provider coupling - ---- - -## Important update in this reviewed copy - -The original report treated streaming and transport changes as mostly future events. -This reviewed copy updates the framing: - -- Streamable HTTP is already part of the public MCP specification lineage. -- The current public MCP roadmap is focused on **evolving transport and session handling for scale**, not on introducing a large set of new official transports. -- The highest-value future-proofing question for ToxMCP is therefore **migration resilience**, not speculative feature timing. - ---- - -## Executive summary - -The original package correctly identified that the suite has several durability risks: - -1. **Transport/protocol logic is fragmented across repos** -2. **Schema/version handling is inconsistent** -3. **Ontology evolution is under-governed** -4. **Provider and model coupling is stronger than ideal** -5. **Binary/large artifact handling is not abstracted cleanly enough** - -These are best understood as **migration-cost multipliers**. -Even if every repo works today, the cost of adapting the suite to protocol, ontology, or provider change may be much higher than it needs to be. 
- ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| FUT-01 | MCP transport handling is too repo-local | **High** | Observed | High | Transport change will likely require repeated work unless abstraction is shared | -| FUT-02 | Capability/version negotiation strategy is underdefined | **High** | Observed + standards note | Medium-High | Compatibility drift is likely as clients and servers evolve | -| FUT-03 | Schema evolution and registry discipline are insufficient | **High** | Observed | High | Cross-suite breakage risk grows as contracts change | -| FUT-04 | Ontology/version drift is under-managed | **High** | Observed + inferred | Medium-High | Historical comparability and interoperability may degrade over time | -| FUT-05 | Provider/model coupling is stronger than ideal | **Medium / High** | Observed | Medium-High | Supplier or API change could have outsized migration cost | -| FUT-06 | Binary/large artifact handling needs a clearer boundary | **Medium / High** | Observed + inferred | Medium | Performance and compatibility cost can rise as outputs get richer | - ---- - -## FUT-01: MCP transport handling is too repo-local -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The original report was right that transport logic is spread across repos. -That means even modest protocol evolution can create duplicated upgrade work. - -### Reviewed framing -This is not mainly a prediction about a specific future transport. 
-It is a present-day software architecture issue: -- transport concerns are not centralized enough -- compatibility behavior is harder to test consistently -- protocol changes may require multiple parallel migrations - -### Recommended control -Introduce a shared transport boundary or library that owns: -- protocol version selection -- capability negotiation -- request/response envelope handling -- streaming/session abstractions -- compatibility tests - ---- - -## FUT-02: Capability and version negotiation need explicit policy -**Severity:** **High** -**Evidence basis:** Observed + standards note -**Confidence:** Medium-High - -Hardcoded or uneven protocol-version handling increases: -- brittle client/server pairings -- ambiguous fallback behavior -- upgrade risk across repos - -### Recommended control -- define a single suite-level compatibility policy -- make supported protocol versions discoverable -- test downgrade/upgrade behavior explicitly -- separate “what we support” from “what we prefer” - ---- - -## FUT-03: Schema evolution discipline is insufficient -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The original contract-layer and future-proofing work reinforce each other here. -Version numbers appear, but the suite still needs a clearer answer to: -- where schemas are registered -- how new versions are discovered -- how breaking changes are communicated -- how older artifacts remain readable - -### Recommended control -- maintain a schema registry or index -- document compatibility guarantees -- ship transformers or adapters for version transitions -- add contract tests at cross-repo boundaries - ---- - -## FUT-04: Ontology evolution is under-managed -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -This is especially relevant for `aop-mcp`, but it affects the full suite whenever ontology-backed concepts appear in downstream records or reports. 
- -### Risk pattern -- ontology or taxonomy changes upstream -- local normalization still succeeds syntactically -- semantic meaning or comparability changes silently -- historical artifacts become harder to compare or trust - -### Recommended control -- persist ontology/version provenance -- define remapping/deprecation policy -- test historical artifact interpretation against changed ontology states - ---- - -## FUT-05: Provider and model coupling should be loosened -**Severity:** **Medium / High** -**Evidence basis:** Observed -**Confidence:** Medium-High - -The original package noted provider-specific assumptions in several places. -That matters because: -- pricing can change -- APIs can shift -- naming and capabilities evolve -- fallback behavior can be unclear - -### Recommended control -- define internal capability contracts rather than provider names -- keep provider adapters narrow -- record provider/model identity in provenance -- test fallback behavior intentionally, not incidentally - ---- - -## FUT-06: Artifact and binary handling need a cleaner abstraction -**Severity:** **Medium / High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium - -As the suite produces richer artifacts, handling everything as JSON payloads or per-repo conventions can create: -- overhead -- streaming friction -- inconsistent client behavior -- duplicated logic - -### Recommended control -- define a clear artifact abstraction -- separate metadata from large payload transport -- make artifact lineage and content-type handling consistent across repos - ---- - -## What changed from the original report - -### 1. Timing claims were softened -The reviewed copy avoids speculative statements tied to a single quarter unless backed by current public roadmap material. - -### 2. “Streaming gap” became “migration resilience gap” -The stronger and more durable claim is not that one specific feature is missing. 
-It is that the current suite structure makes protocol change expensive. - -### 3. Standards handling was made less theatrical and more operational -The reviewed copy emphasizes: -- compatibility policy -- shared abstractions -- migration tests -- version provenance - ---- - -## Recommended sequence - -### Immediate -- define shared MCP compatibility policy -- centralize transport/version handling strategy -- define schema ownership and versioning rules - -### Next -- add ontology/version provenance -- reduce provider-specific assumptions -- standardize artifact handling - -### Then -- add compatibility and migration test suites across repos -- document deprecation policy and supported-version windows - ---- - -## Final judgment - -The original package was right to worry about future change, but the best frame is **migration resilience**, not speculative roadmap drama. - -**Bottom line:** ToxMCP will be easier to evolve if transport, schema, ontology, and provider boundaries are made explicit now, while the suite is still small enough to refactor coherently. diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_observability_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_observability_audit_report.md deleted file mode 100644 index 4ea5013..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_observability_audit_report.md +++ /dev/null @@ -1,616 +0,0 @@ -# ToxMCP Observability & Debuggability Audit Report - -**Audit Date:** 2026-04-15 -**Auditor:** Observability & Debuggability Specialist -**Scope:** comptox-mcp, oqt-mcp, aop-mcp, pbpk-mcp -**Severity Legend:** 🔴 Critical | 🟠 High | 🟡 Medium | 🟢 Low - ---- - -> **Reviewed copy (2026-04-15):** This document was retained from the original package but lightly edited for consistency. -> Unless explicitly stated otherwise, code blocks are **reference implementations**, not validated patches, and scenario-based exploit narratives should not be read as reproduced proofs. 
- - - -## Executive Summary - -This audit reveals **significant observability gaps** across the ToxMCP ecosystem that will make production debugging extremely difficult. The most critical issues are: - -1. **No distributed tracing** - Cross-tool workflows are untraceable -2. **Missing feature attribution** - The "Why" gap makes classification results unexplainable -3. **No PII/PSI scrubbing** - Proprietary chemical structures logged in plaintext -4. **No replay capability** - Cannot debug without re-running expensive simulations -5. **No result diff tooling** - Divergent results cannot be analyzed - -**Debuggability Debt Score: 8.5/10 (Critical)** - ---- - -## Finding 1: The 'Why' Gap - Missing Feature Attribution 🔴 CRITICAL - -### Description -When O-QT returns a classification like "Class 1 (narcosis or baseline toxicity)", there is **no explanation of which molecular features triggered this classification**. The response contains only the classification result without feature-level attribution. - -### Evidence - -**File:** `oqt-mcp/src/tools/implementations/o_qt_qsar_tools.py` (lines 311-393) - -```python -async def run_qsar_prediction(smiles: str, model_id: str) -> dict: - """Runs a QSAR prediction.""" - # ... fetch prediction ... 
- result = { - "chem_id": chem_id, - "model_id": model_id, - "prediction": prediction, # <-- Contains ONLY the result, not WHY - "domain": domain, - "search_hits": hits, - } -``` - -The `prediction` object from the QSAR Toolbox API contains: -- `Value`: The predicted value -- `Unit`: The unit of measurement -- `DomainResult`: In/out of domain status -- **Missing:** Which molecular features contributed to this prediction -- **Missing:** Feature importance scores -- **Missing:** Structural alerts triggered - -### Concrete Example - -**Current Response:** -```json -{ - "prediction": { - "Value": "Class 1", - "DomainResult": "Inside applicability domain" - }, - "model_provenance": { - "title": "Verhaar Scheme for predicting toxicity mode of action" - } -} -``` - -**What Scientists Need:** -```json -{ - "prediction": { - "Value": "Class 1", - "DomainResult": "Inside applicability domain", - "feature_attribution": { - "triggered_rules": [ - { - "rule_id": "VERHAAR_001", - "description": "Non-reactive organic compound with logKow > 2.0", - "confidence": 0.94, - "contributing_fragments": ["C-C", "C-H"], - "molecular_features": { - "logKow": 3.2, - "reactive_groups": [] - } - } - ], - "explanation": "Class 1 assigned due to non-reactive nature and moderate lipophilicity consistent with narcosis mechanism" - } - } -} -``` - -### Impact -- **Regulatory Rejection:** Agencies (EPA, ECHA) require explainable predictions -- **Scientific Distrust:** Users cannot validate or challenge results -- **Debugging Impossibility:** When results are wrong, cannot determine if it's data issue, model issue, or bug - -### Cross-References -- Related to: Finding 4 (Replay Without Re-execution) - Cannot debug what you cannot explain -- Related to: Finding 5 (Result Diff) - Cannot diff without feature-level comparison - -### Recommendation -1. Extend `hazard_contracts.py` to include `feature_attribution` field -2. Parse profiler alerts from Toolbox response to extract triggered rules -3. 
Add `explain_prediction()` tool that returns human-readable rationale - ---- - -## Finding 2: Cross-Tool Tracing - No Distributed Trace IDs 🔴 CRITICAL - -### Description -When a user runs a workflow that hits CompTox → O-QT → AOP, there is **no distributed trace ID that links all three calls**. Each MCP server generates its own isolated correlation ID, making it impossible to see the full request graph. - -### Evidence - -**File:** `oqt-mcp/src/api/server.py` (lines 95-118) - -```python -@app.middleware("http") -async def audit_log_middleware(request: Request, call_next): - correlation_id = str(uuid.uuid4()) # <-- NEW UUID FOR EVERY REQUEST - request.state.correlation_id = correlation_id - # ... - response.headers["X-Request-ID"] = correlation_id -``` - -**File:** `aop-mcp/src/server/mcp/router.py` (lines 57-118) - -```python -async def mcp_endpoint(request: Request, response: Response): - # No correlation ID extraction from incoming request! - payload = await request.json() - # ... -``` - -**File:** `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` (lines 68-206) - -```python -def run_workflow(self, ..., workflow_run_id: Optional[str] = None): - run_id = workflow_run_id or str(uuid4()) # <-- Local only, not propagated - # No tracing context propagation to O-QT or AOP -``` - -### The Problem - -``` -User Request - │ - ├──► CompTox-MCP [X-Request-ID: abc-123] - │ └──► Calls O-QT API [X-Request-ID: def-456] ← NEW ID! - │ - ├──► O-QT-MCP [X-Request-ID: ghi-789] ← NEW ID! - │ - └──► AOP-MCP [X-Request-ID: jkl-012] ← NEW ID! - -Result: Cannot correlate the full workflow! 
-``` - -### What Should Happen (OpenTelemetry/W3C Trace Context) - -``` -User Request [trace-id: abc-123, span-id: xyz] - │ - ├──► CompTox-MCP [trace-id: abc-123, span-id: comp-1] - │ └──► Calls O-QT API [trace-id: abc-123, span-id: oqt-1, parent: comp-1] - │ - ├──► O-QT-MCP [trace-id: abc-123, span-id: oqt-2, parent: xyz] - │ - └──► AOP-MCP [trace-id: abc-123, span-id: aop-1, parent: xyz] - -Result: Full request graph visible in Jaeger/Zipkin! -``` - -### Impact -- **No End-to-End Visibility:** Cannot trace a chemical through the entire analysis pipeline -- **Latency Attribution Impossible:** Cannot determine which tool is causing slowdowns -- **Error Propagation Opaque:** Errors in one tool appear as failures in another - -### Cross-References -- Related to: Finding 5 (Result Diff) - Cannot correlate divergent results across tools - -### Recommendation -1. Implement W3C Trace Context propagation (`traceparent` header) -2. Add OpenTelemetry SDK to all MCP servers -3. Deploy Jaeger/Zipkin for distributed tracing visualization -4. Add span IDs to all log entries - ---- - -## Finding 3: Log Privacy Leakage - No PII/PSI Scrubbing 🔴 CRITICAL - -### Description -Toxicological data can be proprietary (new drug candidates). The logs capture **chemical structures (SMILES) and CAS numbers in plaintext** with no PII/PSI (Proprietary Substance Information) scrubbing filters. - -### Evidence - -**File:** `oqt-mcp/src/tools/implementations/o_qt_qsar_tools.py` (lines 311-315) - -```python -async def run_qsar_prediction(smiles: str, model_id: str) -> dict: - log.info( - f"Running QSAR prediction for SMILES: {smiles[:20]}... using model: {model_id}" - ) # <-- SMILES LOGGED IN PLAINTEXT! -``` - -**File:** `oqt-mcp/src/tools/registry.py` (lines 135-157) - -```python -# CRITICAL: This should be handled by a dedicated, immutable audit service in production -# Ensure PII/Sensitive data in params is sanitized before logging if necessary. 
-try: - logged_params = json.dumps(params, default=str, indent=2)[:500] # <-- NO SANITIZATION! -except Exception: - logged_params = "Params serialization failed" - -audit.emit( - { - "type": "tool_execution", - "tool": name, - "user_id": user.id, - "status": "success", - "params": logged_params, # <-- CONTAINS SMILES, CAS, CHEMICAL NAMES! - } -) -``` - -**File:** `oqt-mcp/src/api/server.py` (lines 95-118) - -```python -async def audit_log_middleware(request: Request, call_next): - # ... - event = { - "type": "http_request", - "correlation_id": correlation_id, - "user_id": user_id, - "method": request.method, - "path": request.url.path, - "status_code": response.status_code, - "duration_ms": round(duration_ms, 3), - # <-- NO SCRUBBING OF REQUEST BODY! - } - audit.emit(event) -``` - -### Concrete Privacy Leak Example - -**Log Entry (Current):** -```json -{ - "timestamp": "2026-04-15T10:30:00Z", - "level": "INFO", - "message": "Running QSAR prediction for SMILES: CC(C)Cc1ccc...", - "params": { - "smiles": "CC(C)Cc1ccc(C(C)C(=O)O)cc1", # <-- IBUPROFEN STRUCTURE! - "chemical_identifier": "15687-27-1", # <-- CAS NUMBER! - "preferred_name": "Ibuprofen" # <-- DRUG NAME! - } -} -``` - -**What It Should Be (Scrubbed):** -```json -{ - "timestamp": "2026-04-15T10:30:00Z", - "level": "INFO", - "message": "Running QSAR prediction for SMILES: [REDACTED]...", - "params": { - "smiles_hash": "sha256:a3f5c8...", # <-- HASH ONLY - "chemical_identifier": "[REDACTED]", - "preferred_name": "[REDACTED]", - "_debug": "PII scrubbed - see secure vault for original" - } -} -``` - -### Impact -- **Regulatory Violation:** GDPR, CCPA, and pharma confidentiality agreements breached -- **IP Theft Risk:** Competitors can extract chemical structures from logs -- **Audit Failure:** Compliance audits will flag this as critical finding - -### Cross-References -- Related to: Finding 1 (Why Gap) - Feature attribution requires chemical data, creating tension with privacy - -### Recommendation -1. 
Implement `PrivacyScrubber` class with regex patterns for: - - SMILES strings - - CAS numbers - - InChI/InChIKey - - Chemical names (dictionary-based) -2. Hash chemical identifiers for correlation without exposure -3. Store original values in encrypted sidecar for authorized debugging -4. Add `X-Confidentiality-Level` header to control scrubbing per-request - ---- - -## Finding 4: Replay Without Re-execution - No Record Mode 🟠 HIGH - -### Description -There is **no 'record mode' that caches deterministic responses**. Developers cannot replay an exact MCP tool call from last Tuesday without re-running the expensive simulation. - -### Evidence - -**File:** `oqt-mcp/src/qsar/client.py` (lines 55-165) - -```python -async def _request(self, method, path, *, params=None, json=None, ...): - # No caching layer! - # No VCR/recording mechanism! - async def _execute_request(): - # ... makes live HTTP request every time ... -``` - -**File:** `aop-mcp/src/instrumentation/cache.py` (lines 1-47) - -```python -class InMemoryCache(Cache): - """Simple cache abstraction with in-memory implementation.""" - # Only used for SPARQL query caching, not for: - # - Tool call recording - # - Response replay - # - Deterministic debugging -``` - -**File:** `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` (lines 350-378) - -```python -def _persist_bundle(self, bundle, ...): - # Saves bundle AFTER execution - # No recording of intermediate steps - # No ability to replay from checkpoint -``` - -### The Problem - -**Scenario:** A scientist reports: "Last Tuesday, O-QT said this chemical was Class 2, but today it says Class 1. Why?" - -**Current Debugging Process:** -1. Re-run the same query → May get different result (data drift?) -2. Check logs → No feature attribution (Finding 1) -3. Check cross-tool trace → No trace ID (Finding 2) -4. 
**Result:** Cannot determine cause of divergence - -**What Should Exist:** -```python -# Record mode for deterministic replay -@record_replay(cache_dir=".vcr_cassettes") -async def run_qsar_prediction(smiles: str, model_id: str) -> dict: - # First call: Records to .vcr_cassettes/qsar_abc123.yaml - # Subsequent calls: Replays from cassette (no API call!) - ... -``` - -### Impact -- **Debugging Cost:** Each debug session requires expensive re-execution -- **Non-Determinism:** Cannot distinguish between data drift and bugs -- **Regression Testing:** Cannot verify fixes without live APIs - -### Cross-References -- Related to: Finding 1 (Why Gap) - Replay without explanation is insufficient -- Related to: Finding 5 (Result Diff) - Replay enables diff comparison - -### Recommendation -1. Integrate VCR.py for HTTP recording/replay -2. Add `TOXMCP_RECORD_MODE` environment variable -3. Store cassettes with versioning for regression testing -4. Add `replay_from_cassette()` helper for debugging - ---- - -## Finding 5: Result Diff Tool - No Divergence Analysis 🟠 HIGH - -### Description -When two scientists get different results for the same query, there is **no 'result diff' tool** to determine if it's data drift, model drift, hardware floating-point differences, or a bug. - -### Evidence - -**Search Results:** No `diff`, `compare`, `divergence`, or `regression` tools found in any repository. - -**File:** `comptox-mcp/src/epacomp_tox/orchestrator/audit.py` (lines 1-99) - -```python -class AuditBundleStore: - """Durable storage for orchestrator audit bundles.""" - - def save(self, bundle, *, attachments=None): - # Saves bundles with checksums - # No comparison/diff functionality! - - def load_bundle(self, run_id: str) -> Dict[str, any]: - # Loads single bundle - # No cross-run comparison! 
-``` - -**File:** `pbpk-mcp/docs/mcp-bridge/audit-trail.md` (lines 94-98) - -```markdown -## Verification Tools -- `audit verify --from 2025-10-16` – Streams events, recomputes hash chain -- `audit replay --job job-uuid` – Reconstructs timeline for a specific job -# <-- NO `audit diff` TOOL! -``` - -### What Should Exist - -```python -class ResultDiffer: - """Compare two workflow results to identify divergence.""" - - def diff(self, run_id_a: str, run_id_b: str) -> DivergenceReport: - return { - "divergence_type": "MODEL_DRIFT", # or DATA_DRIFT, BUG, HARDWARE_FP - "confidence": 0.94, - "differences": [ - { - "path": "predictive.results[0].prediction.Value", - "old": "Class 2", - "new": "Class 1", - "explanation": "Model version changed from 2.1 to 2.2" - } - ], - "root_cause": { - "type": "model_update", - "details": "Verhaar scheme updated 2025-01-10" - } - } -``` - -### Impact -- **Scientific Disagreements:** Cannot resolve "I got different results" issues -- **Regression Detection:** Cannot detect when updates break existing analyses -- **Data Quality:** Cannot identify upstream data changes - -### Cross-References -- Related to: Finding 1 (Why Gap) - Diff requires feature-level comparison -- Related to: Finding 4 (Replay) - Diff requires ability to replay old results - -### Recommendation -1. Create `toxmcp-diff` CLI tool -2. Implement semantic diff for chemical predictions -3. Add divergence classification (data vs model vs bug) -4. Integrate with audit bundle storage - ---- - -## Finding 6: Missing Structured Health/Metrics Endpoints 🟡 MEDIUM - -### Description -Only O-QT has a basic health endpoint. No comprehensive metrics for monitoring tool success rates, latency percentiles, or error rates. 
- -### Evidence - -**File:** `oqt-mcp/src/api/server.py` (lines 135-142) - -```python -@app.get("/health") -async def health_check(): - return { - "status": "healthy", - "environment": settings.app.ENVIRONMENT, - "auth_bypassed": settings.security.BYPASS_AUTH, - "qsar_api_url": settings.qsar.QSAR_TOOLBOX_API_URL, - } -``` - -**Missing:** -- Tool success/error rates -- Latency histograms -- Queue depth (for async jobs) -- External dependency health (QSAR Toolbox, CompTox API) - -### Recommendation -1. Add Prometheus metrics endpoint (`/metrics`) -2. Export key metrics: - - `toxmcp_tool_invocations_total` (counter with tool, status labels) - - `toxmcp_tool_duration_seconds` (histogram) - - `toxmcp_external_api_health` (gauge) - ---- - -## Finding 7: Inconsistent Audit Event Schemas 🟡 MEDIUM - -### Description -Each MCP server uses a different audit event schema, making centralized analysis impossible. - -### Evidence - -**O-QT:** `oqt-mcp/src/tools/registry.py` -```python -audit.emit({ - "type": "tool_execution", - "tool": name, - "user_id": user.id, - "status": "success", - "params": logged_params, -}) -``` - -**AOP-MCP:** `aop-mcp/src/instrumentation/audit.py` -```python -# Only verifies draft checksums, no event emission! -def verify_audit_chain(draft: Draft) -> bool: - ... -``` - -**CompTox:** `comptox-mcp/src/epacomp_tox/orchestrator/audit.py` -```python -# File-based bundle storage, no structured events -class AuditBundleStore: - def save(self, bundle, *, attachments=None): - ... -``` - -### Recommendation -1. Define unified `ToxMcpAuditEvent` schema -2. Include: timestamp, trace_id, tool_name, user_id, duration, status, checksums -3. Implement in shared library across all MCP servers - ---- - -## Finding 8: No Floating-Point Determinism Controls 🟡 MEDIUM - -### Description -No controls for ensuring floating-point determinism across different hardware/platforms. 
- -### Impact -- Results may differ between Intel vs AMD, or CPU vs GPU -- Cannot reproduce results on different deployments - -### Recommendation -1. Document FP precision requirements -2. Add `deterministic_mode` flag for critical calculations -3. Use fixed-precision libraries where appropriate - ---- - -## Summary Table - -| Finding | Severity | Component | Effort to Fix | -|---------|----------|-----------|---------------| -| 1. Why Gap | 🔴 Critical | O-QT | 2-3 weeks | -| 2. Cross-Tool Tracing | 🔴 Critical | All | 1-2 weeks | -| 3. Log Privacy | 🔴 Critical | All | 1 week | -| 4. Replay Mode | 🟠 High | All | 2 weeks | -| 5. Result Diff | 🟠 High | All | 2-3 weeks | -| 6. Health/Metrics | 🟡 Medium | All | 3-5 days | -| 7. Audit Schema | 🟡 Medium | All | 1 week | -| 8. FP Determinism | 🟡 Medium | CompTox | 1 week | - ---- - -## Debuggability Debt Quantification - -| Category | Debt Score | Justification | -|----------|------------|---------------| -| Explainability | 9/10 | No feature attribution anywhere | -| Traceability | 8/10 | No distributed tracing, isolated correlation IDs | -| Privacy | 9/10 | Plaintext chemical structures in logs | -| Reproducibility | 8/10 | No record/replay, cannot debug without re-execution | -| Comparability | 8/10 | No diff tools for divergence analysis | -| **Overall** | **8.5/10** | **Critical debuggability debt** | - ---- - -## Priority Recommendations - -### Immediate (Week 1-2) -1. **Implement PII/PSI scrubbing** - Critical regulatory/compliance risk -2. **Add distributed trace context propagation** - Enable end-to-end visibility - -### Short-term (Week 3-4) -3. **Add feature attribution to O-QT responses** - Enable explainability -4. **Implement VCR recording/replay** - Enable deterministic debugging - -### Medium-term (Month 2) -5. **Build result diff tool** - Enable divergence analysis -6. 
**Unify audit event schemas** - Enable centralized monitoring - ---- - -## Appendix: File References - -### O-QT MCP -- `oqt-mcp/src/tools/implementations/o_qt_qsar_tools.py` - Main QSAR tools -- `oqt-mcp/src/tools/hazard_contracts.py` - Response contract builders -- `oqt-mcp/src/tools/provenance.py` - Provenance tracking -- `oqt-mcp/src/tools/registry.py` - Tool execution & audit logging -- `oqt-mcp/src/api/server.py` - HTTP server & middleware -- `oqt-mcp/src/qsar/client.py` - QSAR Toolbox API client -- `oqt-mcp/src/utils/audit.py` - Audit event emission -- `oqt-mcp/src/utils/logging.py` - Structured logging setup - -### AOP MCP -- `aop-mcp/src/server/mcp/router.py` - MCP request routing -- `aop-mcp/src/instrumentation/audit.py` - Draft audit chain verification -- `aop-mcp/src/instrumentation/cache.py` - In-memory caching -- `aop-mcp/src/instrumentation/metrics.py` - Basic metrics recording -- `aop-mcp/src/instrumentation/logging.py` - Structured logging - -### CompTox MCP -- `comptox-mcp/src/epacomp_tox/orchestrator/workflow.py` - Workflow orchestration -- `comptox-mcp/src/epacomp_tox/orchestrator/audit.py` - Audit bundle storage -- `comptox-mcp/src/epacomp_tox/orchestrator/utils.py` - Metadata sanitization - -### PBPK MCP -- `pbpk-mcp/docs/mcp-bridge/audit-trail.md` - Audit trail design document -- `pbpk-mcp/docs/mcp-bridge/monitoring.md` - Monitoring design document - ---- - -*End of Report* diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_regulatory_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_regulatory_audit_report.md deleted file mode 100644 index b5438b0..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_regulatory_audit_report.md +++ /dev/null @@ -1,230 +0,0 @@ -# ToxMCP Suite - Regulatory Survivability Audit Report (Reviewed Copy) - -**Review date:** 2026-04-15 -**Scope:** `comptox-mcp`, `oqt-mcp`, `aop-mcp`, `pbpk-mcp` -**Frameworks considered in the original package:** OECD GLP / data integrity expectations, 21 CFR Part 11, Annex 11, related 
regulated-use controls - ---- - -## Read this report carefully - -This reviewed copy preserves the original regulatory concerns but narrows the claim style. - -It uses the following rule: - -> The package can strongly identify **compliance-relevant design gaps**. -> It cannot, from the audit material alone, guarantee a specific regulator’s decision in a specific submission context. - -So this report prefers phrasing such as: -- **high risk of non-conformance** -- **likely unacceptable without compensating controls** -- **insufficient for defensible reconstruction** - -and avoids categorical claims such as: -- automatic FDA rejection -- automatic submission rejection - ---- - -## Executive summary - -The ToxMCP suite shows meaningful awareness of provenance and auditability, but the package still identifies several gaps that would matter for regulated or high-assurance use: - -1. **Historical reconstruction is incomplete** - The package does not show a suite-wide, fully reconstructable provenance envelope. - -2. **Audit trail semantics are not yet strong enough** - Several components rely on mechanisms that look audit-like but are not clearly tamper-evident end to end. - -3. **Electronic review/signature semantics are under-specified** - Draft authorship, review, and approval lineage are not yet represented robustly enough for stronger assurance contexts. - -4. **Determinism and version capture remain uneven** - Reproducibility depends on environment, data/version, ordering, and serialization choices that are not consistently captured. 
- -### Overall judgment -**Regulatory survivability is currently limited by provenance, reconstruction, and review-state design more than by any single missing field.** - ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| REG-01 | Historical reconstruction / “time-machine” capability is incomplete | **Critical** | Observed + inferred | High | Hard to defend what happened, with what versions and conditions, after the fact | -| REG-02 | Audit trails are present but not uniformly tamper-evident | **Critical** | Observed | High | Audit-looking records are not yet equivalent to stronger integrity controls | -| REG-03 | Review/signature semantics are too weak for higher-assurance use | **Critical** | Observed | High | Identity, meaning, and content binding need strengthening | -| REG-04 | Determinism and canonicalization are uneven | **High** | Observed | High | Reproducibility can drift across runtime/environment changes | -| REG-05 | Upstream provenance capture is incomplete | **Critical** | Observed + inferred | Medium-High | External data dependence is not consistently reconstructable | -| REG-06 | Cross-suite provenance contracts are not unified | **High** | Observed + inferred | Medium-High | Even good local controls can fail if downstream artifacts do not preserve them | - ---- - -## REG-01: Historical reconstruction is incomplete -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** High - -The package’s “time-machine” concern remains one of its strongest findings. 
- -### Why this matters -For defensible historical reconstruction, the system needs a record of: -- code version / commit -- package and environment state -- upstream data/version context -- input identity resolution -- model/tool versions -- human review/approval status -- final artifact lineage - -The audited material shows fragments of this, but not a single suite-wide mechanism that makes reconstruction routine. - -### Reviewed wording -This is best framed as: -- **high risk of non-conformance for regulated or high-assurance use** -- **insufficient historical defensibility without compensating controls** - ---- - -## REG-02: Audit records are not yet uniformly tamper-evident -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -The original package persuasively identified places where audit events or draft metadata can exist without: -- strong content binding -- mandatory previous-hash linkage -- verification on read -- clearly immutable storage semantics - -### Why this matters -An audit record is much more useful than a plain log line, but it is not equivalent to a verified integrity chain unless: -- the canonicalized content is defined, -- the chain is mandatory, -- and verification is part of normal operation. - ---- - -## REG-03: Electronic review/signature semantics are underdeveloped -**Severity:** **Critical** -**Evidence basis:** Observed -**Confidence:** High - -The package correctly highlighted missing or weak semantics around: -- reviewer identity -- signature meaning -- timestamp discipline -- signature-to-content linkage -- role or approval state - -### Reviewed wording -This is a **strong compliance gap finding**. -It is not, on its own, proof of a specific regulator outcome without intended-use and procedure context. 
- -### Practical implication -If the system is meant to support high-assurance draft approval or regulated record workflows, signature and approval state need to be explicit, verified, and preserved in lineage. - ---- - -## REG-04: Determinism and canonicalization need more discipline -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The package’s best examples here include: -- floating-point serialization for hashed records -- ordering assumptions in query results -- lack of explicit random-seed or environment recording - -### Why this matters -Two scientifically “same” runs can become operationally non-identical if: -- ordering differs, -- float serialization differs, -- environment changes are not captured, -- or a downstream artifact is regenerated under slightly different conditions. - ---- - -## REG-05: Upstream provenance capture remains too weak -**Severity:** **Critical** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -The package is strong in pointing out that upstream data dependence must be represented, not assumed. - -### Important refinement in the reviewed copy -The correct requirement is **not** “invent version headers.” -The requirement is to capture the strongest provenance and replay information the upstream actually makes available, and to supplement it internally where needed. 
- -That may include: -- provider release/version identifiers -- snapshot identifiers -- response hashes -- retrieval timestamps -- request parameters -- internal cache keys or mirror snapshots - ---- - -## REG-06: Cross-suite provenance contracts need to be unified -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium-High - -Local compliance-minded controls are less useful if downstream repos cannot reliably preserve: -- provenance fields -- review state -- uncertainty state -- version metadata -- signed-artifact lineage - -This is where the regulatory and contract-layer audits reinforce each other. - ---- - -## Recommended control architecture - -### 1. Suite-wide provenance envelope -A single record model carried across repos, including: -- input identity -- upstream retrieval data -- code/runtime snapshot -- tool outputs and hashes -- review and approval state -- artifact lineage - -### 2. Verified audit chain -Separate from ordinary developer logging: -- canonical event schema -- mandatory chaining -- content recomputation -- immutable or append-controlled storage semantics -- automated verification tests - -### 3. Explicit review/signature model -For higher-assurance flows: -- actor identity -- role -- meaning -- time -- content linkage -- revocation or supersession model - ---- - -## What to validate next - -- intended regulated-use context for each repo and output type -- what external procedural controls already exist -- how draft approval is meant to work in practice -- which provenance fields survive cross-repo handoffs -- whether deterministic hashing and ordering assumptions hold across environments - ---- - -## Final judgment - -The original package was right to focus on provenance, reconstruction, and review-state design. -Those remain the most important regulatory-survivability concerns in the suite. 
- -**Bottom line:** the package strongly supports the claim that ToxMCP still needs a more robust integrity and provenance model before it can be treated as ready for regulated or similarly high-assurance use. diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_remediation_snippets.py b/ToxMCP_Audit_Reviewed_v2/toxmcp_remediation_snippets.py deleted file mode 100644 index 2425f76..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_remediation_snippets.py +++ /dev/null @@ -1,463 +0,0 @@ -""" -ToxMCP Reviewed Remediation Snippets -=================================== - -This module contains implementation-oriented reference code derived from the -reviewed audit package. It is intentionally written as reference code rather -than a drop-in patch set. - -Important: -- These patterns still require repository-specific adaptation. -- Provider/version controls must use features the upstream actually supports. -- Signature verification is exposed via an injected verifier callback. -""" - -from __future__ import annotations - -from dataclasses import dataclass, field -from datetime import datetime, timezone -from decimal import Decimal -from pathlib import Path -from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Protocol -import hashlib -import json -import os -import platform -import subprocess -import unicodedata - - -# ============================================================================= -# Shared helpers -# ============================================================================= - -def utc_now() -> datetime: - """Return a timezone-aware UTC timestamp.""" - return datetime.now(timezone.utc) - - -def iso_utc(dt: datetime) -> str: - """Serialize datetime consistently in UTC.""" - if dt.tzinfo is None: - dt = dt.replace(tzinfo=timezone.utc) - return dt.astimezone(timezone.utc).isoformat().replace("+00:00", "Z") - - -def sha256_hex(data: bytes) -> str: - return hashlib.sha256(data).hexdigest() - - -def normalize_json_value(value: Any, *, fp_precision: 
int = 17) -> Any: - """ - Normalize a value for deterministic JSON hashing. - - Notes: - - Floats are normalized conservatively. - - NaN/Infinity are represented as strings because JSON itself does not - define canonical encodings for these values. - """ - if isinstance(value, float): - if value != value: # NaN - return "NaN" - if value == float("inf"): - return "Infinity" - if value == float("-inf"): - return "-Infinity" - if value == 0.0: - return 0.0 - return round(value, fp_precision) - if isinstance(value, Decimal): - return format(value, "f") - if isinstance(value, datetime): - return iso_utc(value) - if isinstance(value, Mapping): - return {str(k): normalize_json_value(v, fp_precision=fp_precision) for k, v in sorted(value.items(), key=lambda item: str(item[0]))} - if isinstance(value, (list, tuple)): - return [normalize_json_value(v, fp_precision=fp_precision) for v in value] - return value - - -def canonical_json_bytes(value: Any) -> bytes: - normalized = normalize_json_value(value) - return json.dumps( - normalized, - sort_keys=True, - ensure_ascii=True, - separators=(",", ":"), - ).encode("utf-8") - - -# ============================================================================= -# Audit trail reference model -# ============================================================================= - -class AuditStorage(Protocol): - def append(self, event: "RegulatoryAuditEvent") -> None: - ... - - def read_all(self) -> Iterable["RegulatoryAuditEvent"]: - ... 
- - -class InMemoryAuditStorage: - """Simple storage backend for examples and tests.""" - - def __init__(self) -> None: - self._events: List[RegulatoryAuditEvent] = [] - - def append(self, event: "RegulatoryAuditEvent") -> None: - self._events.append(event) - - def read_all(self) -> Iterable["RegulatoryAuditEvent"]: - return list(self._events) - - -@dataclass(frozen=True) -class RegulatoryAuditEvent: - """Reference audit-event envelope for higher-assurance workflows.""" - - event_id: str - event_type: str - timestamp_utc: datetime - user_id: str - session_id: str - payload: Dict[str, Any] - previous_hash: str - content_hash: str - service_version: str - git_commit: str - upstream: Dict[str, Any] = field(default_factory=dict) - signature: Optional[str] = None - - @staticmethod - def build( - *, - event_id: str, - event_type: str, - user_id: str, - session_id: str, - payload: Dict[str, Any], - previous_hash: str, - service_version: str, - git_commit: str, - upstream: Optional[Dict[str, Any]] = None, - timestamp_utc: Optional[datetime] = None, - signature: Optional[str] = None, - ) -> "RegulatoryAuditEvent": - ts = timestamp_utc or utc_now() - canonical = { - "event_id": event_id, - "event_type": event_type, - "timestamp_utc": iso_utc(ts), - "user_id": user_id, - "session_id": session_id, - "payload": payload, - "previous_hash": previous_hash, - "service_version": service_version, - "git_commit": git_commit, - "upstream": upstream or {}, - } - content_hash = sha256_hex(canonical_json_bytes(canonical)) - return RegulatoryAuditEvent( - event_id=event_id, - event_type=event_type, - timestamp_utc=ts, - user_id=user_id, - session_id=session_id, - payload=payload, - previous_hash=previous_hash, - content_hash=content_hash, - service_version=service_version, - git_commit=git_commit, - upstream=upstream or {}, - signature=signature, - ) - - -class AuditChainBrokenError(Exception): - """Raised when the expected audit chain has been broken.""" - - -class RegulatoryAuditTrail: - 
""" - Append-only audit trail reference implementation. - - This example uses an in-memory backend by default. In production, replace - with an append-controlled storage implementation and add retention/access - controls appropriate for the deployment context. - """ - - def __init__(self, storage: Optional[AuditStorage] = None) -> None: - self._storage = storage or InMemoryAuditStorage() - self._tail_hash = "0" * 64 - - @property - def tail_hash(self) -> str: - return self._tail_hash - - def record(self, event: RegulatoryAuditEvent) -> str: - if event.previous_hash != self._tail_hash: - raise AuditChainBrokenError( - f"Expected previous_hash={self._tail_hash}, got {event.previous_hash}" - ) - if self._compute_hash(event) != event.content_hash: - raise AuditChainBrokenError("Event content hash does not match canonical content") - self._storage.append(event) - self._tail_hash = event.content_hash - return self._tail_hash - - def verify_chain(self) -> bool: - expected = "0" * 64 - for event in self._storage.read_all(): - if event.previous_hash != expected: - return False - if self._compute_hash(event) != event.content_hash: - return False - expected = event.content_hash - return True - - @staticmethod - def _compute_hash(event: RegulatoryAuditEvent) -> str: - canonical = { - "event_id": event.event_id, - "event_type": event.event_type, - "timestamp_utc": iso_utc(event.timestamp_utc), - "user_id": event.user_id, - "session_id": event.session_id, - "payload": event.payload, - "previous_hash": event.previous_hash, - "service_version": event.service_version, - "git_commit": event.git_commit, - "upstream": event.upstream, - } - return sha256_hex(canonical_json_bytes(canonical)) - - -# ============================================================================= -# Electronic review/signature reference model -# ============================================================================= - -SignatureVerifier = Callable[[bytes, "ElectronicSignature"], bool] - - 
-@dataclass(frozen=True) -class ElectronicSignature: - """ - Reference structure for review or approval events. - - This intentionally leaves cryptographic verification pluggable because the - concrete mechanism depends on deployment policy and available infrastructure. - """ - - signer_user_id: str - signer_full_name: str - signature_meaning: str # e.g. authored / reviewed / approved / rejected - signature_timestamp_utc: datetime - content_hash: str - signature_value: bytes - algorithm: str = "ecdsa-sha256" - certificate_chain_pem: List[str] = field(default_factory=list) - - def verify(self, content: bytes, verifier: SignatureVerifier) -> bool: - if sha256_hex(content) != self.content_hash: - return False - return verifier(content, self) - - -# ============================================================================= -# Upstream provenance capture -# ============================================================================= - -@dataclass(frozen=True) -class UpstreamRecord: - """ - Captures the strongest provenance information available for an upstream call. - - Note: - - Only populate provider_version or snapshot_id if the upstream actually - exposes such concepts. - - If not, internal response hashing and cache identity become more important. 
- """ - - provider_name: str - request_url: str - request_params: Dict[str, Any] = field(default_factory=dict) - retrieved_at_utc: datetime = field(default_factory=utc_now) - provider_version: Optional[str] = None - snapshot_id: Optional[str] = None - response_hash: Optional[str] = None - cache_key: Optional[str] = None - - def as_dict(self) -> Dict[str, Any]: - return { - "provider_name": self.provider_name, - "request_url": self.request_url, - "request_params": self.request_params, - "retrieved_at_utc": iso_utc(self.retrieved_at_utc), - "provider_version": self.provider_version, - "snapshot_id": self.snapshot_id, - "response_hash": self.response_hash, - "cache_key": self.cache_key, - } - - -# ============================================================================= -# Reproducibility and environment capture -# ============================================================================= - -@dataclass(frozen=True) -class ExecutionEnvironment: - container_image_digest: str - container_image_tag: str - git_commit_hash: str - git_tag: Optional[str] - git_dirty: bool - poetry_lock_hash: Optional[str] - python_version: str - os_name: str - os_version: str - cpu_architecture: str - random_seed: Optional[int] = None - floating_point_mode: str = "strict" - upstream_records: Dict[str, Dict[str, Any]] = field(default_factory=dict) - - def as_dict(self) -> Dict[str, Any]: - return { - "container": { - "image_digest": self.container_image_digest, - "image_tag": self.container_image_tag, - }, - "code": { - "git_commit": self.git_commit_hash, - "git_tag": self.git_tag, - "git_dirty": self.git_dirty, - "poetry_lock_hash": self.poetry_lock_hash, - }, - "runtime": { - "python": self.python_version, - "os": f"{self.os_name} {self.os_version}", - "cpu": self.cpu_architecture, - "random_seed": self.random_seed, - "floating_point_mode": self.floating_point_mode, - }, - "upstream": self.upstream_records, - } - - -def _run_git_command(args: List[str]) -> Optional[str]: - try: - 
result = subprocess.run(args, capture_output=True, text=True, check=True) - return result.stdout.strip() or None - except Exception: - return None - - -def _file_hash_if_exists(path: str) -> Optional[str]: - p = Path(path) - if not p.exists() or not p.is_file(): - return None - return sha256_hex(p.read_bytes()) - - -def capture_execution_environment( - *, - upstream_records: Optional[Mapping[str, UpstreamRecord]] = None, - random_seed: Optional[int] = None, - floating_point_mode: str = "strict", -) -> ExecutionEnvironment: - git_commit = _run_git_command(["git", "rev-parse", "HEAD"]) or "unknown" - git_tag = _run_git_command(["git", "describe", "--tags", "--exact-match"]) - git_status = _run_git_command(["git", "status", "--porcelain"]) - git_dirty = bool(git_status) - - upstream = { - name: record.as_dict() - for name, record in (upstream_records or {}).items() - } - - return ExecutionEnvironment( - container_image_digest=os.getenv("TOXMCP_IMAGE_DIGEST", "unknown"), - container_image_tag=os.getenv("TOXMCP_IMAGE_TAG", "unknown"), - git_commit_hash=git_commit, - git_tag=git_tag, - git_dirty=git_dirty, - poetry_lock_hash=_file_hash_if_exists("poetry.lock"), - python_version=platform.python_version(), - os_name=platform.system(), - os_version=platform.release(), - cpu_architecture=platform.machine(), - random_seed=random_seed, - floating_point_mode=floating_point_mode, - upstream_records=upstream, - ) - - -# ============================================================================= -# Untrusted text handling for model-facing contexts -# ============================================================================= - -def sanitize_untrusted_identifier(text: str, *, allow_newlines: bool = False, max_length: int = 256) -> str: - """ - Normalize and sanitize a free-text identifier before passing it into an - LLM- or agent-facing context. - - This is a helper, not a complete prompt-injection defense. 
The primary - defense should still be prompt structure and isolation of untrusted fields. - """ - normalized = unicodedata.normalize("NFKC", text) - if len(normalized) > max_length: - raise ValueError(f"Identifier exceeds maximum length {max_length}") - - cleaned_chars: List[str] = [] - for char in normalized: - category = unicodedata.category(char) - if category.startswith("C"): - if allow_newlines and char in "\n\r": - cleaned_chars.append("\n") - # drop all other control characters - continue - cleaned_chars.append(char) - - cleaned = "".join(cleaned_chars) - if not allow_newlines: - cleaned = cleaned.replace("\n", " ").replace("\r", " ") - return " ".join(cleaned.split()).strip() - - -# ============================================================================= -# Example usage -# ============================================================================= - -if __name__ == "__main__": - audit_trail = RegulatoryAuditTrail() - - env = capture_execution_environment( - upstream_records={ - "comptox": UpstreamRecord( - provider_name="comptox", - request_url="https://example.invalid/chemical/detail/DTXSID123", - request_params={"id": "DTXSID123"}, - provider_version=None, - snapshot_id=None, - response_hash="placeholder-response-hash", - cache_key="comptox:DTXSID123:v1", - ) - }, - random_seed=1234, - ) - - event = RegulatoryAuditEvent.build( - event_id="evt-001", - event_type="workflow_started", - user_id="user-123", - session_id="sess-001", - payload={"chemical_name": sanitize_untrusted_identifier("Benzene")}, - previous_hash=audit_trail.tail_hash, - service_version="toxmcp-suite reviewed-reference", - git_commit=env.git_commit_hash, - upstream={"comptox": env.upstream_records.get("comptox", {})}, - ) - - audit_trail.record(event) - print(json.dumps(env.as_dict(), indent=2)) - print(f"audit_chain_ok={audit_trail.verify_chain()}") diff --git a/ToxMCP_Audit_Reviewed_v2/toxmcp_security_audit_report.md b/ToxMCP_Audit_Reviewed_v2/toxmcp_security_audit_report.md 
deleted file mode 100644 index 748e777..0000000 --- a/ToxMCP_Audit_Reviewed_v2/toxmcp_security_audit_report.md +++ /dev/null @@ -1,215 +0,0 @@ -# ToxMCP Suite - Adversarial Security Audit Report (Reviewed Copy) - -**Review date:** 2026-04-15 -**Scope:** `comptox-mcp`, `oqt-mcp`, `aop-mcp`, `pbpk-mcp` -**Purpose:** Identify trust-boundary, availability, and integrity risks relevant to toxicology workflows - ---- - -## Read this report carefully - -The original security report had strong instincts but sometimes overstated exploit certainty. -This reviewed copy keeps the high-value findings while making the following distinction explicit: - -- **Observed:** insecure pattern directly visible in the audited material -- **Scenario:** plausible exploit or misuse path that depends on runtime preconditions -- **Operational consequence:** what the issue could mean in production if those preconditions hold - -This report is therefore more conservative in wording, not weaker in substance. - ---- - -## Executive summary - -The most important security issues in the package are: - -1. **Unsafe interpolation at trust boundaries** - Especially around query/template construction and any path where untrusted identifiers may influence model-facing text. - -2. **Weak provenance/integrity controls around upstream dependence** - The suite relies on external sources and intermediate transformations that are not always strongly verifiable afterward. - -3. **Insufficient resilience and rate/quotas for expensive operations** - Availability and integrity interact here: unstable systems are harder to trust and easier to misuse. 
- -### Security posture after review -- **Critical findings remain:** yes -- **But some original exploit narratives are better read as scenarios:** also yes - ---- - -## Finding register - -| ID | Finding | Severity | Evidence basis | Confidence | Reviewed interpretation | -|---|---|---|---|---|---| -| SEC-01 | Unsafe query interpolation in `aop-mcp` | **Critical** | Observed + scenario | High / Medium | The pattern is real; exact exploit effect depends on which query parts are attacker-influenced and what the endpoint allows | -| SEC-02 | Untrusted identifier handling across prompt/agent boundaries | **High** | Observed + scenario | Medium | Important to mitigate now, even though full exploit demonstration still needs runtime tracing | -| SEC-03 | Upstream integrity/provenance controls are uneven | **High** | Observed + inferred | Medium | External dependence needs stronger internal verification and capture | -| SEC-04 | Authorization / workflow escalation surfaces deserve targeted review | **Medium / High** | Observed + inferred | Medium | Needs live-repo validation before stronger claims | -| SEC-05 | Resource exhaustion and denial-of-service paths are plausible | **High** | Observed | High | Large simulations, retries, or missing quotas can destabilize the system | -| SEC-06 | Offline / controlled-execution posture is underdefined | **High** | Observed + inferred | Medium | Important for high-assurance deployments and incident containment | - ---- - -## SEC-01: Unsafe query interpolation in `aop-mcp` -**Severity:** **Critical** -**Evidence basis:** Observed + scenario -**Confidence:** High for the unsafe pattern; Medium for worst-case impact - -The original package showed string-based templating for query generation. That is a valid high-priority security finding. 
- -### What is directly supported -- query templates are rendered through string formatting -- this is unsafe if structural query fragments or control fields are influenced by untrusted input - -### What should be stated more carefully -The reviewed copy avoids assuming destructive update outcomes such as graph deletion unless the runtime path and endpoint permissions are known. - -### Better statement -> Unsafe interpolation is present. Depending on runtime data flow, this could permit query broadening, result manipulation, data exposure, or other unintended graph access. Destructive effects depend on whether update-capable operations are reachable. - -### Correct mitigation pattern -Do not try to “sanitize everything” with regexes alone. - -Instead: -- use fixed, allow-listed query plans -- bind only literals/URIs -- keep `ORDER BY`, `LIMIT`, graph patterns, and predicates on allow-lists -- separate read-only query paths from any privileged/update path - ---- - -## SEC-02: Untrusted identifiers may influence model-facing text -**Severity:** **High** -**Evidence basis:** Observed + scenario -**Confidence:** Medium - -The original report was directionally right to worry about prompt or instruction confusion from chemical names and related fields. -The reviewed copy treats the full jailbreak claim as scenario-dependent until the exact prompt boundary is demonstrated. - -### Why it still matters now -Because mitigation is relatively cheap and scientifically sensible: -- normalize Unicode -- strip control characters for LLM-facing contexts -- avoid passing free text directly into system or tool instructions -- carry identifiers as structured data -- regression-test with adversarial names and notes - -### Important correction -Simple keyword blocking is not enough. -The primary control should be **prompt structure and boundary isolation**, not only blacklists. 
- ---- - -## SEC-03: Upstream integrity controls are uneven -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium - -The original report identified a real issue: results derived from upstream APIs or knowledge sources can be difficult to verify later if provenance is weak. - -### Reviewed refinement -The right mitigation is not to assume that all providers support response signing. -A better hierarchy of controls is: -1. authenticated transport where available -2. source/provenance capture -3. request/response hashing -4. internal caching or mirroring for replay -5. cross-source consistency checks for high-value conclusions -6. provider-side signing **if actually supported** - ---- - -## SEC-04: Authorization and workflow escalation need targeted validation -**Severity:** **Medium / High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium - -The original report’s concern about permission boundaries remains useful, but this is an area where live-repo validation matters. -Configuration alone is rarely enough to prove exploitability. - -### What to verify -- how permissions are enforced at runtime -- whether tool composition can bypass intended gates -- which roles can launch expensive, destructive, or approval-relevant flows -- whether audit records capture denied and elevated actions - ---- - -## SEC-05: DoS and exhaustion paths are plausible -**Severity:** **High** -**Evidence basis:** Observed -**Confidence:** High - -The package identifies multiple cost-amplifying patterns: -- large PBPK workloads -- retry behavior on failing upstreams -- insufficient quotas or admission control -- incomplete cancellation/timeout semantics - -These are not “mere performance issues.” -In an analytical system, prolonged instability becomes a security and integrity problem because it encourages retries, bypasses, stale-data usage, and partial-result acceptance. 
- ---- - -## SEC-06: Controlled/offline execution posture should be made explicit -**Severity:** **High** -**Evidence basis:** Observed + inferred -**Confidence:** Medium - -The original report usefully raised the question of “secure mode” or constrained execution, but the reviewed copy frames it more practically: - -- Which repos can operate without live external dependencies? -- Which assets must be mirrored or pre-approved? -- What logging, auth, and approval rules change in controlled mode? -- What is the incident-response posture if a supplier or upstream becomes untrusted? - -This is important for regulated, confidential, or degraded-network settings. - ---- - -## Attack-chain view - -The original report’s attack chains were helpful conceptually. The reviewed copy keeps the model but phrases them as **scenario compositions**, not proof. - -### Example composite scenario -1. untrusted identifier or query input crosses a weak boundary -2. upstream retrieval/provenance is weak -3. review checkpoints are missing or optional -4. a polished artifact is produced -5. the resulting conclusion appears more trustworthy than its evidence warrants - -This is the core systemic security theme of the suite: **false confidence plus weak verification**. - ---- - -## Immediate actions - -1. **Fix trust-boundary handling** - - query allow-lists - - structured prompt inputs - - control-character stripping for model-facing fields - -2. **Improve provenance and integrity capture** - - response hashes - - retrieval metadata - - clear actor/review state - -3. **Add quotas and resilience controls** - - population/job limits - - bounded retries - - circuit breakers - - cancellation semantics - -4. **Validate authorization pathways** - - runtime permission tests - - escalation-path review - - denial auditability - ---- - -## Final judgment - -The original package correctly identified that ToxMCP’s biggest security risks are not only perimeter vulnerabilities. 
They are failures at **trust boundaries, provenance boundaries, and review boundaries**. - -**Bottom line:** the reviewed copy supports several strong security findings, especially around query safety, prompt-boundary hygiene, upstream integrity capture, and exhaustion control. Some exploit narratives remain scenario-based and should be validated against the live repositories before external use. diff --git a/docs/genra_workflow.md b/docs/genra_workflow.md index 46a689d..f564b3d 100644 --- a/docs/genra_workflow.md +++ b/docs/genra_workflow.md @@ -187,7 +187,7 @@ sequenceDiagram } ``` -- Bundles align with `docs/mcp_ctx_audit.md` by including request IDs, rate-limit headers, and reproducible payload copies when `includeRawResponses=true`. +- Bundles include request IDs, rate-limit headers, and reproducible payload copies when `includeRawResponses=true` so downstream review remains traceable. - Storage layout enables downstream systems to fetch artefacts by `workflowRunId`. Each bundle carries a SHA256 checksum for integrity. ## Failure Modes & Recovery Paths diff --git a/docs/mcp_ctx_audit.md b/docs/mcp_ctx_audit.md deleted file mode 100644 index f70a403..0000000 --- a/docs/mcp_ctx_audit.md +++ /dev/null @@ -1,54 +0,0 @@ -Overview -- MCP uses `ctx-python` (`ctxpy`) client classes to access CTX APIs. No raw HTTP is issued in this codebase. -- Resources map to ctxpy domains and methods; auth uses `x-api-key` header. -- Base URL was not explicitly configured before; now set via env for ctxpy. - -Authentication -- Header: `x-api-key` -- Env resolution in server: prefers `CTX_API_KEY`, then `EPA_COMPTOX_API_KEY`, then `ctx_x_api_key`. -- Also sets `os.environ['ctx_x_api_key']` for ctxpy compatibility. - -Base URL -- New default base: `https://comptox.epa.gov/ctx-api` -- Legacy toggle: `CTX_USE_LEGACY=1` switches to `https://api-ccte.epa.gov` -- Env exposed for ctxpy: `ctx_api_host` set from `CTX_API_BASE_URL` or legacy toggle; `ctx_api_accept=application/json`. 
- -Resource → Underlying ctxpy calls -- chemical (src/epacomp_tox/resources/chemical.py:1): - - `search_chemical`/`batch_search_chemical` → `/chemical/search/*` (batch sends newline-delimited identifiers) - - `get_chemical_details`/`batch_get_chemical_details` → `/chemical/detail/search/*` with optional projection query param - - `search_msready` → `/chemical/msready/search/(by-dtxcid|by-formula|by-mass)` -- hazard (src/epacomp_tox/resources/hazard.py:1): - - `search_hazard` → `ctx.Hazard.search` shim selecting `/hazard/{dataset}` routes (toxval, skin-eye, cancer, genetox, adme-ivive, toxref, iris, pprtv, hawc) - - `batch_search_hazard` → Reuses `ctx.Hazard.batch_search` to iterate the selector for each DTXSID - - `get_hazard_toxval` / `batch_get_hazard_toxval` → `/hazard/toxval/search/by-dtxsid/{id}` (single + newline-delimited batch) - - `get_hazard_skin_eye` / `batch_get_hazard_skin_eye` → `/hazard/skin-eye/search/by-dtxsid/{id}` - - `get_hazard_cancer_summary` / `batch_get_hazard_cancer_summary` → `/hazard/cancer-summary/search/by-dtxsid/{id}` - - `get_hazard_genetox_summary` / `batch_get_hazard_genetox_summary` → `/hazard/genetox/summary/search/by-dtxsid/{id}` - - `get_hazard_genetox_details` / `batch_get_hazard_genetox_details` → `/hazard/genetox/details/search/by-dtxsid/{id}` - - `get_hazard_adme_ivive` → `/hazard/adme-ivive/search/by-dtxsid/{id}` - - `get_hazard_pprtv` → `/hazard/pprtv/search/by-dtxsid/{id}` - - `get_hazard_iris` → `/hazard/iris/search/by-dtxsid/{id}` - - `get_hazard_hawc` → `/hazard/hawc/search/by-dtxsid/{id}` - - `get_hazard_toxref` / `batch_get_hazard_toxref` → `/hazard/toxref/{dataset}/search/{lookup}/{value}` + `/hazard/toxref/search/by-dtxsid/` -- exposure (src/epacomp_tox/resources/exposure.py:1): - - `search_cpdat` → `/exposure/{functional-use|product-data|list-presence}/search/by-dtxsid/{id}` - - `search_httk` → `GET /exposure/httk/search/by-dtxsid/{id}` - - `get_cpdat_vocabulary` → 
`/exposure/{functional-use|product-data|list-presence}/(category|puc|tags)` - - `search_qsurs` → `GET /exposure/functional-use/probability/search/by-dtxsid/{id}` - - `search_exposures` → `/exposure/{mmdb|seem}/...` endpoints based on selector -- chemical_list (src/epacomp_tox/resources/chemical_list.py:1): - - `get_public_list_names` → `GET /chemical/list/` - - `get_full_list` → `GET /chemical/list/chemicals/search/by-listname/{list}` -- cheminformatics (src/epacomp_tox/resources/cheminformatics.py:1): - - `search_toxprints` → `ctx.search_toxprints(chemical)` (returns DataFrame; code converts to dict) - -Notes -- Method signatures and available calls extracted into `epa_comptox_api_structure.json:1` (generated via `extract_api_structure.py:1`). -- Lightweight shim in `src/ctxpy/__init__.py` wraps GET/POST/batch, respects `ctx_api_host`, enforces batch chunking, and surfaces structured `CtxApiError` data (request id, rate limits, retry-after). -- `_with_retry` now provides exponential backoff with jitter, retries only on retryable statuses, and exposes `get_last_metadata()` for downstream telemetry. -- Cheminformatics/ToxPrint endpoints remain unavailable on comptox.epa.gov/ctx-api; shim raises migration warning. - -Gaps/Actions -- Confirm maximum batch payload accepted by comptox host (shim currently assumes 200 identifiers per chunk). -- Add smoke tests exercising 1–2 endpoints per domain using `CTX_API_KEY`. diff --git a/docs/model_metadata.md b/docs/model_metadata.md index e07c8bf..cf13df2 100644 --- a/docs/model_metadata.md +++ b/docs/model_metadata.md @@ -47,7 +47,7 @@ This writes Markdown and HTML summaries under `docs/generated/`. See `docs/model - **Task 2.3** will populate cards for TEST, OPERA, and GenRA using the schema, ensuring AD definitions and provenance are complete. - **Task 2.4–2.5** will publish the AD reference data and wire schema validation into CI so regressions are blocked automatically. 
-Questions or suggestions can be captured in `docs/mcp_ctx_audit.md` for review during the metadata governance workshops. +Questions or suggestions should be captured in GitHub issues or focused documentation PRs during the metadata governance workshops. ## Implementation Notes diff --git a/epa_comptox_api_structure.json b/epa_comptox_api_structure.json deleted file mode 100644 index 617f178..0000000 --- a/epa_comptox_api_structure.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "Chemical": { - "batch": "(suffix: str, word: Iterable[str], batch_size: int, bracketed: bool = False)", - "details": "(by: str, word: Union[str, Iterable[str]], subset: Optional[str] = 'all')", - "get": "(suffix: str)", - "msready": "(by: str, word: Optional[str] = None, start: Optional[float] = None, end: Optional[float] = None)", - "post": "(suffix: str, word: str)", - "search": "(by: str, word: Union[str, Iterable[str]])" - }, - "Exposure": { - "batch": "(suffix: str, word: Iterable[str], batch_size: int, bracketed: bool = False)", - "get": "(suffix: str)", - "get_cpdat_vocabulary": "(vocab_name)", - "post": "(suffix: str, word: str)", - "search_cpdat": "(vocab_name, dtxsid)", - "search_exposures": "(by, dtxsid)", - "search_httk": "(dtxsid)", - "search_qsurs": "(dtxsid)" - }, - "Hazard": { - "batch": "(suffix: str, word: Iterable[str], batch_size: int, bracketed: bool = False)", - "batch_search": "(by: str, dtxsid: Iterable[str], summary: bool = True)", - "get": "(suffix: str)", - "post": "(suffix: str, word: str)", - "search": "(by: str, dtxsid: str, summary: bool = True)" - }, - "ChemicalList": { - "batch": "(suffix: str, word: Iterable[str], batch_size: int, bracketed: bool = False)", - "get": "(suffix: str)", - "get_full_list": "(list_name: str)", - "post": "(suffix: str, word: str)", - "public_list_names": "()" - }, - "Cheminformatics": { - "search_toxprints": "(chemical)" - } -} diff --git a/extract_api_structure.py b/extract_api_structure.py index 81c25f2..e99fb79 100644 --- 
a/extract_api_structure.py +++ b/extract_api_structure.py @@ -1,7 +1,9 @@ -import os -import ctxpy as ctx import inspect import json +import os +from pathlib import Path + +import ctxpy as ctx # Initialize with API key from environment api_key = os.environ.get('CTX_API_KEY') or os.environ.get('EPA_COMPTOX_API_KEY') @@ -57,8 +59,11 @@ def extract_class_methods(cls, instance=None): 'search_toxprints': str(inspect.signature(ctx.search_toxprints)) } -# Save to file -with open('epa_comptox_api_structure.json', 'w') as f: +# Save to an ignored local artifact path so ad hoc snapshots do not clutter the repo root. +output_path = Path(__file__).resolve().parent / "artifacts" / "epa_comptox_api_structure.json" +output_path.parent.mkdir(parents=True, exist_ok=True) + +with output_path.open("w") as f: json.dump(api_structure, f, indent=2) -print('API structure extracted and saved to epa_comptox_api_structure.json') +print(f"API structure extracted and saved to {output_path}") diff --git a/scientific_engine_bundle.txt b/scientific_engine_bundle.txt deleted file mode 100644 index 1b7f8aa..0000000 --- a/scientific_engine_bundle.txt +++ /dev/null @@ -1,6829 +0,0 @@ -EPA CompTox Scientific Engine Bundle - -Generated: 2026-04-11T14:15:01.651033+00:00 -Repo root: /Volumes/Storage/topotox_space_relief_20260220/mcp_epacomp_tox -Selection: core scientific engine, predictive harness, science-facing CTX resources, relevant tests, and model metadata. 
-Included files: 40 -Source characters (included files only): 238674 -Source lines (included files only): 6652 -Conservative token ceiling at 2 chars/token: 119337 -Rule-of-thumb token estimate at 4 chars/token: 59668 - -Included paths: -- src/epacomp_tox/contracts/__init__.py -- src/epacomp_tox/orchestrator/__init__.py -- src/epacomp_tox/orchestrator/audit.py -- src/epacomp_tox/orchestrator/ctx_data.py -- src/epacomp_tox/orchestrator/evidence.py -- src/epacomp_tox/orchestrator/identifiers.py -- src/epacomp_tox/orchestrator/models.py -- src/epacomp_tox/orchestrator/offline.py -- src/epacomp_tox/orchestrator/predictive.py -- src/epacomp_tox/orchestrator/utils.py -- src/epacomp_tox/orchestrator/workflow.py -- src/epacomp_tox/predictive/__init__.py -- src/epacomp_tox/predictive/base.py -- src/epacomp_tox/predictive/clients.py -- src/epacomp_tox/predictive/genra_service.py -- src/epacomp_tox/predictive/opera_service.py -- src/epacomp_tox/predictive/router.py -- src/epacomp_tox/predictive/test_service.py -- src/epacomp_tox/metadata/__init__.py -- src/epacomp_tox/metadata/applicability.py -- src/epacomp_tox/metadata/model_cards.py -- src/epacomp_tox/metadata/validator.py -- src/epacomp_tox/resources/base.py -- src/epacomp_tox/resources/bioactivity.py -- src/epacomp_tox/resources/chemical.py -- src/epacomp_tox/resources/hazard.py -- src/epacomp_tox/resources/exposure.py -- src/epacomp_tox/resources/cheminformatics.py -- src/epacomp_tox/resources/metadata.py -- tests/test_orchestrator_stages.py -- tests/test_predictive_regression.py -- tests/workflows/test_offline_workflows.py -- tests/test_domain_contracts.py -- tests/test_cross_suite_handoffs.py -- metadata/model_cards/genra_read_across.json -- metadata/model_cards/opera_property.json -- metadata/model_cards/test_consensus.json -- metadata/applicability_domains/genra_read_across_ad.json -- metadata/applicability_domains/opera_property_ad.json -- metadata/applicability_domains/test_consensus_ad.json - 
-==================================================================================================== - -==================================================================================================== -FILE: src/epacomp_tox/contracts/__init__.py -==================================================================================================== -from __future__ import annotations - -import json -from functools import lru_cache -from pathlib import Path -from typing import Any, Dict, Tuple - -from jsonschema import Draft202012Validator - -SCHEMA_ROOT = Path(__file__).resolve().parents[3] / "docs" / "contracts" / "schemas" - - -class SchemaValidationError(RuntimeError): - """Raised when a payload fails JSON Schema validation.""" - - -def _schema_path(namespace: str, name: str) -> Path: - return SCHEMA_ROOT / namespace / f"{name}.json" - - -@lru_cache(maxsize=128) -def load_schema(namespace: str, name: str) -> Dict[str, Any]: - """Load and cache a JSON Schema by namespace/name.""" - path = _schema_path(namespace, name) - if not path.exists(): - raise FileNotFoundError(f"Schema '{namespace}/{name}' not found at {path}") - with path.open("r", encoding="utf-8") as handle: - return json.load(handle) - - -def validate_payload(payload: Any, *, namespace: str, name: str) -> None: - """Validate a payload against the referenced schema.""" - schema = load_schema(namespace, name) - validator = Draft202012Validator(schema) - errors = sorted(validator.iter_errors(payload), key=lambda error: error.path) - if errors: - message = "; ".join(error.message for error in errors) - raise SchemaValidationError(message) - - -def schema_ref(namespace: str, name: str) -> Dict[str, str]: - """Helper to build a schema reference dictionary for tool definitions.""" - return {"namespace": namespace, "name": name} - - -__all__ = ["SchemaValidationError", "load_schema", "schema_ref", "validate_payload"] - 
-==================================================================================================== -FILE: src/epacomp_tox/orchestrator/__init__.py -==================================================================================================== -"""GenRA orchestration helpers (identifier resolution + CTX data staging).""" - -from .audit import AuditBundleStore -from .ctx_data import CtxDataAssembler, CtxDataAssemblyError -from .evidence import EvidenceSynthesizer -from .identifiers import IdentifierResolutionError, IdentifierResolver -from .models import ( - CtxDataBundle, - EvidenceScore, - EvidenceSynthesis, - GuardrailEvent, - IdentifierResolution, - MetadataTrace, - PredictiveRunResult, - PredictiveStepResult, - PredictiveTask, -) -from .offline import ( - OFFLINE_SCENARIOS, - OfflinePredictiveService, - build_offline_orchestrator, -) -from .predictive import PredictiveCoordinator -from .workflow import GenRAOrchestrator - -__all__ = [ - "CtxDataAssembler", - "CtxDataAssemblyError", - "CtxDataBundle", - "GuardrailEvent", - "IdentifierResolution", - "IdentifierResolutionError", - "IdentifierResolver", - "MetadataTrace", - "PredictiveCoordinator", - "EvidenceSynthesizer", - "AuditBundleStore", - "OFFLINE_SCENARIOS", - "build_offline_orchestrator", - "OfflinePredictiveService", - "GenRAOrchestrator", - "PredictiveRunResult", - "PredictiveStepResult", - "PredictiveTask", - "EvidenceSynthesis", - "EvidenceScore", -] - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/audit.py -==================================================================================================== -from __future__ import annotations - -import hashlib -import json -from datetime import datetime, timezone -from pathlib import Path -from typing import Dict, Iterable, List, Optional, Tuple, Union - - -class AuditBundleStore: - """Durable storage for orchestrator audit bundles and attachments.""" - 
- def __init__( - self, base_dir: Union[str, Path], *, retention_days: Optional[int] = None - ) -> None: - self.base_dir = Path(base_dir) - self.base_dir.mkdir(parents=True, exist_ok=True) - self.retention_days = retention_days - - def save( - self, - bundle: Dict[str, any], - *, - attachments: Optional[Dict[str, Union[str, bytes]]] = None, - ) -> Dict[str, any]: - run_id = bundle.get("workflowRunId") - if not run_id: - raise ValueError("Bundle must include 'workflowRunId'.") - - run_dir = self.base_dir / run_id - run_dir.mkdir(parents=True, exist_ok=True) - created_at = datetime.now(timezone.utc).isoformat() - - payload = json.dumps( - bundle, ensure_ascii=False, indent=2, sort_keys=True - ).encode("utf-8") - bundle_path = run_dir / "bundle.json" - bundle_path.write_bytes(payload) - bundle_checksum = hashlib.sha256(payload).hexdigest() - - attachments_meta: List[Dict[str, any]] = [] - if attachments: - attachments_dir = run_dir / "attachments" - attachments_dir.mkdir(parents=True, exist_ok=True) - for name, content in attachments.items(): - target = attachments_dir / name - target.parent.mkdir(parents=True, exist_ok=True) - data = content.encode("utf-8") if isinstance(content, str) else content - target.write_bytes(data) - attachments_meta.append( - { - "name": name, - "path": str(target.relative_to(self.base_dir)), - "size": len(data), - "checksum": hashlib.sha256(data).hexdigest(), - } - ) - - metadata = { - "workflowRunId": run_id, - "createdAt": created_at, - "bundlePath": str(bundle_path.relative_to(self.base_dir)), - "bundleChecksum": bundle_checksum, - "attachments": attachments_meta, - "retentionDays": self.retention_days, - } - - (run_dir / "metadata.json").write_text( - json.dumps(metadata, indent=2, sort_keys=True), - encoding="utf-8", - ) - return metadata - - def load_bundle(self, run_id: str) -> Dict[str, any]: - bundle_path = self.base_dir / run_id / "bundle.json" - if not bundle_path.exists(): - raise FileNotFoundError(f"No bundle found for run 
{run_id}") - return json.loads(bundle_path.read_text(encoding="utf-8")) - - def load_metadata(self, run_id: str) -> Dict[str, any]: - metadata_path = self.base_dir / run_id / "metadata.json" - if not metadata_path.exists(): - raise FileNotFoundError(f"No metadata found for run {run_id}") - return json.loads(metadata_path.read_text(encoding="utf-8")) - - def list_runs(self) -> List[Dict[str, any]]: - runs: List[Dict[str, any]] = [] - for entry in sorted(self.base_dir.iterdir()): - if not entry.is_dir(): - continue - metadata_path = entry / "metadata.json" - if not metadata_path.exists(): - continue - try: - runs.append(json.loads(metadata_path.read_text(encoding="utf-8"))) - except json.JSONDecodeError: - continue - return runs - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/ctx_data.py -==================================================================================================== -from __future__ import annotations - -import time -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple - -from ctxpy import CtxApiError -from epacomp_tox.resources.cheminformatics import CheminformaticsResource -from epacomp_tox.resources.exposure import ExposureResource -from epacomp_tox.resources.hazard import HazardResource - -from .models import CtxDataBundle, MetadataTrace -from .utils import sanitize_metadata - - -class CtxDataAssemblyError(RuntimeError): - """Raised when CTX data retrieval fails.""" - - -class CtxDataAssembler: - """Fetch and cache CTX datasets required before GenRA predictive calls.""" - - SCENARIO_OVERRIDES = { - "acute_toxicity": { - "hazard": ["all", "human", "eco"], - "exposure": ["httk"], - }, - "exposure_prioritization": { - "hazard": ["all"], - "exposure": ["pathways", "seem"], - "cpdat": ["fc", "puc"], - }, - "genra_read_across": { - "hazard": ["all"], - "exposure": ["httk", "qsurs"], - "cpdat": ["fc"], - "cheminformatics": True, - }, - } - 
- def __init__( - self, - *, - hazard_resource: HazardResource, - exposure_resource: ExposureResource, - cheminformatics_resource: Optional[CheminformaticsResource] = None, - hazard_data_types: Sequence[str] = ("all",), - exposure_datasets: Sequence[str] = ("httk",), - cpdat_vocabularies: Sequence[str] = ("fc",), - include_toxprints: bool = False, - cache_ttl: int = 900, - time_fn: Callable[[], float] = time.time, - ) -> None: - self.hazard_resource = hazard_resource - self.exposure_resource = exposure_resource - self.cheminformatics_resource = cheminformatics_resource - self.hazard_data_types = tuple(dict.fromkeys(hazard_data_types)) - self.exposure_datasets = tuple(dict.fromkeys(exposure_datasets)) - self.cpdat_vocabularies = tuple(dict.fromkeys(cpdat_vocabularies)) - self.include_toxprints = include_toxprints - self.cache_ttl = max(0, cache_ttl) - self._time_fn = time_fn - self._cache: Dict[ - Tuple[str, Tuple[str, ...], Tuple[str, ...], Tuple[str, ...], bool], - Tuple[float, CtxDataBundle], - ] = {} - - def assemble( - self, - dtxsid: str, - *, - scenarios: Optional[Sequence[str]] = None, - include_cheminformatics: Optional[bool] = None, - hazard_summary: bool = True, - ) -> CtxDataBundle: - """Gather hazard/exposure datasets (with caching) for the orchestrator workflow.""" - normalized_sid = (dtxsid or "").strip().upper() - if not normalized_sid: - raise CtxDataAssemblyError("DTXSID is required for CTX data assembly.") - - scenario_list = sorted( - { - scenario.strip().lower() - for scenario in scenarios or [] - if isinstance(scenario, str) and scenario.strip() - } - ) - - hazard_types = set(self.hazard_data_types) - exposure_types = set(self.exposure_datasets) - cpdat_vocab = set(self.cpdat_vocabularies) - include_toxprints = ( - self.include_toxprints - if include_cheminformatics is None - else include_cheminformatics - ) - - for scenario in scenario_list: - overrides = self.SCENARIO_OVERRIDES.get(scenario) - if not overrides: - continue - 
hazard_types.update(overrides.get("hazard", [])) - exposure_types.update(overrides.get("exposure", [])) - cpdat_vocab.update(overrides.get("cpdat", [])) - if overrides.get("cheminformatics"): - include_toxprints = True - - # Stable cache key covering config and request - cache_key = ( - normalized_sid, - tuple(sorted(hazard_types)), - tuple(sorted(exposure_types)), - tuple(sorted(cpdat_vocab)), - bool(include_toxprints), - ) - now = self._time_fn() - cached = self._cache.get(cache_key) - if cached and (self.cache_ttl == 0 or now - cached[0] <= self.cache_ttl): - return cached[1].model_copy(update={"cache_hit": True}) - - trace: List[MetadataTrace] = [] - data_gaps: List[str] = [] - hazard_data: Dict[str, List[Dict[str, Any]]] = {} - exposure_data: Dict[str, List[Dict[str, Any]]] = {} - cheminformatics_data: Dict[str, Any] = {} - - # Hazard datasets --------------------------------------------------- - for hazard_type in sorted(hazard_types): - try: - payload = self.hazard_resource.search_hazard( - data_type=hazard_type, - dtxsid=normalized_sid, - summary=hazard_summary, - ) - except CtxApiError as exc: - trace.append( - self._metadata_trace(self.hazard_resource, f"hazard:{hazard_type}") - ) - raise CtxDataAssemblyError( - f"Failed to fetch hazard dataset '{hazard_type}' for {normalized_sid}: {exc}" - ) from exc - hazard_data[hazard_type] = payload - if not payload: - data_gaps.append(f"hazard:{hazard_type}") - trace.append( - self._metadata_trace(self.hazard_resource, f"hazard:{hazard_type}") - ) - - # Exposure datasets ------------------------------------------------- - for exposure_type in sorted(exposure_types): - step_name = f"exposure:{exposure_type}" - try: - payload = self._fetch_exposure_dataset(exposure_type, normalized_sid) - except CtxApiError as exc: - trace.append(self._metadata_trace(self.exposure_resource, step_name)) - raise CtxDataAssemblyError( - f"Failed to fetch exposure dataset '{exposure_type}' for {normalized_sid}: {exc}" - ) from exc - 
exposure_data[exposure_type] = payload - if not payload: - data_gaps.append(step_name) - trace.append(self._metadata_trace(self.exposure_resource, step_name)) - - for vocab in sorted(cpdat_vocab): - step_name = f"exposure:cpdat:{vocab}" - try: - payload = self.exposure_resource.search_cpdat( - vocab_name=vocab, - dtxsids=[normalized_sid], - ) - except CtxApiError as exc: - trace.append(self._metadata_trace(self.exposure_resource, step_name)) - raise CtxDataAssemblyError( - f"Failed to fetch CPDat vocabulary '{vocab}' for {normalized_sid}: {exc}" - ) from exc - exposure_data[f"cpdat:{vocab}"] = payload - if not payload: - data_gaps.append(step_name) - trace.append(self._metadata_trace(self.exposure_resource, step_name)) - - # Cheminformatics --------------------------------------------------- - if include_toxprints: - if not self.cheminformatics_resource: - data_gaps.append("cheminformatics:toxprints") - else: - step_name = "cheminformatics:toxprints" - try: - payload = self.cheminformatics_resource.search_toxprints( - chemical=normalized_sid - ) - except CtxApiError as exc: - trace.append( - self._metadata_trace(self.cheminformatics_resource, step_name) - ) - raise CtxDataAssemblyError( - f"Failed to fetch toxprints for {normalized_sid}: {exc}" - ) from exc - cheminformatics_data["toxprints"] = payload - if not payload: - data_gaps.append(step_name) - trace.append( - self._metadata_trace(self.cheminformatics_resource, step_name) - ) - - bundle = CtxDataBundle( - dtxsid=normalized_sid, - scenarios=scenario_list, - hazard=hazard_data, - exposure=exposure_data, - cheminformatics=cheminformatics_data, - data_gaps=data_gaps, - trace=trace, - ) - if self.cache_ttl: - self._cache[cache_key] = (now, bundle) - return bundle - - # Internal helpers ----------------------------------------------------- - - def _metadata_trace(self, resource: Optional[object], step: str) -> MetadataTrace: - metadata = {} - if resource and hasattr(resource, "get_last_metadata"): - metadata = 
sanitize_metadata(resource.get_last_metadata()) - return MetadataTrace(step=step, metadata=metadata) - - def _fetch_exposure_dataset( - self, dataset: str, dtxsid: str - ) -> List[Dict[str, Any]]: - dataset = dataset.lower() - if dataset == "httk": - return self.exposure_resource.search_httk(dtxsids=[dtxsid]) - if dataset == "qsurs": - return self.exposure_resource.search_qsurs(dtxsids=[dtxsid]) - if dataset in ("pathways", "mmdb-single", "seem", "seem-demographic"): - return self.exposure_resource.search_exposures( - data_type=dataset, - dtxsids=[dtxsid], - ) - raise CtxDataAssemblyError(f"Unsupported exposure dataset '{dataset}'.") - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/evidence.py -==================================================================================================== -from __future__ import annotations - -from typing import Iterable, List - -from epacomp_tox.predictive import PredictiveResponse - -from .models import EvidenceScore, EvidenceSynthesis, PredictiveStepResult - - -class EvidenceSynthesizer: - """Compose GenRA evidence grades and narrative summaries.""" - - def synthesize(self, results: Iterable[PredictiveStepResult]) -> EvidenceSynthesis: - steps: List[PredictiveStepResult] = [ - step for step in results if step.status == "success" - ] - if not steps: - return EvidenceSynthesis( - confidence_band="Unavailable", - scores=EvidenceScore( - analogue_coverage=0.0, - evidence_quality=0.0, - predictive_agreement=0.0, - ), - narrative="No successful predictive results available for synthesis.", - recommended_actions=[ - "Review applicability domain denials", - "Re-run orchestration after addressing guardrail failures", - ], - ) - - analogue_scores = [ - self._extract_score(step, "analogueCoverage") for step in steps - ] - quality_scores = [ - self._extract_score(step, "evidenceQuality") for step in steps - ] - agreement_scores = [ - 
self._extract_score(step, "predictiveAgreement") for step in steps - ] - - coverage = sum(analogue_scores) / len(analogue_scores) - evidence_quality = sum(quality_scores) / len(quality_scores) - predictive_agreement = sum(agreement_scores) / len(agreement_scores) - - band = self._resolve_confidence_band( - coverage, evidence_quality, predictive_agreement - ) - narrative = self._build_narrative( - band, coverage, evidence_quality, predictive_agreement - ) - - return EvidenceSynthesis( - confidence_band=band, - scores=EvidenceScore( - analogue_coverage=coverage, - evidence_quality=evidence_quality, - predictive_agreement=predictive_agreement, - ), - narrative=narrative, - recommended_actions=self._recommended_actions(band), - ) - - def _extract_score(self, step: PredictiveStepResult, key: str) -> float: - metadata = step.metadata or {} - value = metadata.get(key) - if isinstance(value, (int, float)): - return float(value) - if key == "predictiveAgreement" and step.prediction: - return float(step.prediction.get("confidence", 0.0)) - return 0.0 - - def _resolve_confidence_band( - self, coverage: float, quality: float, agreement: float - ) -> str: - if min(coverage, quality, agreement) >= 0.8: - return "Robust" - if min(coverage, quality, agreement) >= 0.5: - return "Limited" - return "Unavailable" - - def _build_narrative( - self, band: str, coverage: float, quality: float, agreement: float - ) -> str: - return ( - f"Confidence band: {band}. Analogue coverage={coverage:.2f}, " - f"evidence quality={quality:.2f}, predictive agreement={agreement:.2f}." 
- ) - - def _recommended_actions(self, band: str) -> List[str]: - if band == "Robust": - return [ - "Proceed with automated dossier generation", - "Document rationale for regulatory submission", - ] - if band == "Limited": - return ["Seek SME review", "Augment analogue set or supporting evidence"] - return [ - "Address guardrail failures", - "Acquire additional data or adjust predictor inputs", - ] - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/identifiers.py -==================================================================================================== -from __future__ import annotations - -import re -import time -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple - -from ctxpy import CtxApiError -from epacomp_tox.resources.chemical import ChemicalResource - -from .models import IdentifierResolution, MetadataTrace -from .utils import sanitize_metadata - - -class IdentifierResolutionError(RuntimeError): - """Raised when chemical identifier normalization fails.""" - - -class IdentifierResolver: - """Resolve user-supplied identifiers into canonical DTXSID records.""" - - _DTXSID_RE = re.compile(r"^DTXSID\d{7}$", re.IGNORECASE) - _TYPE_ALIASES = { - "dtxsid": "dtxsid", - "sid": "dtxsid", - "dsstox": "dtxsid", - "cas": "casrn", - "casrn": "casrn", - "name": "name", - "preferred_name": "name", - "inchikey": "inchikey", - "inchi": "inchikey", - "smiles": "smiles", - } - _SEARCH_ORDER = { - "casrn": ("equals",), - "name": ("equals", "starts-with", "contains"), - "smiles": ("equals", "contains"), - "inchikey": ("equals", "contains"), - } - - def __init__( - self, - *, - chemical_resource: ChemicalResource, - cache_ttl: int = 900, - detail_subset: str = "identifiers", - time_fn: Callable[[], float] = time.time, - ) -> None: - self.chemical_resource = chemical_resource - self.cache_ttl = max(0, cache_ttl) - self.detail_subset = detail_subset - self._time_fn 
= time_fn - self._cache: Dict[Tuple[str, str], Tuple[float, IdentifierResolution]] = {} - - def resolve( - self, identifier: str, identifier_type: Optional[str] = None - ) -> IdentifierResolution: - """Resolve an identifier to a canonical DTXSID.""" - normalized_value = (identifier or "").strip() - if not normalized_value: - raise IdentifierResolutionError("Identifier value is required.") - - normalized_type = self._normalize_type(identifier_type, normalized_value) - cache_key = (normalized_value.lower(), normalized_type) - cached = self._cache.get(cache_key) - now = self._time_fn() - if cached and (self.cache_ttl == 0 or now - cached[0] <= self.cache_ttl): - return cached[1].model_copy(update={"cache_hit": True}) - - trace: List[MetadataTrace] = [] - warnings: List[str] = [] - matched_record: Dict[str, Any] - detail_record: Dict[str, Any] - - if normalized_type == "dtxsid": - detail_record = self._fetch_details( - identifier=normalized_value, - trace=trace, - stage="chemical.details", - ) - matched_record = detail_record - else: - matched_record = self._search_for_match( - identifier=normalized_value, - identifier_type=normalized_type, - trace=trace, - warnings=warnings, - ) - detail_record = self._fetch_details( - identifier=self._extract_dtxsid(matched_record), - trace=trace, - stage="chemical.details", - ) - - resolution = self._build_resolution( - input_value=normalized_value, - input_type=normalized_type, - matched_record=matched_record, - detail_record=detail_record, - warnings=warnings, - trace=trace, - ) - if self.cache_ttl: - self._cache[cache_key] = (now, resolution) - return resolution - - # Internal helpers ----------------------------------------------------- - - def _normalize_type(self, identifier_type: Optional[str], value: str) -> str: - if identifier_type: - key = identifier_type.strip().lower() - if key not in self._TYPE_ALIASES: - raise IdentifierResolutionError( - f"Unsupported identifier type '{identifier_type}'." 
- ) - return self._TYPE_ALIASES[key] - if self._DTXSID_RE.match(value): - return "dtxsid" - if value.count("-") == 2 and len(value.replace("-", "")) in (5, 6, 7, 8, 9): - return "casrn" - return "name" - - def _metadata_trace(self, stage: str) -> MetadataTrace: - metadata = sanitize_metadata(self.chemical_resource.get_last_metadata()) - return MetadataTrace(step=stage, metadata=metadata) - - def _search_for_match( - self, - *, - identifier: str, - identifier_type: str, - trace: List[MetadataTrace], - warnings: List[str], - ) -> Dict[str, Any]: - search_modes = self._SEARCH_ORDER.get(identifier_type) - if not search_modes: - raise IdentifierResolutionError( - f"Identifier type '{identifier_type}' is not searchable." - ) - - last_error: Optional[Exception] = None - for mode in search_modes: - try: - results = self.chemical_resource.search_chemical( - query=identifier, search_type=mode - ) - trace.append(self._metadata_trace(f"chemical.search:{mode}")) - except CtxApiError as exc: - last_error = exc - trace.append(self._metadata_trace(f"chemical.search:{mode}")) - continue - except Exception as exc: # pragma: no cover - defensive - last_error = exc - trace.append(self._metadata_trace(f"chemical.search:{mode}")) - continue - - candidates = [record for record in results if isinstance(record, dict)] - if not candidates: - continue - if len(candidates) > 1: - warnings.append( - f"Multiple matches found for '{identifier}' using search mode '{mode}'. " - "Using the first result." - ) - return candidates[0] - - if last_error: - raise IdentifierResolutionError( - f"Failed to search for identifier '{identifier}': {last_error}" - ) from last_error - raise IdentifierResolutionError( - f"No CTX record found for identifier '{identifier}'." 
- ) - - def _fetch_details( - self, - *, - identifier: str, - trace: List[MetadataTrace], - stage: str, - ) -> Dict[str, Any]: - try: - details = self.chemical_resource.get_chemical_details( - identifier=identifier, - id_type="dtxsid", - subset=self.detail_subset, - ) - trace.append(self._metadata_trace(stage)) - if not isinstance(details, dict): - raise IdentifierResolutionError( - f"Unexpected payload when fetching details for '{identifier}'." - ) - return details - except CtxApiError as exc: - trace.append(self._metadata_trace(stage)) - raise IdentifierResolutionError( - f"CTX API error retrieving details for '{identifier}': {exc}" - ) from exc - except Exception as exc: # pragma: no cover - defensive - trace.append(self._metadata_trace(stage)) - raise IdentifierResolutionError( - f"Failed to retrieve details for '{identifier}': {exc}" - ) from exc - - def _extract_dtxsid(self, record: Dict[str, Any]) -> str: - for key in ("dtxsid", "DTXSID", "dtxSid", "sid"): - value = record.get(key) - if isinstance(value, str) and value.strip(): - return value.strip() - raise IdentifierResolutionError("Search result did not include a DTXSID.") - - def _build_resolution( - self, - *, - input_value: str, - input_type: str, - matched_record: Dict[str, Any], - detail_record: Dict[str, Any], - warnings: List[str], - trace: List[MetadataTrace], - ) -> IdentifierResolution: - dtxsid = self._extract_dtxsid(detail_record or matched_record) - synonyms = self._extract_synonyms(detail_record) - casrn = self._extract_field( - ("casrn", "cas", "CASRN", "casNumber"), detail_record, matched_record - ) - preferred_name = self._extract_field( - ("preferredName", "preferred_name", "name"), - detail_record, - matched_record, - ) - - return IdentifierResolution( - input_identifier=input_value, - input_type=input_type, - dtxsid=dtxsid, - matched_record=matched_record, - detail_record=detail_record, - preferred_name=preferred_name, - casrn=casrn, - synonyms=synonyms, - warnings=warnings, - 
trace=trace, - ) - - def _extract_synonyms(self, detail: Dict[str, Any]) -> List[str]: - raw = ( - detail.get("synonyms") or detail.get("synonym") or detail.get("synonymList") - ) - values: Iterable[Any] - if isinstance(raw, (list, tuple)): - values = raw - elif isinstance(raw, str): - values = [raw] - elif isinstance(raw, dict): - values = raw.values() - else: - values = [] - result = [] - for item in values: - if not item: - continue - if isinstance(item, str): - trimmed = item.strip() - if trimmed and trimmed not in result: - result.append(trimmed) - return result - - def _extract_field( - self, - keys: Tuple[str, ...], - detail: Dict[str, Any], - fallback: Dict[str, Any], - ) -> Optional[str]: - for source in (detail, fallback): - for key in keys: - value = source.get(key) - if isinstance(value, str) and value.strip(): - return value.strip() - return None - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/models.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, List, Optional - -from pydantic import BaseModel, Field - -from epacomp_tox.predictive import ADCheckResult, PredictiveRequest, PredictiveResponse - - -class MetadataTrace(BaseModel): - """Structured record of transport metadata captured during orchestration.""" - - step: str - metadata: Dict[str, Any] = Field(default_factory=dict) - - -class IdentifierResolution(BaseModel): - """Canonicalized identity data for orchestrator workflows.""" - - input_identifier: str - input_type: str - dtxsid: str - matched_record: Dict[str, Any] = Field(default_factory=dict) - detail_record: Dict[str, Any] = Field(default_factory=dict) - preferred_name: Optional[str] = None - casrn: Optional[str] = None - synonyms: List[str] = Field(default_factory=list) - warnings: List[str] = Field(default_factory=list) - trace: 
List[MetadataTrace] = Field(default_factory=list) - cache_hit: bool = False - - -class CtxDataBundle(BaseModel): - """CTX data payload and provenance captured before predictive stages.""" - - dtxsid: str - scenarios: List[str] = Field(default_factory=list) - hazard: Dict[str, List[Dict[str, Any]]] = Field(default_factory=dict) - exposure: Dict[str, List[Dict[str, Any]]] = Field(default_factory=dict) - cheminformatics: Dict[str, Any] = Field(default_factory=dict) - data_gaps: List[str] = Field(default_factory=list) - trace: List[MetadataTrace] = Field(default_factory=list) - cache_hit: bool = False - - -class PredictiveTask(BaseModel): - """Definition of a predictive call executed during orchestration.""" - - service: str - request: PredictiveRequest - scenario: Optional[str] = None - label: Optional[str] = None - - -class GuardrailEvent(BaseModel): - """Recorded guardrail outcome (denial, warning, or error).""" - - stage: str - component: str - status: str - code: Optional[str] - message: str - confidence: Optional[float] = None - timestamp: str - metadata: Dict[str, Any] = Field(default_factory=dict) - - -class PredictiveStepResult(BaseModel): - """Outcome of an individual predictive service invocation.""" - - service: str - status: str - scenario: Optional[str] = None - label: Optional[str] = None - request: PredictiveRequest - ad: Optional[ADCheckResult] = None - prediction: Optional[Dict[str, Any]] = None - metadata: Dict[str, Any] = Field(default_factory=dict) - error: Optional[str] = None - - -class PredictiveRunResult(BaseModel): - """Combined results for a predictive orchestration stage.""" - - results: List[PredictiveStepResult] = Field(default_factory=list) - guardrails: List[GuardrailEvent] = Field(default_factory=list) - succeeded: bool = True - - -class EvidenceScore(BaseModel): - """Weighted representation of evidence dimensions used in synthesis.""" - - analogue_coverage: float - evidence_quality: float - predictive_agreement: float - - -class 
EvidenceSynthesis(BaseModel): - """Structured result returned by the evidence grading engine.""" - - confidence_band: str - scores: EvidenceScore - narrative: str - recommended_actions: List[str] = Field(default_factory=list) - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/offline.py -==================================================================================================== -from __future__ import annotations - -from pathlib import Path -from typing import Any, Callable, Dict, Optional, Sequence - -from ..predictive.base import ADCheckResult, PredictiveRequest, PredictiveServiceBase -from .ctx_data import CtxDataAssembler -from .evidence import EvidenceSynthesizer -from .identifiers import IdentifierResolver -from .predictive import PredictiveCoordinator -from .workflow import GenRAOrchestrator - -OFFLINE_SCENARIOS = [ - "acute_toxicity", - "exposure_prioritization", - "genra_read_across", -] - - -class _OfflineChemicalResource: - def __init__(self) -> None: - self._metadata: Dict[str, Any] = {} - - def search_chemical(self, query: str, search_type: str) -> list[dict[str, Any]]: - self._metadata = {"status": 200} - return [ - { - "dtxsid": "DTXSID0000001", - "preferredName": "Offline Example", - "casrn": "50-00-0", - } - ] - - def get_chemical_details( - self, identifier: str, id_type: str, subset: str = "default" - ) -> dict[str, Any]: - self._metadata = {"status": 200} - return { - "dtxsid": "DTXSID0000001", - "preferredName": "Offline Example", - "casrn": "50-00-0", - "synonyms": ["Formaldehyde", "Methanal"], - } - - def get_last_metadata(self) -> Dict[str, Any]: - return dict(self._metadata) - - -class _OfflineHazardResource: - def __init__(self) -> None: - self._metadata: Dict[str, Any] = {} - - def search_hazard( - self, data_type: str, dtxsid: str, summary: bool = True - ) -> list[dict[str, Any]]: - self._metadata = {"status": 200} - return [{"endpoint": 
class _OfflineExposureResource:
    """Deterministic stand-in for the CTX exposure resource."""

    def __init__(self) -> None:
        self._metadata: Dict[str, Any] = {}

    def _record_ok(self) -> None:
        # Every offline call reports a successful HTTP-like status.
        self._metadata = {"status": 200}

    def search_httk(self, dtxsids: Sequence[str]) -> list[dict[str, Any]]:
        self._record_ok()
        return [{"kmp": 1.2, "unit": "1/hr"}]

    def search_cpdat(
        self, vocab_name: str, dtxsids: Sequence[str]
    ) -> list[dict[str, Any]]:
        self._record_ok()
        return [{"vocab": vocab_name, "label": "Consumer product"}]

    def search_qsurs(self, dtxsids: Sequence[str]) -> list[dict[str, Any]]:
        self._record_ok()
        return [{"probability": 0.42}]

    def search_exposures(
        self, data_type: str, dtxsids: Sequence[str]
    ) -> list[dict[str, Any]]:
        self._record_ok()
        return [{"dataset": data_type, "value": "offline"}]

    def get_last_metadata(self) -> Dict[str, Any]:
        return dict(self._metadata)


class _OfflineCheminformaticsResource:
    """Deterministic stand-in for the CTX cheminformatics resource."""

    def __init__(self) -> None:
        self._metadata: Dict[str, Any] = {}

    def search_toxprints(self, chemical: str) -> dict[str, Any]:
        self._metadata = {"status": 200}
        return {"toxprints": ["FP_001", "FP_057"]}

    def get_last_metadata(self) -> Dict[str, Any]:
        return dict(self._metadata)


class OfflinePredictiveService(PredictiveServiceBase):
    """Predictive service stub returning deterministic GenRA-like results."""

    def __init__(self) -> None:
        super().__init__(
            config={
                "name": "Offline GenRA",
                "version": "0.1",
                "ad_model_name": "Offline GenRA",
            }
        )

    def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]:
        # Fixed low-concern answer so downstream synthesis is reproducible.
        return {
            "prediction": "Read-across suggests low concern.",
            "confidence": 0.82,
        }

    def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult:
        # Always in-domain for the offline stub.
        return ADCheckResult(in_domain=True, confidence=0.85, details={"analogues": 4})

    def _build_metadata(
        self, request: PredictiveRequest, ad_result: ADCheckResult
    ) -> Dict[str, Any]:
        # Extend the base metadata with the evidence axes the synthesizer reads.
        metadata = super()._build_metadata(request, ad_result)
        metadata.update(
            {
                "analogueCoverage": 0.88,
                "evidenceQuality": 0.74,
                "predictiveAgreement": ad_result.confidence,
            }
        )
        return metadata


def build_offline_orchestrator(
    *,
    persistence_dir: Optional[Path] = None,
    clock: Optional[Callable[[], str]] = None,
) -> GenRAOrchestrator:
    """Construct an orchestrator wired with offline stub resources."""
    offline_service = OfflinePredictiveService()
    identifier_resolver = IdentifierResolver(
        chemical_resource=_OfflineChemicalResource(), cache_ttl=0
    )
    ctx_assembler = CtxDataAssembler(
        hazard_resource=_OfflineHazardResource(),
        exposure_resource=_OfflineExposureResource(),
        cheminformatics_resource=_OfflineCheminformaticsResource(),
        include_toxprints=True,
        cache_ttl=0,
    )
    return GenRAOrchestrator(
        identifier_resolver=identifier_resolver,
        ctx_data_assembler=ctx_assembler,
        predictive_coordinator=PredictiveCoordinator(
            {"offline_genra": offline_service}
        ),
        persistence_dir=persistence_dir,
        evidence_synthesizer=EvidenceSynthesizer(),
        clock=clock or (lambda: ""),
    )


__all__ = [
    "OFFLINE_SCENARIOS",
    "build_offline_orchestrator",
    "OfflinePredictiveService",
]
micro-service execution with applicability guardrails.""" - - def __init__( - self, - services: Dict[str, PredictiveServiceBase], - *, - default_require_ad_clearance: bool = True, - stage_name: str = "RunPredictiveModels", - ) -> None: - self._services = dict(services) - self.default_require_ad_clearance = default_require_ad_clearance - self.stage_name = stage_name - - def register_service(self, name: str, service: PredictiveServiceBase) -> None: - """Register or replace a predictive service.""" - self._services[name] = service - - def run( - self, - tasks: Iterable[PredictiveTask], - *, - require_ad_clearance: Optional[bool] = None, - ) -> PredictiveRunResult: - """Execute predictive tasks and aggregate guardrail events.""" - require = ( - self.default_require_ad_clearance - if require_ad_clearance is None - else require_ad_clearance - ) - results: List[PredictiveStepResult] = [] - guardrails: List[GuardrailEvent] = [] - succeeded = True - - for task in tasks: - service = self._ensure_service(task.service) - ad_result: Optional[ADCheckResult] = None - try: - ad_result = service.check_applicability_domain(task.request) - except Exception as exc: # pragma: no cover - defensive - succeeded = False - results.append( - PredictiveStepResult( - service=task.service, - status="error", - scenario=task.scenario, - label=task.label, - request=task.request, - error=str(exc), - ) - ) - guardrails.append( - self._make_guardrail_event( - component=task.service, - status="error", - code=self._resolve_error_code(service), - message=f"Applicability domain check failed ({exc})", - confidence=None, - metadata={"stage": "check_applicability_domain"}, - ) - ) - continue - - policy = self._resolve_policy(service) - if not ad_result.in_domain and (require or policy == "block"): - succeeded = False - guardrails.append( - self._make_guardrail_event( - component=task.service, - status="denied", - code=self._resolve_error_code(service), - message="Applicability domain check failed.", - 
confidence=ad_result.confidence, - metadata={"policy": policy}, - ) - ) - results.append( - PredictiveStepResult( - service=task.service, - status="denied", - scenario=task.scenario, - label=task.label, - request=task.request, - ad=ad_result, - metadata={"policy": policy}, - ) - ) - continue - - try: - prediction = service.predict(task.request) - except Exception as exc: # pragma: no cover - defensive - succeeded = False - guardrails.append( - self._make_guardrail_event( - component=task.service, - status="error", - code=self._resolve_error_code(service), - message=f"Prediction failed ({exc})", - confidence=ad_result.confidence if ad_result else None, - metadata={"policy": policy}, - ) - ) - results.append( - PredictiveStepResult( - service=task.service, - status="error", - scenario=task.scenario, - label=task.label, - request=task.request, - ad=ad_result, - error=str(exc), - metadata={"policy": policy}, - ) - ) - continue - - step_status = "success" - if not prediction.applicability_domain.in_domain: - guardrails.append( - self._make_guardrail_event( - component=task.service, - status="warning", - code=self._resolve_error_code(service), - message="Applicability domain warning.", - confidence=prediction.applicability_domain.confidence, - metadata={"policy": policy}, - ) - ) - if policy == "block": - step_status = "denied" - succeeded = False - - results.append( - PredictiveStepResult( - service=task.service, - status=step_status, - scenario=task.scenario, - label=task.label, - request=task.request, - ad=prediction.applicability_domain, - prediction=prediction.prediction, - metadata=prediction.metadata, - ) - ) - - return PredictiveRunResult( - results=results, guardrails=guardrails, succeeded=succeeded - ) - - # Internal utilities ----------------------------------------------------- - - def _ensure_service(self, name: str) -> PredictiveServiceBase: - if name not in self._services: - raise KeyError(f"Predictive service '{name}' is not registered.") - return 
self._services[name] - - def _resolve_policy(self, service: PredictiveServiceBase) -> str: - definition = getattr(service, "ad_definition", None) or {} - policy = definition.get("policy") if isinstance(definition, dict) else None - if isinstance(policy, str): - return policy.lower() - return "block" - - def _resolve_error_code(self, service: PredictiveServiceBase) -> Optional[str]: - definition = getattr(service, "ad_definition", None) or {} - if isinstance(definition, dict): - return definition.get("errorCode") - return None - - def _make_guardrail_event( - self, - *, - component: str, - status: str, - message: str, - code: Optional[str], - confidence: Optional[float], - metadata: Optional[Dict[str, str]] = None, - ) -> GuardrailEvent: - timestamp = datetime.now(timezone.utc).isoformat() - return GuardrailEvent( - stage=self.stage_name, - component=component, - status=status, - code=code, - message=message, - confidence=confidence, - timestamp=timestamp, - metadata=metadata or {}, - ) - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/utils.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, Optional - - -def sanitize_metadata(metadata: Optional[Dict[str, Any]]) -> Dict[str, Any]: - """ - Convert transport/resource metadata into JSON-serializable primitives. - - ctxpy returns dataclass instances (e.g., RateLimitInfo) inside the metadata - payload. Downstream audit bundles expect plain dictionaries, so this helper - normalizes nested structures while preserving the original keys. 
- """ - - def _convert(value: Any) -> Any: - if hasattr(value, "__dataclass_fields__"): - return { - field: getattr(value, field) - for field in value.__dataclass_fields__.keys() # type: ignore[attr-defined] - } - if isinstance(value, dict): - return {key: _convert(val) for key, val in value.items()} - if isinstance(value, (list, tuple)): - return [_convert(item) for item in value] - return value - - if not metadata: - return {} - return {key: _convert(val) for key, val in metadata.items()} - -==================================================================================================== -FILE: src/epacomp_tox/orchestrator/workflow.py -==================================================================================================== -from __future__ import annotations - -import json -from dataclasses import asdict, is_dataclass -from pathlib import Path -from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence -from uuid import uuid4 - -from .audit import AuditBundleStore -from .ctx_data import CtxDataAssembler, CtxDataAssemblyError -from .evidence import EvidenceSynthesizer -from .identifiers import IdentifierResolutionError, IdentifierResolver -from .models import ( - CtxDataBundle, - EvidenceSynthesis, - GuardrailEvent, - IdentifierResolution, - MetadataTrace, - PredictiveRunResult, - PredictiveStepResult, - PredictiveTask, -) -from .predictive import PredictiveCoordinator -from .utils import sanitize_metadata - - -def _serialize(obj: Any) -> Any: - if obj is None: - return None - if hasattr(obj, "model_dump"): - return obj.model_dump() - if is_dataclass(obj): - return asdict(obj) - if isinstance(obj, (list, tuple)): - return [_serialize(item) for item in obj] - if isinstance(obj, dict): - return {key: _serialize(val) for key, val in obj.items()} - return obj - - -class GenRAOrchestrator: - """Controller that ties identifier resolution, CTX data staging, and predictive runs.""" - - def __init__( - self, - *, - identifier_resolver: 
IdentifierResolver, - ctx_data_assembler: CtxDataAssembler, - predictive_coordinator: PredictiveCoordinator, - persistence_dir: Optional[Path] = None, - clock: Callable[[], str] = lambda: None, - evidence_synthesizer: Optional[EvidenceSynthesizer] = None, - ) -> None: - self.identifier_resolver = identifier_resolver - self.ctx_data_assembler = ctx_data_assembler - self.predictive_coordinator = predictive_coordinator - self.bundle_store = ( - AuditBundleStore(persistence_dir) if persistence_dir else None - ) - self._clock = clock - self.evidence_synthesizer = evidence_synthesizer or EvidenceSynthesizer() - - def run_workflow( - self, - *, - target_identifier: str, - identifier_type: Optional[str] = None, - scenarios: Optional[Sequence[str]] = None, - predictive_plan: Iterable[PredictiveTask], - workflow_run_id: Optional[str] = None, - options: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: - run_id = workflow_run_id or str(uuid4()) - options = options or {} - guardrails: List[GuardrailEvent] = [] - timeline: List[Dict[str, Any]] = [] - - resolution: IdentifierResolution - try: - resolution = self.identifier_resolver.resolve( - target_identifier, identifier_type - ) - timeline.append( - self._timeline_entry("NormalizeIdentifier", resolution.trace) - ) - except IdentifierResolutionError as exc: - guardrails.append( - GuardrailEvent( - stage="NormalizeIdentifier", - component="IdentifierResolver", - status="denied", - code="IDENTIFIER_NOT_RESOLVED", - message=str(exc), - confidence=None, - timestamp=self._clock() or "", - metadata={}, - ) - ) - return self._assemble_bundle( - run_id=run_id, - resolution=None, - ctx_bundle=None, - predictive_result=None, - guardrails=guardrails, - timeline=timeline, - scenarios=list(scenarios or []), - options=options, - status="denied", - ) - - ctx_bundle: CtxDataBundle - try: - ctx_bundle = self.ctx_data_assembler.assemble( - resolution.dtxsid, - scenarios=scenarios, - ) - 
timeline.append(self._timeline_entry("AssembleCtxData", ctx_bundle.trace)) - except CtxDataAssemblyError as exc: - guardrails.append( - GuardrailEvent( - stage="AssembleCtxData", - component="CtxDataAssembler", - status="error", - code="CTX_DATA_UNAVAILABLE", - message=str(exc), - confidence=None, - timestamp=self._clock() or "", - metadata={}, - ) - ) - return self._assemble_bundle( - run_id=run_id, - resolution=resolution, - ctx_bundle=None, - predictive_result=None, - guardrails=guardrails, - timeline=timeline, - scenarios=list(scenarios or []), - options=options, - status="error", - ) - - predictive_result: PredictiveRunResult = self.predictive_coordinator.run( - predictive_plan, - require_ad_clearance=options.get("requireAdClearance"), - ) - guardrails.extend(predictive_result.guardrails) - timeline.append( - { - "stage": "RunPredictiveModels", - "metadata": [ - self._result_metadata(step) for step in predictive_result.results - ], - } - ) - - status = "success" if predictive_result.succeeded else "error" - evidence = self.evidence_synthesizer.synthesize(predictive_result.results) - - bundle = self._assemble_bundle( - run_id=run_id, - resolution=resolution, - ctx_bundle=ctx_bundle, - predictive_result=predictive_result, - evidence=evidence, - guardrails=guardrails, - timeline=timeline, - scenarios=list(scenarios or []), - options=options, - status=status, - ) - storage = self._persist_bundle( - bundle, - ctx_bundle=ctx_bundle, - predictive_result=predictive_result, - evidence=evidence, - ) - if storage: - bundle["storage"] = storage - return bundle - - # Internal helpers ----------------------------------------------------- - - def _timeline_entry( - self, stage: str, trace: Sequence[MetadataTrace] - ) -> Dict[str, Any]: - return { - "stage": stage, - "metadata": [_serialize(item) for item in trace], - } - - def _result_metadata(self, step: PredictiveStepResult) -> Dict[str, Any]: - payload = { - "service": step.service, - "status": step.status, - "scenario": 
step.scenario, - "label": step.label, - "metadata": step.metadata, - } - if step.ad: - payload["ad"] = step.ad.model_dump() - return payload - - def _assemble_bundle( - self, - *, - run_id: str, - resolution: Optional[IdentifierResolution], - ctx_bundle: Optional[CtxDataBundle], - predictive_result: Optional[PredictiveRunResult], - evidence: Optional[EvidenceSynthesis], - guardrails: Sequence[GuardrailEvent], - timeline: Sequence[Dict[str, Any]], - scenarios: List[str], - options: Dict[str, Any], - status: str, - ) -> Dict[str, Any]: - bundle: Dict[str, Any] = { - "bundleVersion": "0.1", - "workflowRunId": run_id, - "status": status, - "scenarios": scenarios, - "options": options, - "guardrails": [_serialize(item) for item in guardrails], - "timeline": timeline, - } - - if resolution: - bundle["target"] = { - "dtxsid": resolution.dtxsid, - "inputIdentifier": { - "value": resolution.input_identifier, - "type": resolution.input_type, - }, - "preferredName": resolution.preferred_name, - "casrn": resolution.casrn, - "synonyms": resolution.synonyms, - "warnings": resolution.warnings, - } - - if ctx_bundle: - bundle["ctxData"] = { - "hazard": ctx_bundle.hazard, - "exposure": ctx_bundle.exposure, - "cheminformatics": ctx_bundle.cheminformatics, - "dataGaps": ctx_bundle.data_gaps, - } - - if predictive_result: - bundle["predictive"] = { - "results": [ - { - "service": step.service, - "status": step.status, - "scenario": step.scenario, - "label": step.label, - "request": step.request.model_dump(), - "ad": step.ad.model_dump() if step.ad else None, - "prediction": step.prediction, - "metadata": sanitize_metadata(step.metadata), - "error": step.error, - } - for step in predictive_result.results - ], - } - - if evidence: - bundle["evidence"] = { - "confidenceBand": evidence.confidence_band, - "scores": evidence.scores.model_dump(), - "narrative": evidence.narrative, - "recommendedActions": evidence.recommended_actions, - } - - return bundle - - def _persist_bundle( - self, - 
bundle: Dict[str, Any], - *, - ctx_bundle: Optional[CtxDataBundle], - predictive_result: Optional[PredictiveRunResult], - evidence: Optional[EvidenceSynthesis], - ) -> Optional[Dict[str, Any]]: - if not self.bundle_store: - return None - attachments: Dict[str, str] = {} - if ctx_bundle: - attachments["ctx_data.json"] = json.dumps( - _serialize(ctx_bundle), indent=2, sort_keys=True - ) - if predictive_result: - attachments["predictive_results.json"] = json.dumps( - _serialize(predictive_result), - indent=2, - sort_keys=True, - ) - if evidence: - attachments["evidence.json"] = json.dumps( - _serialize(evidence), indent=2, sort_keys=True - ) - return self.bundle_store.save(bundle, attachments=attachments) - -==================================================================================================== -FILE: src/epacomp_tox/predictive/__init__.py -==================================================================================================== -"""Predictive micro-service utilities.""" - -from .base import ( - ADCheckResult, - PredictiveRequest, - PredictiveResponse, - PredictiveServiceBase, -) -from .clients import PredictiveClient -from .genra_service import GenRAService -from .opera_service import OperaPropertyService -from .router import build_predictive_router -from .test_service import TestConsensusPredictiveService - -__all__ = [ - "PredictiveServiceBase", - "PredictiveRequest", - "PredictiveResponse", - "ADCheckResult", - "PredictiveClient", - "TestConsensusPredictiveService", - "OperaPropertyService", - "GenRAService", - "build_predictive_router", -] - -==================================================================================================== -FILE: src/epacomp_tox/predictive/base.py -==================================================================================================== -from __future__ import annotations - -import logging -from abc import ABC, abstractmethod -from typing import Any, Dict, Optional - -from pydantic import 
BaseModel - -logger = logging.getLogger(__name__) - -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore - - -class PredictiveRequest(BaseModel): - """Base request model for predictive micro-servers.""" - - chemical_identifier: str - identifier_type: str = "dtxsid" - - -class ADCheckResult(BaseModel): - """Standard response for applicability domain evaluations.""" - - in_domain: bool - confidence: float - details: Dict[str, Any] = {} - - -class PredictiveResponse(BaseModel): - """Standardized predictive response envelope.""" - - prediction: Dict[str, Any] - applicability_domain: ADCheckResult - metadata: Dict[str, Any] = {} - - -class PredictiveServiceBase(ABC): - """Shared scaffolding for predictive micro-servers.""" - - def __init__( - self, - *, - config: Dict[str, Any], - ad_store: Optional[ApplicabilityDomainStore] = None, - ) -> None: - self.config = config - self.logger = logger.getChild(self.__class__.__name__) - self.ad_store = ad_store or ApplicabilityDomainStore() - self.ad_definition = self._resolve_ad_definition() - - def predict(self, request: PredictiveRequest) -> PredictiveResponse: - """Run applicability domain check, prediction, and assemble response.""" - ad_result = self.check_applicability_domain(request) - policy_metadata = self._apply_ad_policy(request, ad_result) - payload = self._predict_impl(request) - metadata = self._build_metadata(request, ad_result) - metadata.update(policy_metadata) - return PredictiveResponse( - prediction=payload, - applicability_domain=ad_result, - metadata=metadata, - ) - - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - """Evaluate whether the request falls within the validated domain.""" - return self._check_ad_impl(request) - - @abstractmethod - def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]: - """Model-specific prediction.""" - - @abstractmethod - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - 
"""Model-specific AD evaluation.""" - - def _build_metadata( - self, request: PredictiveRequest, ad_result: ADCheckResult - ) -> Dict[str, Any]: - """Hook for adding provenance/telemetry to responses.""" - metadata: Dict[str, Any] = { - "identifier": request.chemical_identifier, - "identifier_type": request.identifier_type, - "model": self.config.get("name"), - "model_version": self.config.get("version"), - } - if self.ad_definition: - metadata["adPolicy"] = self.ad_definition.get("policy") - metadata["adErrorCode"] = self.ad_definition.get("errorCode") - metadata["adDefinition"] = { - "model": self.ad_definition.get("model"), - "version": self.ad_definition.get("version"), - } - return metadata - - def _resolve_ad_definition(self) -> Optional[Dict[str, Any]]: - target = self.config.get("ad_model_name") or self.config.get("name") - if not target: - return None - definition = self.ad_store.get_definition(target) - if not definition: - self.logger.debug("No AD definition found for %s", target) - return definition - - def _apply_ad_policy( - self, request: PredictiveRequest, ad_result: ADCheckResult - ) -> Dict[str, Any]: - definition = self.ad_definition or {} - policy = (definition.get("policy") or "block").lower() - metadata: Dict[str, Any] = {} - if not ad_result.in_domain: - message = ( - f"Applicability domain check failed for {request.chemical_identifier}" - ) - error_code = definition.get("errorCode") - if policy == "block": - raise ValueError(error_code or message) - if policy == "warn": - metadata["adWarning"] = True - metadata["adMessage"] = error_code or message - self.logger.warning("%s", metadata["adMessage"]) - else: - # Unknown policy defaults to block - raise ValueError(error_code or message) - return metadata - -==================================================================================================== -FILE: src/epacomp_tox/predictive/clients.py 
-==================================================================================================== -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import Any - -from epacomp_tox.predictive.base import ADCheckResult, PredictiveRequest - - -class PredictiveClient(ABC): - """Minimal client interface for predictive services.""" - - @abstractmethod - def predict(self, request: PredictiveRequest) -> dict[str, Any]: - """Execute model prediction.""" - - @abstractmethod - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - """Evaluate applicability domain for the request.""" - -==================================================================================================== -FILE: src/epacomp_tox/predictive/genra_service.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, Optional - -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore -from epacomp_tox.predictive.base import ( - ADCheckResult, - PredictiveRequest, - PredictiveServiceBase, -) -from epacomp_tox.predictive.clients import PredictiveClient - - -class GenRAClient(PredictiveClient): - """Wrapper interface for GenRA analogue search + prediction service.""" - - def __init__(self, client: Any) -> None: - self.client = client - - def predict(self, request: PredictiveRequest) -> Dict[str, Any]: - return self.client.predict( - chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - result = self.client.check_applicability_domain( - chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - return ADCheckResult( - in_domain=result.get("in_domain", False), - confidence=result.get("confidence", 0.0), - details=result, - ) - - -class 
GenRAService(PredictiveServiceBase): - """Predictive service wrapper for the GenRA read-across workflow.""" - - def __init__( - self, - *, - config: Dict[str, Any], - client: Optional[PredictiveClient] = None, - ad_store: Optional[ApplicabilityDomainStore] = None, - ) -> None: - super().__init__(config=config, ad_store=ad_store) - self.client = client - - def _ensure_client(self) -> PredictiveClient: - if self.client is None: - raise RuntimeError("GenRA client not configured") - return self.client - - def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]: - client = self._ensure_client() - return client.predict(request) - - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - client = self._ensure_client() - return client.check_applicability_domain(request) - -==================================================================================================== -FILE: src/epacomp_tox/predictive/opera_service.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, Optional - -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore -from epacomp_tox.predictive.base import ( - ADCheckResult, - PredictiveRequest, - PredictiveServiceBase, -) -from epacomp_tox.predictive.clients import PredictiveClient - - -class OperaClient(PredictiveClient): - """Wrapper around OPERA CLI/API integration.""" - - def __init__(self, client: Any) -> None: - self.client = client - - def predict(self, request: PredictiveRequest) -> Dict[str, Any]: - payload = self.client.predict_property( - chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - return payload - - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - result = self.client.check_applicability_domain( - chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - return 
ADCheckResult( - in_domain=result.get("in_domain", False), - confidence=result.get("confidence", 0.0), - details=result, - ) - - -class OperaPropertyService(PredictiveServiceBase): - """Predictive service wrapper for OPERA property models.""" - - def __init__( - self, - *, - config: Dict[str, Any], - client: Optional[PredictiveClient] = None, - ad_store: Optional[ApplicabilityDomainStore] = None, - ) -> None: - super().__init__(config=config, ad_store=ad_store) - self.client = client - - def _ensure_client(self) -> PredictiveClient: - if self.client is None: - raise RuntimeError("OPERA client not configured") - return self.client - - def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]: - client = self._ensure_client() - return client.predict(request) - - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - client = self._ensure_client() - return client.check_applicability_domain(request) - -==================================================================================================== -FILE: src/epacomp_tox/predictive/router.py -==================================================================================================== -from __future__ import annotations - -from typing import Callable, Dict, Optional - -from fastapi import APIRouter, Depends, HTTPException, status - -from epacomp_tox.contracts import validate_payload -from epacomp_tox.predictive.base import ( - ADCheckResult, - PredictiveRequest, - PredictiveResponse, - PredictiveServiceBase, -) - -PREDICT_RESPONSE_SCHEMA = ("predictive", "predict.response.schema") -AD_RESPONSE_SCHEMA = ("predictive", "ad_check.response.schema") - - -def build_predictive_router( - *, - service_factory: Callable[[], PredictiveServiceBase], - prefix: str = "", - tags: Optional[list[str]] = None, -) -> APIRouter: - """Construct a router exposing predict and AD check endpoints.""" - router = APIRouter(prefix=prefix, tags=tags or ["predictive"]) - - async def get_service() -> 
PredictiveServiceBase: - return service_factory() - - @router.post( - "/predict", - response_model=PredictiveResponse, - summary="Run predictive model with applicability domain enforcement", - ) - async def predict_endpoint( - body: PredictiveRequest, service: PredictiveServiceBase = Depends(get_service) - ) -> PredictiveResponse: - try: - response = service.predict(body) - validate_payload( - response.model_dump(), - namespace=PREDICT_RESPONSE_SCHEMA[0], - name=PREDICT_RESPONSE_SCHEMA[1], - ) - return response - except ValueError as exc: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, detail=str(exc) - ) from exc - - @router.post( - "/check_applicability_domain", - response_model=ADCheckResult, - summary="Evaluate applicability domain for the given request", - ) - async def ad_endpoint( - body: PredictiveRequest, service: PredictiveServiceBase = Depends(get_service) - ) -> ADCheckResult: - result = service.check_applicability_domain(body) - validate_payload( - result.model_dump(), - namespace=AD_RESPONSE_SCHEMA[0], - name=AD_RESPONSE_SCHEMA[1], - ) - return result - - return router - -==================================================================================================== -FILE: src/epacomp_tox/predictive/test_service.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, Optional - -from ctxpy import CtxApiError -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore -from epacomp_tox.predictive.base import ( - ADCheckResult, - PredictiveRequest, - PredictiveServiceBase, -) -from epacomp_tox.predictive.clients import PredictiveClient - - -class TestClient(PredictiveClient): - """Wrapper around ctxpy TEST client.""" - - def __init__(self, client: Any) -> None: - self.client = client - - def predict(self, request: PredictiveRequest) -> Dict[str, Any]: - try: - return self.client.predict( - 
chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - except CtxApiError as exc: # pragma: no cover - passthrough - raise ValueError(f"TEST prediction failed: {exc}") from exc - - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - result = self.client.check_applicability_domain( - chemical=request.chemical_identifier, - identifier_type=request.identifier_type, - ) - return ADCheckResult( - in_domain=result.get("in_domain", False), - confidence=result.get("confidence", 0.0), - details=result, - ) - - -class TestConsensusPredictiveService(PredictiveServiceBase): - """Predictive service wrapper for TEST consensus toxicity models.""" - - def __init__( - self, - *, - config: Dict[str, Any], - client: Optional[PredictiveClient] = None, - ad_store: Optional[ApplicabilityDomainStore] = None, - ) -> None: - super().__init__(config=config, ad_store=ad_store) - self.client = client - - def _ensure_client(self) -> PredictiveClient: - if self.client is None: - raise RuntimeError("TEST client not configured") - return self.client - - def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]: - client = self._ensure_client() - payload = client.predict(request) - return payload - - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - client = self._ensure_client() - return client.check_applicability_domain(request) - - -TestConsensusPredictiveService.__test__ = False - -==================================================================================================== -FILE: src/epacomp_tox/metadata/__init__.py -==================================================================================================== -"""Metadata utilities for CompTox model cards.""" - -from .model_cards import ModelCardFilter, ModelCardStore - -__all__ = ["ModelCardStore", "ModelCardFilter"] - -==================================================================================================== -FILE: 
src/epacomp_tox/metadata/applicability.py -==================================================================================================== -from __future__ import annotations - -import json -from pathlib import Path -from typing import Any, Dict, Iterable, List, Optional, Tuple - -DEFAULT_AD_DIR = Path(Path.cwd(), "metadata", "applicability_domains") - - -class ApplicabilityDomainStore: - """File-backed access to applicability domain reference data.""" - - def __init__(self, directory: Optional[Path] = None): - self.directory = Path(directory or DEFAULT_AD_DIR) - self.directory.mkdir(parents=True, exist_ok=True) - - def list_definitions( - self, - *, - limit: Optional[int] = None, - cursor: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], Optional[str]]: - entries = list(self._iter_defs()) - start = int(cursor) if cursor else 0 - end = start + limit if limit else None - page = entries[start:end] - next_cursor = None - if end is not None and end < len(entries): - next_cursor = str(end) - return page, next_cursor - - def get_definition(self, model_name: str) -> Optional[Dict[str, Any]]: - model_name_lower = model_name.lower() - for entry in self._iter_defs(): - if entry["model"].lower() == model_name_lower: - return entry - return None - - def _iter_defs(self) -> Iterable[Dict[str, Any]]: - for path in sorted(self.directory.glob("*.json")): - try: - payload = json.loads(path.read_text(encoding="utf-8")) - except ( - OSError, - json.JSONDecodeError, - ): # pragma: no cover - logged upstream - continue - payload["path"] = str(path) - yield payload - -==================================================================================================== -FILE: src/epacomp_tox/metadata/model_cards.py -==================================================================================================== -from __future__ import annotations - -import hashlib -import json -from dataclasses import dataclass -from datetime import datetime -from pathlib import Path 
-from typing import Any, Dict, Iterable, List, Optional, Tuple - -DEFAULT_MODEL_CARD_DIR = Path(Path.cwd(), "metadata", "model_cards") - - -@dataclass -class ModelCardFilter: - model_name: Optional[str] = None - endpoint_contains: Optional[str] = None - compliance: Optional[str] = None # "approved" or "draft" - - -class ModelCardStore: - """Simple file-backed store for CompTox model cards.""" - - def __init__(self, directory: Optional[Path] = None): - self.directory = Path(directory or DEFAULT_MODEL_CARD_DIR) - self.directory.mkdir(parents=True, exist_ok=True) - - def list_cards( - self, - *, - filters: Optional[ModelCardFilter] = None, - limit: Optional[int] = None, - cursor: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], Optional[str]]: - entries = list(self._iter_cards()) - filtered = self._apply_filters(entries, filters) - start = int(cursor) if cursor else 0 - end = start + limit if limit else None - page = filtered[start:end] - next_cursor = None - if end is not None and end < len(filtered): - next_cursor = str(end) - return page, next_cursor - - def _iter_cards(self) -> Iterable[Dict[str, Any]]: - for path in sorted(self.directory.glob("*.json")): - try: - raw = path.read_text(encoding="utf-8") - payload = json.loads(raw) - except ( - OSError, - json.JSONDecodeError, - ): # pragma: no cover - logged upstream - continue - checksum = hashlib.sha256(raw.encode("utf-8")).hexdigest() - stat = path.stat() - yield { - "card": payload, - "checksum": checksum, - "path": str(path), - "lastModified": datetime.fromtimestamp(stat.st_mtime).isoformat(), - } - - @staticmethod - def _apply_filters( - entries: Iterable[Dict[str, Any]], filters: Optional[ModelCardFilter] - ) -> List[Dict[str, Any]]: - if not filters: - return list(entries) - result: List[Dict[str, Any]] = [] - for entry in entries: - card = entry["card"] - if filters.model_name: - model_name = card.get("modelDetails", {}).get("name", "") - if filters.model_name.lower() not in model_name.lower(): - 
continue - if filters.endpoint_contains: - endpoint = ( - card.get("oecdValidationPrinciples", {}) - .get("definedEndpoint", {}) - .get("description", "") - ) - if filters.endpoint_contains.lower() not in endpoint.lower(): - continue - if filters.compliance: - status = _compute_compliance_status(card) - if status != filters.compliance.lower(): - continue - result.append(entry) - return result - - -def _compute_compliance_status(card: Dict[str, Any]) -> str: - review = card.get("provenance", {}).get("reviewStatus", {}) - approved_by = review.get("approvedBy", []) - if approved_by: - return "approved" - return "draft" - -==================================================================================================== -FILE: src/epacomp_tox/metadata/validator.py -==================================================================================================== -from __future__ import annotations - -import json -from pathlib import Path -from typing import Iterable, List - -from jsonschema import ValidationError, validate - -DEFAULT_SCHEMA_PATH = Path("schemas/comptox_model_card.schema.json") -DEFAULT_CARDS_DIR = Path("metadata/model_cards") -DEFAULT_AD_DIR = Path("metadata/applicability_domains") - - -class MetadataValidationError(Exception): - """Raised when metadata validation fails.""" - - -def validate_model_cards( - *, - cards_dir: Path = DEFAULT_CARDS_DIR, - schema_path: Path = DEFAULT_SCHEMA_PATH, -) -> None: - schema = json.loads(schema_path.read_text(encoding="utf-8")) - errors: List[str] = [] - for path in sorted(cards_dir.glob("*.json")): - try: - payload = json.loads(path.read_text(encoding="utf-8")) - validate(instance=payload, schema=schema) - except (ValidationError, json.JSONDecodeError) as exc: - errors.append(f"{path}: {exc}") - if errors: - raise MetadataValidationError("\n".join(errors)) - - -def validate_applicability_domains(*, directory: Path = DEFAULT_AD_DIR) -> None: - required_fields = {"model", "version", "criteria", "policy"} - errors: 
List[str] = [] - for path in sorted(directory.glob("*.json")): - try: - payload = json.loads(path.read_text(encoding="utf-8")) - except json.JSONDecodeError as exc: - errors.append(f"{path}: invalid JSON: {exc}") - continue - missing = required_fields - set(payload.keys()) - if missing: - errors.append(f"{path}: missing fields {sorted(missing)}") - if not isinstance(payload.get("criteria"), list): - errors.append(f"{path}: 'criteria' must be a list") - if errors: - raise MetadataValidationError("\n".join(errors)) - - -def validate_all() -> None: - validate_model_cards() - validate_applicability_domains() - -==================================================================================================== -FILE: src/epacomp_tox/resources/base.py -==================================================================================================== -import random -import time -from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Optional - -from ctxpy import CtxApiError -from epacomp_tox.config import get_retry_config -from epacomp_tox.validators import ensure_list, ensure_object, to_serializable - - -class BaseResource(ABC): - """ - Base class for all MCP resources. - - A resource represents a collection of related data and functionality - from the EPA CompTox APIs. - """ - - def __init__(self, api_key: str): - """ - Initialize the resource. - - Args: - api_key: EPA CompTox API key. - """ - self.api_key = api_key - self._last_metadata: Dict[str, Any] = {} - - def _with_retry( - self, - fn: Callable[[], Any], - *, - retries: Optional[int] = None, - base_delay: Optional[float] = None, - ) -> Any: - """ - Call a function with basic exponential backoff and jitter on transient errors. - - Retries on generic Exceptions to avoid tight coupling to underlying HTTP client types. 
- """ - if retries is None or base_delay is None: - r, b = get_retry_config() - retries = retries if retries is not None else r - base_delay = base_delay if base_delay is not None else b - attempt = 0 - while True: - try: - result = fn() - self._capture_last_metadata() - return result - except CtxApiError as exc: - self._last_metadata = { - "status": exc.status, - "request_id": exc.request_id, - "rate_limit": exc.rate_limit, - "retry_after": exc.retry_after, - } - attempt += 1 - if attempt > retries or not exc.retryable: - raise - sleep_for = base_delay * (2 ** (attempt - 1)) - sleep_for = sleep_for * (0.8 + random.random() * 0.4) - time.sleep(sleep_for) - except Exception as e: - attempt += 1 - if attempt > retries: - raise - # Exponential backoff with jitter - sleep_for = base_delay * (2 ** (attempt - 1)) - sleep_for = sleep_for * (0.8 + random.random() * 0.4) - time.sleep(sleep_for) - - def _ensure_list(self, value: Any) -> List[Any]: - """Normalize value into a list that is JSON-serializable.""" - serialized = to_serializable(value) - return ensure_list(serialized) - - def _ensure_object(self, value: Any, *, allow_list: bool = False) -> Dict[str, Any]: - """Normalize value into a mapping; optionally wrap list responses.""" - serialized = to_serializable(value) - return ensure_object(serialized, allow_list=allow_list) - - def _capture_last_metadata(self) -> None: - client = getattr(self, "client", None) - if client is not None and hasattr(client, "last_metadata"): - self._last_metadata = client.last_metadata - - def get_last_metadata(self) -> Dict[str, Any]: - """Return metadata captured from the most recent CTX API call.""" - return self._last_metadata - - @property - @abstractmethod - def name(self) -> str: - """Get the resource name.""" - pass - - @property - @abstractmethod - def description(self) -> str: - """Get the resource description.""" - pass - - @abstractmethod - def get_tools(self) -> List[Dict[str, Any]]: - """ - Get a list of tools provided by 
this resource. - - Returns: - List of tool definitions. - """ - pass - - def has_tool(self, tool_name: str) -> bool: - """ - Check if this resource provides the given tool. - - Args: - tool_name: Name of the tool to check. - - Returns: - True if the tool is provided by this resource, False otherwise. - """ - return any(tool["name"] == tool_name for tool in self.get_tools()) - - @abstractmethod - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - """ - Execute a tool with the given parameters. - - Args: - tool_name: Name of the tool to execute. - parameters: Parameters for the tool. - - Returns: - Tool execution result. - - Raises: - ValueError: If the tool is not found or parameters are invalid. - """ - pass - -==================================================================================================== -FILE: src/epacomp_tox/resources/bioactivity.py -==================================================================================================== -import logging -from typing import Any, Dict, List, Optional - -import ctxpy as ctx -from epacomp_tox.contracts import schema_ref -from epacomp_tox.validators import to_serializable - -from .base import BaseResource - -logger = logging.getLogger(__name__) - - -class BioactivityResource(BaseResource): - """MCP resource exposing CTX Bioactivity endpoints.""" - - @property - def name(self) -> str: - return "bioactivity" - - @property - def description(self) -> str: - return "Access to ToxCast/Tox21 bioactivity data, assays, models, and AOP crosswalks" - - def __init__(self, api_key: str): - super().__init__(api_key) - - # Increase upstream timeout for slow queries - UPSTREAM_TIMEOUT = 120.0 - try: - self.client = ctx.Bioactivity(x_api_key=api_key, timeout=UPSTREAM_TIMEOUT) - logger.info( - f"Successfully initialized ctx.Bioactivity with timeout={UPSTREAM_TIMEOUT}s" - ) - except TypeError as e: - logger.warning( - f"Could not set timeout for ctx.Bioactivity (TypeError: {e}). 
Using default timeout." - ) - self.client = ctx.Bioactivity(x_api_key=api_key) - - def get_tools(self) -> List[Dict[str, Any]]: - tools: List[Dict[str, Any]] = [ - { - "name": "search_bioactivity_terms", - "description": "Search bioactivity terms by prefix, exact match, or substring", - "parameters": { - "type": "object", - "properties": { - "search_type": { - "type": "string", - "enum": ["equals", "starts-with", "contains"], - "description": "Search mode to use", - }, - "value": { - "type": "string", - "description": "Term to search for", - }, - }, - "required": ["search_type", "value"], - }, - }, - { - "name": "get_bioactivity_summary_by_dtxsid", - "description": "Fetch bioactivity summary data for a chemical", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_bioactivity_summary_by_aeid", - "description": "Fetch bioactivity summary data for an assay endpoint ID (AEID)", - "parameters": { - "type": "object", - "properties": { - "aeid": { - "type": "string", - "description": "Assay endpoint identifier", - } - }, - "required": ["aeid"], - }, - }, - { - "name": "get_bioactivity_summary_by_tissue", - "description": "Fetch bioactivity summary data for a chemical in a specific tissue", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - }, - "tissue": { - "type": "string", - "description": "Tissue of origin (e.g., liver)", - }, - }, - "required": ["dtxsid", "tissue"], - }, - }, - { - "name": "get_bioactivity_data", - "description": "Retrieve detailed bioactivity data for a single identifier", - "parameters": { - "type": "object", - "properties": { - "identifier_type": { - "type": "string", - "enum": ["spid", "m4id", "dtxsid", "aeid"], - "description": "Identifier category", - }, - "identifier": { - "type": "string", - "description": 
"Identifier value", - }, - "projection": { - "type": "string", - "description": "Optional projection (e.g., toxcast-summary-plot)", - }, - }, - "required": ["identifier_type", "identifier"], - }, - }, - { - "name": "batch_get_bioactivity_data", - "description": "Batch fetch bioactivity data for multiple identifiers", - "parameters": { - "type": "object", - "properties": { - "identifier_type": { - "type": "string", - "enum": ["spid", "m4id", "dtxsid", "aeid"], - "description": "Identifier category", - }, - "identifiers": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Identifiers to request (max 200 per batch)", - }, - }, - "required": ["identifier_type", "identifiers"], - }, - }, - { - "name": "get_bioactivity_aed", - "description": "Retrieve Activity Exposure Distribution (AED) data for a chemical", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_bioactivity_aed", - "description": "Batch retrieve AED data for multiple chemicals", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "DSSTox IDs to request (max 200 per batch)", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_bioactivity_assay", - "description": "Retrieve assay annotations or lists (by AEID, gene, single-concentration, or all)", - "parameters": { - "type": "object", - "properties": { - "mode": { - "type": "string", - "enum": ["all", "aeid", "gene", "single-concentration"], - "description": "Assay query type", - }, - "aeid": { - "type": "string", - "description": "Assay endpoint ID (required for aeid and single-concentration modes)", - }, - "gene_symbol": { - "type": "string", - "description": "Gene symbol (required for gene mode)", - }, - }, - "required": ["mode"], - }, - }, - { - "name": 
"batch_get_bioactivity_assay_annotations", - "description": "Batch retrieve assay annotations for AEIDs", - "parameters": { - "type": "object", - "properties": { - "aeids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "List of assay endpoint IDs", - } - }, - "required": ["aeids"], - }, - }, - { - "name": "get_bioactivity_assay_count", - "description": "Return the total count of available assays", - "parameters": {"type": "object", "properties": {}}, - }, - { - "name": "get_bioactivity_assay_chemicals", - "description": "Get chemicals associated with an assay endpoint", - "parameters": { - "type": "object", - "properties": { - "aeid": { - "type": "string", - "description": "Assay endpoint ID", - } - }, - "required": ["aeid"], - }, - }, - { - "name": "get_bioactivity_aop", - "description": "Retrieve adverse outcome pathway mappings", - "parameters": { - "type": "object", - "properties": { - "lookup_type": { - "type": "string", - "enum": ["toxcast-aeid", "event-number", "entrez-gene-id"], - "description": "AOP lookup type", - }, - "identifier": { - "type": "string", - "description": "Identifier value matching the lookup type", - }, - }, - "required": ["lookup_type", "identifier"], - }, - }, - { - "name": "get_bioactivity_analytical_qc", - "description": "Retrieve analytical QC data for a chemical", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - } - }, - "required": ["dtxsid"], - }, - }, - ] - - list_schema = schema_ref("common", "list_generic.response.schema") - schema_map = { - "search_bioactivity_terms": ( - "bioactivity", - "search_bioactivity_terms.response.schema", - ), - "get_bioactivity_summary_by_dtxsid": ( - "bioactivity", - "get_bioactivity_summary_by_dtxsid.response.schema", - ), - "get_bioactivity_assay": ( - "bioactivity", - "get_bioactivity_assay.response.schema", - ), - "get_bioactivity_aop": ( - "bioactivity", - 
"get_bioactivity_aop.response.schema", - ), - "get_bioactivity_assay_count": ("common", "object.response.schema"), - } - for tool in tools: - schema_info = schema_map.get(tool["name"]) - if schema_info: - tool["responseSchemaRef"] = schema_ref(*schema_info) - else: - tool["responseSchemaRef"] = list_schema - - # Ensure outputSchema is populated from the reference - if "responseSchemaRef" in tool: - from epacomp_tox.contracts import load_schema - - ref = tool["responseSchemaRef"] - tool["outputSchema"] = load_schema(ref["namespace"], ref["name"]) - - return tools - - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - if tool_name == "search_bioactivity_terms": - return self.search_bioactivity_terms( - search_type=parameters["search_type"], - value=parameters["value"], - ) - if tool_name == "get_bioactivity_summary_by_dtxsid": - return self.get_bioactivity_summary_by_dtxsid(parameters["dtxsid"]) - if tool_name == "get_bioactivity_summary_by_aeid": - return self.get_bioactivity_summary_by_aeid(parameters["aeid"]) - if tool_name == "get_bioactivity_summary_by_tissue": - return self.get_bioactivity_summary_by_tissue( - dtxsid=parameters["dtxsid"], - tissue=parameters["tissue"], - ) - if tool_name == "get_bioactivity_data": - return self.get_bioactivity_data( - identifier_type=parameters["identifier_type"], - identifier=parameters["identifier"], - projection=parameters.get("projection"), - ) - if tool_name == "batch_get_bioactivity_data": - return self.batch_get_bioactivity_data( - identifier_type=parameters["identifier_type"], - identifiers=parameters["identifiers"], - ) - if tool_name == "get_bioactivity_aed": - return self.get_bioactivity_aed(parameters["dtxsid"]) - if tool_name == "batch_get_bioactivity_aed": - return self.batch_get_bioactivity_aed(parameters["dtxsids"]) - if tool_name == "get_bioactivity_assay": - return self.get_bioactivity_assay( - mode=parameters["mode"], - aeid=parameters.get("aeid"), - 
gene_symbol=parameters.get("gene_symbol"), - ) - if tool_name == "batch_get_bioactivity_assay_annotations": - return self.batch_get_bioactivity_assay_annotations(parameters["aeids"]) - if tool_name == "get_bioactivity_assay_count": - return self.get_bioactivity_assay_count() - if tool_name == "get_bioactivity_assay_chemicals": - return self.get_bioactivity_assay_chemicals(parameters["aeid"]) - if tool_name == "get_bioactivity_aop": - return self.get_bioactivity_aop( - lookup_type=parameters["lookup_type"], - identifier=parameters["identifier"], - ) - if tool_name == "get_bioactivity_analytical_qc": - return self.get_bioactivity_analytical_qc(parameters["dtxsid"]) - raise ValueError(f"Unknown tool: {tool_name}") - - # Tool implementations ------------------------------------------------- - - def search_bioactivity_terms(self, search_type: str, value: str) -> List[Any]: - result = self._with_retry(lambda: self.client.search(search_type, value)) - return self._ensure_list(result) - - def get_bioactivity_models( - self, dtxsid: str, model: Optional[str] = None - ) -> List[Any]: - kwargs = {"dtxsid": dtxsid} - if model is not None: - kwargs["model"] = model - result = self._with_retry( - lambda: self.client.models_by_dtxsid_and_name(**kwargs) - ) - else: - result = self._with_retry(lambda: self.client.models_by_dtxsid(**kwargs)) - return self._ensure_list(result) - - def get_bioactivity_summary_by_dtxsid(self, dtxsid: str) -> List[Any]: - result = self._with_retry(lambda: self.client.data_summary_by_dtxsid(dtxsid)) - return self._ensure_list(result) - - def get_bioactivity_summary_by_aeid(self, aeid: str) -> List[Any]: - result = self._with_retry(lambda: self.client.data_summary_by_aeid(aeid)) - return self._ensure_list(result) - - def get_bioactivity_summary_by_tissue(self, dtxsid: str, tissue: str) -> List[Any]: - result = self._with_retry( - lambda: self.client.data_summary_by_tissue(dtxsid, tissue) - ) - return self._ensure_list(result) - - def get_bioactivity_data( 
- self, - identifier_type: str, - identifier: str, - projection: Optional[str] = None, - ) -> List[Any]: - norm = identifier_type.strip().lower() - kwargs = {"identifier": identifier} - if projection is not None: - kwargs["projection"] = projection - - if norm == "spid": - result = self._with_retry( - lambda: self.client.data_by_spid(kwargs["identifier"]) - ) - elif norm == "m4id": - result = self._with_retry( - lambda: self.client.data_by_m4id(kwargs["identifier"]) - ) - elif norm == "dtxsid": - result = self._with_retry(lambda: self.client.data_by_dtxsid(**kwargs)) - elif norm == "aeid": - result = self._with_retry(lambda: self.client.data_by_aeid(**kwargs)) - else: - raise ValueError( - "identifier_type must be one of spid, m4id, dtxsid, or aeid" - ) - return self._ensure_list(result) - - def batch_get_bioactivity_data( - self, identifier_type: str, identifiers: List[str] - ) -> List[Any]: - clean = [value for value in identifiers if value] - if not clean: - return [] - result = self._with_retry( - lambda: self.client.data_batch(identifier_type, clean) - ) - return self._ensure_list(result) - - def get_bioactivity_aed(self, dtxsid: str) -> List[Any]: - result = self._with_retry(lambda: self.client.aed_by_dtxsid(dtxsid)) - return self._ensure_list(result) - - def batch_get_bioactivity_aed(self, dtxsids: List[str]) -> List[Any]: - clean = [value for value in dtxsids if value] - if not clean: - return [] - result = self._with_retry(lambda: self.client.aed_batch(clean)) - return self._ensure_list(result) - - def get_bioactivity_assay( - self, - mode: str, - aeid: Optional[str] = None, - gene_symbol: Optional[str] = None, - ) -> Any: - normalized = mode.strip().lower() - kwargs = {} - if normalized == "all": - result = self._with_retry(self.client.assays_all) - elif normalized == "aeid": - if aeid is None: - raise ValueError("aeid is required when mode='aeid'") - kwargs["aeid"] = aeid - result = self._with_retry(lambda: self.client.assay_by_aeid(**kwargs)) - elif 
normalized == "single-concentration": - if aeid is None: - raise ValueError("aeid is required when mode='single-concentration'") - kwargs["aeid"] = aeid - result = self._with_retry( - lambda: self.client.assay_single_conc_by_aeid(**kwargs) - ) - elif normalized == "gene": - if gene_symbol is None: - raise ValueError("gene_symbol is required when mode='gene'") - kwargs["gene_symbol"] = gene_symbol - result = self._with_retry(lambda: self.client.assay_by_gene(**kwargs)) - else: - raise ValueError( - "mode must be one of all, aeid, single-concentration, or gene" - ) - return to_serializable(result) - - def batch_get_bioactivity_assay_annotations(self, aeids: List[str]) -> List[Any]: - clean = [value for value in aeids if value] - if not clean: - return [] - result = self._with_retry(lambda: self.client.assay_batch(clean)) - return self._ensure_list(result) - - def get_bioactivity_assay_count(self) -> Any: - result = self._with_retry(self.client.assay_count) - return to_serializable(result) - - def get_bioactivity_assay_chemicals(self, aeid: str) -> List[Any]: - result = self._with_retry(lambda: self.client.assay_chemicals_by_aeid(aeid)) - return self._ensure_list(result) - - def get_bioactivity_aop(self, lookup_type: str, identifier: str) -> List[Any]: - norm = lookup_type.strip().lower() - if norm == "toxcast-aeid": - result = self._with_retry( - lambda: self.client.aop_by_toxcast_aeid(identifier) - ) - elif norm == "event-number": - result = self._with_retry( - lambda: self.client.aop_by_event_number(identifier) - ) - elif norm == "entrez-gene-id": - result = self._with_retry( - lambda: self.client.aop_by_entrez_gene(identifier) - ) - else: - raise ValueError( - "lookup_type must be one of toxcast-aeid, event-number, or entrez-gene-id" - ) - return self._ensure_list(result) - - def get_bioactivity_analytical_qc(self, dtxsid: str) -> List[Any]: - result = self._with_retry(lambda: self.client.analytical_qc_by_dtxsid(dtxsid)) - return self._ensure_list(result) - 
-==================================================================================================== -FILE: src/epacomp_tox/resources/chemical.py -==================================================================================================== -import base64 -import logging -from typing import Any, Dict, List, Optional - -import ctxpy as ctx -from epacomp_tox.contracts import schema_ref -from epacomp_tox.validators import to_serializable - -from .base import BaseResource - -logger = logging.getLogger(__name__) - - -class ChemicalResource(BaseResource): - """ - MCP resource for EPA CompTox chemical data. - - Provides access to chemical structures, nomenclature, IDs, and properties. - """ - - @property - def name(self) -> str: - return "chemical" - - @property - def description(self) -> str: - return "Access to chemical structures, nomenclature, IDs, and properties" - - def __init__(self, api_key: str): - """ - Initialize the chemical resource. - - Args: - api_key: EPA CompTox API key. - """ - super().__init__(api_key) - - # --- START MODIFICATION: Increase Upstream Timeout --- - # The default timeout is too short for complex queries. - # Increase it significantly (e.g., 120 seconds). - UPSTREAM_TIMEOUT = 120.0 - - try: - # Attempt to initialize the client with the increased timeout. - # This assumes the ctxpy library accepts a 'timeout' argument. - self.client = ctx.Chemical(x_api_key=api_key, timeout=UPSTREAM_TIMEOUT) - logger.info( - f"Successfully initialized ctx.Chemical with timeout={UPSTREAM_TIMEOUT}s" - ) - - except TypeError as e: - # If ctxpy does not accept the 'timeout' argument, it raises a TypeError. - # Fall back to the original initialization and log a warning. - logger.warning( - f"Could not set timeout for ctx.Chemical (TypeError: {e}). Using default timeout. " - "Timeouts may still occur for slow queries. Check ctxpy documentation/version." 
- ) - self.client = ctx.Chemical(x_api_key=api_key) - # --- END MODIFICATION --- - - def get_tools(self) -> List[Dict[str, Any]]: - """ - Get a list of tools provided by this resource. - - Returns: - List of tool definitions. - """ - tools: List[Dict[str, Any]] = [ - { - "name": "search_chemical", - "description": "Search for chemicals by name, CAS-RN, or other identifiers", - "parameters": { - "type": "object", - "properties": { - "query": {"type": "string", "description": "Search term"}, - "search_type": { - "type": "string", - "description": "Search type: equals, starts-with, or contains", - "enum": ["equals", "starts-with", "contains"], - }, - }, - "required": ["query", "search_type"], - }, - }, - { - "name": "batch_search_chemical", - "description": "Batch search for chemicals using a list of identifiers", - "parameters": { - "type": "object", - "properties": { - "identifiers": { - "type": "array", - "items": {"type": "string"}, - "description": "Identifiers to search (DTXSIDs, CASRNs, names, etc.)", - } - }, - "required": ["identifiers"], - }, - }, - { - "name": "get_chemical_details", - "description": "Get detailed information about a chemical", - "parameters": { - "type": "object", - "properties": { - "identifier": { - "type": "string", - "description": "Chemical identifier (DTXSID or DTXCID)", - }, - "id_type": { - "type": "string", - "description": "Type of identifier", - "enum": ["dtxsid", "dtxcid"], - }, - "subset": { - "type": "string", - "description": "Optional subset selector for details", - "enum": [ - "default", - "all", - "details", - "identifiers", - "structures", - "nta", - ], - "default": "default", - }, - }, - "required": ["identifier", "id_type"], - }, - }, - { - "name": "batch_get_chemical_details", - "description": "Get detailed information about multiple chemicals", - "parameters": { - "type": "object", - "properties": { - "identifiers": { - "type": "array", - "items": {"type": "string"}, - "description": "List of chemical identifiers", - 
}, - "id_type": { - "type": "string", - "description": "Type of identifier", - "enum": ["dtxsid", "dtxcid"], - }, - "subset": { - "type": "string", - "description": "Optional subset selector for details", - "enum": [ - "default", - "all", - "details", - "identifiers", - "structures", - "nta", - ], - "default": "default", - }, - }, - "required": ["identifiers", "id_type"], - }, - }, - { - "name": "search_msready", - "description": "Search for chemicals by MS-ready properties", - "parameters": { - "type": "object", - "properties": { - "search_type": { - "type": "string", - "description": "Type of MS-ready search", - "enum": ["dtxcid", "formula", "mass-range"], - }, - "query": { - "type": "string", - "description": "Search term for dtxcid or formula", - }, - "mass_start": { - "type": "number", - "description": "Start of mass range for mass-range search", - }, - "mass_end": { - "type": "number", - "description": "End of mass range for mass-range search", - }, - }, - "required": ["search_type"], - }, - }, - ] - - # Property endpoints are not available in the current ctxpy client; excluded to avoid runtime 500s. 
- tools.extend( - [ - { - "name": "get_chemical_fate_summary", - "description": "Retrieve environmental fate summary for a chemical", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - }, - "property_name": { - "type": "string", - "description": "Optional fate property filter", - }, - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_chemical_fate_details", - "description": "Retrieve detailed environmental fate data for a chemical", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_chemical_extra_data", - "description": "Fetch extra chemical data (functional use, use cases, etc.)", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "List of DSSTox Substance Identifiers", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "opsin_convert_name", - "description": "Convert a systematic name using OPSIN", - "parameters": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Systematic IUPAC name", - }, - "output_format": { - "type": "string", - "enum": ["smiles", "inchikey", "inchi"], - "description": "Desired representation", - }, - }, - "required": ["name", "output_format"], - }, - }, - { - "name": "indigo_convert_molfile", - "description": "Convert a molfile using Indigo toolkit endpoints", - "parameters": { - "type": "object", - "properties": { - "molfile": { - "type": "string", - "description": "Molfile contents (V2000/V3000)", - }, - "output_format": { - "type": "string", - "enum": [ - "smiles", - "inchikey", - "inchi", - "mol_v2000", - "mol_v3000", - "mol_weight", - "canonical_smiles", - ], - "description": "Desired transformation", - }, - }, - "required": ["molfile", 
"output_format"], - }, - }, - ] - ) - - schema_map = { - "search_chemical": ("chemical", "search_chemical.response.schema"), - "batch_search_chemical": ("chemical", "search_chemical.response.schema"), - "get_chemical_details": ("common", "object.response.schema"), - "batch_get_chemical_details": ("common", "list_generic.response.schema"), - "search_msready": ("common", "list_generic.response.schema"), - "get_chemical_fate_summary": ("common", "object.response.schema"), - "get_chemical_fate_details": ("common", "object.response.schema"), - "get_chemical_extra_data": ("common", "list_generic.response.schema"), - "opsin_convert_name": ("chemical", "opsin_convert.response.schema"), - "indigo_convert_molfile": ("chemical", "indigo_convert.response.schema"), - } - - for tool in tools: - schema_info = schema_map.get(tool["name"]) - if schema_info: - tool["responseSchemaRef"] = schema_ref(*schema_info) - - # Ensure outputSchema is populated from the reference - if "responseSchemaRef" in tool: - from epacomp_tox.contracts import load_schema - - ref = tool["responseSchemaRef"] - tool["outputSchema"] = load_schema(ref["namespace"], ref["name"]) - - return tools - - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - """ - Execute a tool with the given parameters. - - Args: - tool_name: Name of the tool to execute. - parameters: Parameters for the tool. - - Returns: - Tool execution result. - - Raises: - ValueError: If the tool is not found or parameters are invalid. 
- """ - if tool_name == "search_chemical": - return self.search_chemical( - query=parameters["query"], - search_type=parameters["search_type"], - ) - if tool_name == "batch_search_chemical": - return self.batch_search_chemical( - identifiers=parameters["identifiers"], - ) - if tool_name == "get_chemical_details": - return self.get_chemical_details( - identifier=parameters["identifier"], - id_type=parameters["id_type"], - subset=parameters.get("subset", "default"), - ) - if tool_name == "batch_get_chemical_details": - return self.batch_get_chemical_details( - identifiers=parameters["identifiers"], - id_type=parameters["id_type"], - subset=parameters.get("subset", "default"), - ) - if tool_name == "search_msready": - return self.search_msready( - search_type=parameters["search_type"], - query=parameters.get("query"), - mass_start=parameters.get("mass_start"), - mass_end=parameters.get("mass_end"), - ) - if tool_name == "get_chemical_fate_summary": - return self.get_chemical_fate_summary( - dtxsid=parameters["dtxsid"], - property_name=parameters.get("property_name"), - ) - if tool_name == "get_chemical_fate_details": - return self.get_chemical_fate_details(parameters["dtxsid"]) - if tool_name == "get_chemical_extra_data": - return self.get_chemical_extra_data(parameters["dtxsids"]) - if tool_name == "opsin_convert_name": - return self.opsin_convert_name( - name=parameters["name"], - output_format=parameters["output_format"], - ) - if tool_name == "indigo_convert_molfile": - return self.indigo_convert_molfile( - molfile=parameters["molfile"], - output_format=parameters["output_format"], - ) - raise ValueError(f"Unknown tool: {tool_name}") - - def search_chemical(self, query: str, search_type: str) -> List[Dict[str, Any]]: - """Search for chemicals by name, CAS-RN, or other identifiers.""" - result = self._with_retry( - lambda: self.client.search(by=search_type, word=query) - ) - return self._ensure_list(result) - - def batch_search_chemical(self, identifiers: 
List[str]) -> List[Dict[str, Any]]: - """Batch search for multiple chemical identifiers.""" - identifiers = [item for item in identifiers if item] - if not identifiers: - return [] - result = self._with_retry( - lambda: self.client.search(by="batch", word=identifiers) - ) - return self._ensure_list(result) - - def get_chemical_details( - self, identifier: str, id_type: str, subset: str = "default" - ) -> Dict[str, Any]: - """Get detailed information about a single chemical.""" - result = self._with_retry( - lambda: self.client.details(by=id_type, word=identifier, subset=subset) - ) - return self._ensure_object(result) - - def batch_get_chemical_details( - self, identifiers: List[str], id_type: str, subset: str = "default" - ) -> List[Dict[str, Any]]: - """Get detailed information about multiple chemicals.""" - identifiers = [item for item in identifiers if item] - if not identifiers: - return [] - result = self._with_retry( - lambda: self.client.details(by="batch", word=identifiers, subset=subset) - ) - return self._ensure_list(result) - - def search_msready( - self, - search_type: str, - query: Optional[str] = None, - mass_start: Optional[float] = None, - mass_end: Optional[float] = None, - ) -> List[Dict[str, Any]]: - """Search for chemicals by MS-ready properties or mass range.""" - normalized = search_type.strip().lower() - kwargs = {} - if normalized == "mass-range": - if mass_start is not None: - kwargs["start"] = mass_start - if mass_end is not None: - kwargs["end"] = mass_end - result = self._with_retry(lambda: self.client.msready(by="mass", **kwargs)) - else: - if query is not None: - kwargs["word"] = query - result = self._with_retry( - lambda: self.client.msready(by=search_type, **kwargs) - ) - return self._ensure_list(result) - - def _raise_properties_unavailable(self, tool_name: str) -> None: - """Helper to surface a clear error when property endpoints are unavailable.""" - raise NotImplementedError( - f"Chemical property tool '{tool_name}' is 
disabled: ctxpy client does not expose property endpoints." - ) - - def get_chemical_property_summary( - self, dtxsid: str, property_name: Optional[str] = None - ) -> Any: - self._raise_properties_unavailable("get_chemical_property_summary") - - def get_chemical_predicted_properties(self, dtxsid: str) -> List[Dict[str, Any]]: - self._raise_properties_unavailable("get_chemical_predicted_properties") - - def batch_get_chemical_predicted_properties( - self, dtxsids: List[str] - ) -> List[Dict[str, Any]]: - self._raise_properties_unavailable("batch_get_chemical_predicted_properties") - - def get_chemical_predicted_properties_by_range( - self, property_id: str, start: float, end: float - ) -> List[Dict[str, Any]]: - self._raise_properties_unavailable("get_chemical_predicted_properties_by_range") - - def get_chemical_experimental_properties(self, dtxsid: str) -> List[Dict[str, Any]]: - self._raise_properties_unavailable("get_chemical_experimental_properties") - - def batch_get_chemical_experimental_properties( - self, dtxsids: List[str] - ) -> List[Dict[str, Any]]: - self._raise_properties_unavailable("batch_get_chemical_experimental_properties") - - def get_chemical_experimental_properties_by_range( - self, property_name: str, start: float, end: float - ) -> List[Dict[str, Any]]: - self._raise_properties_unavailable( - "get_chemical_experimental_properties_by_range" - ) - - def list_chemical_property_names(self, property_type: str) -> List[str]: - self._raise_properties_unavailable("list_chemical_property_names") - - def get_chemical_fate_summary( - self, dtxsid: str, property_name: Optional[str] = None - ) -> Any: - kwargs = {"dtxsid": dtxsid} - if property_name is not None: - kwargs["prop_name"] = property_name - - result = self._with_retry(lambda: self.client.fate_summary(**kwargs)) - return to_serializable(result) - - def get_chemical_fate_details(self, dtxsid: str) -> Any: - result = self._with_retry(lambda: self.client.fate_details(dtxsid)) - return 
to_serializable(result) - - def get_chemical_extra_data(self, dtxsids: List[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry(lambda: self.client.extra_data_batch(identifiers)) - return self._ensure_list(result) - - def check_chemical_ghs_links( - self, source: str, dtxsids: List[str] - ) -> Dict[str, Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return {"source": source, "results": []} - result = self._with_retry( - lambda: self.client.ghs_check_batch(source, identifiers) - ) - return { - "source": source, - "results": self._ensure_list(result), - } - - def opsin_convert_name(self, name: str, output_format: str) -> Dict[str, Any]: - result = self._with_retry( - lambda: self.client.opsin_convert(name, output=output_format) - ) - return { - "name": name, - "outputFormat": output_format, - "value": to_serializable(result), - } - - def indigo_convert_molfile( - self, molfile: str, output_format: str - ) -> Dict[str, Any]: - result = self._with_retry( - lambda: self.client.indigo_convert(molfile, output=output_format) - ) - converted = to_serializable(result) - return { - "outputFormat": output_format, - "value": converted, - } - - def get_chemical_structure_file( - self, - identifier_type: str, - identifier: str, - file_format: str, - image_format: Optional[str] = None, - ) -> Dict[str, Any]: - kwargs = { - "identifier_type": identifier_type, - "identifier": identifier, - "file_format": file_format, - } - if image_format is not None: - kwargs["image_format"] = image_format - - payload = self._with_retry(lambda: self.client.structure_file(**kwargs)) - # ... 
(rest of the method remains the same) - metadata = self.get_last_metadata() - content_type = metadata.get("content_type") if metadata else None - - if isinstance(payload, bytes): - # Ensure base64 is imported if needed - import base64 - - data = base64.b64encode(payload).decode("ascii") - encoding = "base64" - else: - data = to_serializable(payload) - encoding = "utf-8" - - response: Dict[str, Any] = { - "identifier": identifier, - "identifierType": identifier_type, - "fileFormat": file_format, - "encoding": encoding, - "data": data, - "length": len(payload) if isinstance(payload, (bytes, str)) else None, - } - if content_type: - response["contentType"] = content_type - if file_format.lower() == "image": - response["imageFormat"] = (image_format or "PNG").upper() - return response - -==================================================================================================== -FILE: src/epacomp_tox/resources/hazard.py -==================================================================================================== -import logging -from typing import Any, Dict, List - -import ctxpy as ctx -from epacomp_tox.contracts import schema_ref - -from .base import BaseResource - -logger = logging.getLogger(__name__) - - -class HazardResource(BaseResource): - """MCP resource exposing CTX hazard datasets (ToxValDB, ToxRefDB, cancer, genetox, ADME/IVIVE, IRIS, PPRTV, HAWC).""" - - _DATA_TYPE_ENUM = [ - "all", - "hazard", - "toxval", - "human", - "eco", - "skin-eye", - "cancer", - "genetox", - "adme", - "toxref", - "iris", - "pprtv", - "hawc", - ] - - @staticmethod - def _schema(namespace: str, name: str) -> Dict[str, str]: - return schema_ref(namespace, name) - - @property - def name(self) -> str: - return "hazard" - - @property - def description(self) -> str: - return ( - "Access to hazard datasets from the CTX APIs, including ToxValDB, ToxRefDB, cancer, " - "genetox, ADME/IVIVE, IRIS, PPRTV, and HAWC link mappers." 
- ) - - def __init__(self, api_key: str): - super().__init__(api_key) - # Increase upstream timeout for slow queries - UPSTREAM_TIMEOUT = 120.0 - try: - self.client = ctx.Hazard(x_api_key=api_key, timeout=UPSTREAM_TIMEOUT) - logger.info( - f"Successfully initialized ctx.Hazard with timeout={UPSTREAM_TIMEOUT}s" - ) - except TypeError as e: - logger.warning( - f"Could not set timeout for ctx.Hazard (TypeError: {e}). Using default timeout." - ) - self.client = ctx.Hazard(x_api_key=api_key) - - def _clean_identifiers(self, identifiers: List[str]) -> List[str]: - return [ - value.strip() - for value in identifiers - if isinstance(value, str) and value.strip() - ] - - def get_tools(self) -> List[Dict[str, Any]]: - tools: List[Dict[str, Any]] = [ - { - "name": "search_hazard", - "description": "Search for hazard data by DTXSID across ToxValDB, ToxRefDB, cancer, genetox, ADME/IVIVE, IRIS, PPRTV, or HAWC datasets.", - "parameters": { - "type": "object", - "properties": { - "data_type": { - "type": "string", - "description": "Hazard dataset to query.", - "enum": self._DATA_TYPE_ENUM, - }, - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - }, - "summary": { - "type": "boolean", - "description": "Whether to request summary (vs. detailed) data when supported.", - "default": True, - }, - }, - "required": ["data_type", "dtxsid"], - }, - }, - { - "name": "batch_search_hazard", - "description": "Batch hazard lookup for multiple DTXSIDs for the selected dataset.", - "parameters": { - "type": "object", - "properties": { - "data_type": { - "type": "string", - "description": "Hazard dataset to query.", - "enum": self._DATA_TYPE_ENUM, - }, - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "List of chemical identifiers (DTXSIDs).", - }, - "summary": { - "type": "boolean", - "description": "Whether to request summary (vs. 
detailed) data when supported.", - "default": True, - }, - }, - "required": ["data_type", "dtxsids"], - }, - }, - { - "name": "get_hazard_toxval", - "description": "Retrieve full ToxValDB hazard data for a single chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_hazard_toxval", - "description": "Retrieve ToxValDB hazard data for multiple chemicals.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_hazard_skin_eye", - "description": "Retrieve skin and eye hazard data for a single chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_hazard_skin_eye", - "description": "Retrieve skin and eye hazard data for multiple chemicals.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_hazard_cancer_summary", - "description": "Retrieve cancer hazard summary for a single chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_hazard_cancer_summary", - "description": "Retrieve cancer hazard summary for multiple chemicals.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": 
"Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_hazard_genetox_summary", - "description": "Retrieve genotoxicity summary data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_hazard_genetox_summary", - "description": "Retrieve genotoxicity summary data for multiple chemicals.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_hazard_genetox_details", - "description": "Retrieve genotoxicity detailed data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "batch_get_hazard_genetox_details", - "description": "Retrieve genotoxicity detailed data for multiple chemicals.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - { - "name": "get_hazard_adme_ivive", - "description": "Retrieve ADME/IVIVE hazard data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_hazard_pprtv", - "description": "Retrieve PPRTV hazard data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - 
"name": "get_hazard_iris", - "description": "Retrieve IRIS hazard data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_hazard_hawc", - "description": "Retrieve HAWC link mapper data for a chemical.", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Chemical identifier (DTXSID).", - } - }, - "required": ["dtxsid"], - }, - }, - { - "name": "get_hazard_toxref", - "description": "Retrieve ToxRefDB data (summary, data, effects, or observations) by DTXSID, study ID, or study type.", - "parameters": { - "type": "object", - "properties": { - "dataset": { - "type": "string", - "enum": ["summary", "data", "effects", "observations"], - "description": "ToxRefDB dataset to query.", - }, - "lookup_type": { - "type": "string", - "enum": ["dtxsid", "study-id", "study-type"], - "description": "Lookup mode for the query.", - }, - "value": { - "type": "string", - "description": "Identifier corresponding to the selected lookup type.", - }, - }, - "required": ["dataset", "lookup_type", "value"], - }, - }, - { - "name": "batch_get_hazard_toxref", - "description": "Batch retrieve ToxRefDB data by DTXSID.", - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "Chemical identifiers (DTXSIDs).", - } - }, - "required": ["dtxsids"], - }, - }, - ] - for tool in tools: - if tool["name"] == "search_hazard": - tool["responseSchemaRef"] = self._schema( - "hazard", "search_hazard.response.schema" - ) - elif tool["name"] == "batch_search_hazard": - tool["responseSchemaRef"] = self._schema( - "hazard", "batch_search_hazard.response.schema" - ) - else: - tool["responseSchemaRef"] = self._schema( - "common", "list_generic.response.schema" - ) - - # Ensure outputSchema is 
populated from the reference - if "responseSchemaRef" in tool: - from epacomp_tox.contracts import load_schema - - ref = tool["responseSchemaRef"] - tool["outputSchema"] = load_schema(ref["namespace"], ref["name"]) - - return tools - - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - handlers = { - "search_hazard": lambda params: self.search_hazard( - data_type=params["data_type"], - dtxsid=params["dtxsid"], - summary=params.get("summary", True), - ), - "batch_search_hazard": lambda params: self.batch_search_hazard( - data_type=params["data_type"], - dtxsids=params["dtxsids"], - summary=params.get("summary", True), - ), - "get_hazard_toxval": lambda params: self.get_hazard_toxval( - params["dtxsid"] - ), - "batch_get_hazard_toxval": lambda params: self.batch_get_hazard_toxval( - params["dtxsids"] - ), - "get_hazard_skin_eye": lambda params: self.get_hazard_skin_eye( - params["dtxsid"] - ), - "batch_get_hazard_skin_eye": lambda params: self.batch_get_hazard_skin_eye( - params["dtxsids"] - ), - "get_hazard_cancer_summary": lambda params: self.get_hazard_cancer_summary( - params["dtxsid"] - ), - "batch_get_hazard_cancer_summary": lambda params: self.batch_get_hazard_cancer_summary( - params["dtxsids"] - ), - "get_hazard_genetox_summary": lambda params: self.get_hazard_genetox_summary( - params["dtxsid"] - ), - "batch_get_hazard_genetox_summary": lambda params: self.batch_get_hazard_genetox_summary( - params["dtxsids"] - ), - "get_hazard_genetox_details": lambda params: self.get_hazard_genetox_details( - params["dtxsid"] - ), - "batch_get_hazard_genetox_details": lambda params: self.batch_get_hazard_genetox_details( - params["dtxsids"] - ), - "get_hazard_adme_ivive": lambda params: self.get_hazard_adme_ivive( - params["dtxsid"] - ), - "get_hazard_pprtv": lambda params: self.get_hazard_pprtv(params["dtxsid"]), - "get_hazard_iris": lambda params: self.get_hazard_iris(params["dtxsid"]), - "get_hazard_hawc": lambda params: 
self.get_hazard_hawc(params["dtxsid"]), - "get_hazard_toxref": lambda params: self.get_hazard_toxref( - dataset=params["dataset"], - lookup_type=params["lookup_type"], - value=params["value"], - ), - "batch_get_hazard_toxref": lambda params: self.batch_get_hazard_toxref( - params["dtxsids"] - ), - } - - try: - handler = handlers[tool_name] - except KeyError as exc: # pragma: no cover - defensive - raise ValueError(f"Unknown tool: {tool_name}") from exc - return handler(parameters) - - def search_hazard( - self, data_type: str, dtxsid: str, summary: bool = True - ) -> List[Dict[str, Any]]: - """ - Search hazard datasets for a chemical. - - Args: - data_type: Hazard dataset to query (all, hazard, toxval, human, eco, skin-eye, cancer, genetox, adme, toxref, iris, pprtv, hawc). - dtxsid: Chemical identifier (DTXSID). - summary: Whether to request summary data when the API supports a detail toggle. - - Returns: - List of hazard data records. - """ - result = self._with_retry( - lambda: self.client.search(by=data_type, dtxsid=dtxsid, summary=summary) - ) - return self._ensure_list(result) - - def batch_search_hazard( - self, - data_type: str, - dtxsids: List[str], - summary: bool = True, - ) -> Dict[str, List[Dict[str, Any]]]: - """ - Search hazard datasets for multiple chemicals. - - Args: - data_type: Hazard dataset to query. - dtxsids: List of DTXSIDs. - summary: Whether to request summary data when supported. - - Returns: - Mapping of DTXSID to hazard records. 
- """ - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return {} - payload = self._with_retry( - lambda: self.client.batch_search( - by=data_type, dtxsid=cleaned, summary=summary - ) - ) - normalized = self._ensure_object(payload) - return {key: self._ensure_list(value) for key, value in normalized.items()} - - def get_hazard_toxval(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.toxval(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def batch_get_hazard_toxval(self, dtxsids: List[str]) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry(lambda: self.client.toxval_batch(dtxsids=cleaned)) - return self._ensure_list(payload) - - def get_hazard_skin_eye(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.skin_eye(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def batch_get_hazard_skin_eye(self, dtxsids: List[str]) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry(lambda: self.client.skin_eye_batch(dtxsids=cleaned)) - return self._ensure_list(payload) - - def get_hazard_cancer_summary(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.cancer_summary(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def batch_get_hazard_cancer_summary( - self, dtxsids: List[str] - ) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry( - lambda: self.client.cancer_summary_batch(dtxsids=cleaned) - ) - return self._ensure_list(payload) - - def get_hazard_genetox_summary(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.genetox_summary(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def batch_get_hazard_genetox_summary( - self, dtxsids: List[str] - 
) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry( - lambda: self.client.genetox_summary_batch(dtxsids=cleaned) - ) - return self._ensure_list(payload) - - def get_hazard_genetox_details(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.genetox_details(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def batch_get_hazard_genetox_details( - self, dtxsids: List[str] - ) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry( - lambda: self.client.genetox_details_batch(dtxsids=cleaned) - ) - return self._ensure_list(payload) - - def get_hazard_adme_ivive(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.adme_ivive(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def get_hazard_pprtv(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.pprtv(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def get_hazard_iris(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.iris(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def get_hazard_hawc(self, dtxsid: str) -> List[Dict[str, Any]]: - payload = self._with_retry(lambda: self.client.hawc(dtxsid=dtxsid)) - return self._ensure_list(payload) - - def get_hazard_toxref( - self, dataset: str, lookup_type: str, value: str - ) -> List[Dict[str, Any]]: - payload = self._with_retry( - lambda: self.client.toxref(dataset=dataset, lookup=lookup_type, value=value) - ) - return self._ensure_list(payload) - - def batch_get_hazard_toxref(self, dtxsids: List[str]) -> List[Dict[str, Any]]: - cleaned = self._clean_identifiers(dtxsids) - if not cleaned: - return [] - payload = self._with_retry(lambda: self.client.toxref_batch(dtxsids=cleaned)) - return self._ensure_list(payload) - 
-==================================================================================================== -FILE: src/epacomp_tox/resources/exposure.py -==================================================================================================== -import logging -from typing import Any, Dict, List, Optional, Sequence - -import ctxpy as ctx -from epacomp_tox.contracts import schema_ref - -from .base import BaseResource - -logger = logging.getLogger(__name__) - - -class ExposureResource(BaseResource): - """MCP resource for EPA CompTox exposure data.""" - - @property - def name(self) -> str: - return "exposure" - - @property - def description(self) -> str: - return "Access to SEEM predictions, CPDat product data, HTTK, MMDB monitoring, and CCD datasets" - - def __init__(self, api_key: str): - super().__init__(api_key) - # Increase upstream timeout for slow queries - UPSTREAM_TIMEOUT = 120.0 - try: - self.client = ctx.Exposure(x_api_key=api_key, timeout=UPSTREAM_TIMEOUT) - logger.info( - f"Successfully initialized ctx.Exposure with timeout={UPSTREAM_TIMEOUT}s" - ) - except TypeError as e: - logger.warning( - f"Could not set timeout for ctx.Exposure (TypeError: {e}). Using default timeout." 
- ) - self.client = ctx.Exposure(x_api_key=api_key) - - # ------------------------------------------------------------------ - # Tool catalog - # ------------------------------------------------------------------ - def get_tools(self) -> List[Dict[str, Any]]: - tools: List[Dict[str, Any]] = [ - { - "name": "search_cpdat", - "description": "Search historical CPDat data (functional use, product use categories, or list presence) for chemicals", - "parameters": { - "type": "object", - "properties": { - "vocab_name": { - "type": "string", - "enum": ["fc", "puc", "lpk"], - "description": "Vocabulary domain to query: functional use (fc), product use categories (puc), or list presence keywords (lpk)", - }, - "dtxsid": { - "type": "string", - "description": "Optional single DSSTox ID", - }, - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "description": "Optional list of DSSTox IDs (max 200 per batch)", - }, - }, - "required": ["vocab_name"], - }, - }, - { - "name": "search_httk", - "description": "Search for high-throughput toxicokinetics (HTTK) data", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Optional single DSSTox ID", - }, - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "description": "Optional list of DSSTox IDs", - }, - }, - "required": [], - }, - }, - { - "name": "get_cpdat_vocabulary", - "description": "Return CPDat controlled vocabulary values (functional use, product use categories, or list presence tags)", - "parameters": { - "type": "object", - "properties": { - "vocab_name": { - "type": "string", - "enum": ["fc", "puc", "lpk"], - "description": "Vocabulary domain to list", - } - }, - "required": ["vocab_name"], - }, - }, - { - "name": "search_qsurs", - "description": "Retrieve QSUR model functional-use probability predictions", - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "Optional single DSSTox 
ID", - }, - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "description": "Optional list of DSSTox IDs", - }, - }, - "required": [], - }, - }, - { - "name": "search_exposures", - "description": "Backwards-compatible exposure search across pathways/MMDB/SEEM datasets", - "parameters": { - "type": "object", - "properties": { - "data_type": { - "type": "string", - "enum": [ - "pathways", - "mmdb-single", - "seem", - "seem-demographic", - ], - "description": "Legacy exposure dataset selector", - }, - "dtxsid": { - "type": "string", - "description": "Optional single DSSTox ID", - }, - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "description": "Optional list of DSSTox IDs", - }, - }, - "required": ["data_type"], - }, - }, - ] - - # Additional granular tools (single-item retrievals) - tools.extend( - [ - _single_id_tool( - "get_seem_general", "Fetch SEEM general exposure predictions" - ), - _batch_id_tool( - "batch_get_seem_general", - "Batch fetch SEEM general exposure predictions", - ), - _single_id_tool( - "get_seem_demographic", - "Fetch SEEM demographic exposure predictions", - ), - _batch_id_tool( - "batch_get_seem_demographic", - "Batch fetch SEEM demographic exposure predictions", - ), - _single_id_tool( - "get_exposure_product_data", "Retrieve CPDat product data" - ), - _batch_id_tool( - "batch_get_exposure_product_data", "Batch fetch CPDat product data" - ), - _no_param_tool( - "list_exposure_product_puc", "List product use categories (PUC)" - ), - _single_id_tool( - "get_exposure_list_presence", "Retrieve list presence data" - ), - _batch_id_tool( - "batch_get_exposure_list_presence", "Batch fetch list presence data" - ), - _no_param_tool( - "list_exposure_list_presence_tags", "List list-presence tags" - ), - _single_id_tool("get_exposure_httk", "Retrieve HTTK data"), - _batch_id_tool("batch_get_exposure_httk", "Batch fetch HTTK data"), - _single_id_tool( - "get_exposure_functional_use", - "Retrieve reported functional use 
data", - ), - _batch_id_tool( - "batch_get_exposure_functional_use", - "Batch fetch reported functional use data", - ), - _single_id_tool( - "get_exposure_functional_use_probability", - "Retrieve functional use probability predictions", - ), - _no_param_tool( - "list_exposure_functional_use_categories", - "List functional use categories", - ), - _single_id_tool( - "get_exposure_ccd_puc", "Retrieve CCD Product Use Category data" - ), - _single_id_tool( - "get_exposure_ccd_production_volume", - "Retrieve CCD production volume data", - ), - _single_id_tool( - "get_exposure_ccd_monitoring_data", - "Retrieve CCD biomonitoring data", - ), - _single_id_tool( - "get_exposure_ccd_keywords", "Retrieve CCD general use keywords" - ), - _single_id_tool( - "get_exposure_ccd_functional_use", - "Retrieve CCD reported functional use data", - ), - _single_id_tool( - "get_exposure_ccd_chem_weight_fractions", - "Retrieve CCD chemical weight fractions data", - ), - _str_param_tool( - "get_exposure_mmdb_single_sample_by_medium", - "medium", - "Retrieve MMDB single-sample data filtered by medium", - ), - _single_id_tool( - "get_exposure_mmdb_single_sample_by_dtxsid", - "Retrieve MMDB single-sample data", - ), - _no_param_tool( - "list_exposure_mmdb_mediums", "List MMDB medium categories" - ), - _str_param_tool( - "get_exposure_mmdb_aggregate_by_medium", - "medium", - "Retrieve MMDB aggregate records filtered by medium", - ), - _single_id_tool( - "get_exposure_mmdb_aggregate_by_dtxsid", - "Retrieve MMDB aggregate records", - ), - ] - ) - - for tool in tools: - schema_map = { - "search_cpdat": ("exposure", "search_cpdat.response.schema"), - "search_httk": ("exposure", "search_httk.response.schema"), - "get_exposure_httk": ( - "exposure", - "get_exposure_httk.response.schema", - ), - } - schema_info = schema_map.get( - tool["name"], ("common", "list_generic.response.schema") - ) - tool["responseSchemaRef"] = schema_ref(*schema_info) - - # Ensure outputSchema is populated from the reference 
- if "responseSchemaRef" in tool: - from epacomp_tox.contracts import load_schema - - ref = tool["responseSchemaRef"] - tool["outputSchema"] = load_schema(ref["namespace"], ref["name"]) - - return tools - - # ------------------------------------------------------------------ - # Tool execution - # ------------------------------------------------------------------ - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - # Legacy handlers ------------------------------------------------- - if tool_name == "search_cpdat": - vocab = parameters["vocab_name"] - identifiers = self._resolve_identifiers( - parameters.get("dtxsid"), - parameters.get("dtxsids"), - ) - return self.search_cpdat(vocab, identifiers) - - if tool_name == "search_httk": - identifiers = self._resolve_identifiers( - parameters.get("dtxsid"), - parameters.get("dtxsids"), - ) - return self.search_httk(identifiers) - - if tool_name == "get_cpdat_vocabulary": - return self.get_cpdat_vocabulary(parameters["vocab_name"]) - - if tool_name == "search_qsurs": - identifiers = self._resolve_identifiers( - parameters.get("dtxsid"), - parameters.get("dtxsids"), - ) - return self.search_qsurs(identifiers) - - if tool_name == "search_exposures": - identifiers = self._resolve_identifiers( - parameters.get("dtxsid"), - parameters.get("dtxsids"), - ) - return self.search_exposures(parameters["data_type"], identifiers) - - # Granular handlers ---------------------------------------------- - handler_map = { - "get_seem_general": lambda p: self.get_seem_general(p["dtxsid"]), - "batch_get_seem_general": lambda p: self.batch_get_seem_general( - p["dtxsids"] - ), - "get_seem_demographic": lambda p: self.get_seem_demographic(p["dtxsid"]), - "batch_get_seem_demographic": lambda p: self.batch_get_seem_demographic( - p["dtxsids"] - ), - "get_exposure_product_data": lambda p: self.get_exposure_product_data( - p["dtxsid"] - ), - "batch_get_exposure_product_data": lambda p: 
self.batch_get_exposure_product_data( - p["dtxsids"] - ), - "list_exposure_product_puc": lambda p: self.list_exposure_product_puc(), - "get_exposure_list_presence": lambda p: self.get_exposure_list_presence( - p["dtxsid"] - ), - "batch_get_exposure_list_presence": lambda p: self.batch_get_exposure_list_presence( - p["dtxsids"] - ), - "list_exposure_list_presence_tags": lambda p: self.list_exposure_list_presence_tags(), - "get_exposure_httk": lambda p: self.get_exposure_httk(p["dtxsid"]), - "batch_get_exposure_httk": lambda p: self.batch_get_exposure_httk( - p["dtxsids"] - ), - "get_exposure_functional_use": lambda p: self.get_exposure_functional_use( - p["dtxsid"] - ), - "batch_get_exposure_functional_use": lambda p: self.batch_get_exposure_functional_use( - p["dtxsids"] - ), - "get_exposure_functional_use_probability": lambda p: self.get_exposure_functional_use_probability( - p["dtxsid"] - ), - "list_exposure_functional_use_categories": lambda p: self.list_exposure_functional_use_categories(), - "get_exposure_ccd_puc": lambda p: self.get_exposure_ccd_puc(p["dtxsid"]), - "get_exposure_ccd_production_volume": lambda p: self.get_exposure_ccd_production_volume( - p["dtxsid"] - ), - "get_exposure_ccd_monitoring_data": lambda p: self.get_exposure_ccd_monitoring_data( - p["dtxsid"] - ), - "get_exposure_ccd_keywords": lambda p: self.get_exposure_ccd_keywords( - p["dtxsid"] - ), - "get_exposure_ccd_functional_use": lambda p: self.get_exposure_ccd_functional_use( - p["dtxsid"] - ), - "get_exposure_ccd_chem_weight_fractions": lambda p: self.get_exposure_ccd_chem_weight_fractions( - p["dtxsid"] - ), - "get_exposure_mmdb_single_sample_by_medium": lambda p: self.get_exposure_mmdb_single_sample_by_medium( - p["medium"] - ), - "get_exposure_mmdb_single_sample_by_dtxsid": lambda p: self.get_exposure_mmdb_single_sample_by_dtxsid( - p["dtxsid"] - ), - "list_exposure_mmdb_mediums": lambda p: self.list_exposure_mmdb_mediums(), - "get_exposure_mmdb_aggregate_by_medium": lambda p: 
self.get_exposure_mmdb_aggregate_by_medium( - p["medium"] - ), - "get_exposure_mmdb_aggregate_by_dtxsid": lambda p: self.get_exposure_mmdb_aggregate_by_dtxsid( - p["dtxsid"] - ), - } - - try: - handler = handler_map[tool_name] - except KeyError as exc: - raise ValueError(f"Unknown tool: {tool_name}") from exc - return handler(parameters) - - # ------------------------------------------------------------------ - # Helper utilities - # ------------------------------------------------------------------ - def _resolve_identifiers( - self, - single: Optional[str], - multiple: Optional[Sequence[str]], - ) -> List[str]: - identifiers: List[str] = [] - if multiple: - identifiers.extend([item for item in multiple if item]) - if single: - identifiers.append(single) - identifiers = [item for item in identifiers if item] - if not identifiers: - raise ValueError("At least one DSSTox identifier must be provided.") - return identifiers - - # ------------------------------------------------------------------ - # Legacy tool implementations - # ------------------------------------------------------------------ - def search_cpdat( - self, vocab_name: str, dtxsids: Sequence[str] - ) -> List[Dict[str, Any]]: - results: List[Dict[str, Any]] = [] - for sid in dtxsids: - payload = self._with_retry( - lambda sid=sid: self.client.search_cpdat(vocab_name, sid) - ) - results.extend(self._ensure_list(payload)) - return results - - def search_httk(self, dtxsids: Sequence[str]) -> List[Dict[str, Any]]: - results: List[Dict[str, Any]] = [] - for sid in dtxsids: - payload = self._with_retry(lambda sid=sid: self.client.search_httk(sid)) - results.extend(self._ensure_list(payload)) - return results - - def get_cpdat_vocabulary(self, vocab_name: str) -> List[Any]: - payload = self._with_retry(lambda: self.client.get_cpdat_vocabulary(vocab_name)) - return self._ensure_list(payload) - - def search_qsurs(self, dtxsids: Sequence[str]) -> List[Dict[str, Any]]: - results: List[Dict[str, Any]] = [] - for 
sid in dtxsids: - payload = self._with_retry(lambda sid=sid: self.client.search_qsurs(sid)) - results.extend(self._ensure_list(payload)) - return results - - def search_exposures(self, data_type: str, dtxsids: Sequence[str]) -> List[Any]: - if not dtxsids: - raise ValueError("At least one DSSTox identifier must be provided.") - results: List[Any] = [] - for sid in dtxsids: - payload = self._with_retry( - lambda sid=sid: self.client.search_exposures(data_type, sid) - ) - results.extend(self._ensure_list(payload)) - return results - - # ------------------------------------------------------------------ - # SEEM helpers - # ------------------------------------------------------------------ - def get_seem_general(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.seem_general(dtxsid)) - return self._ensure_list(result) - - def batch_get_seem_general(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry(lambda: self.client.seem_general_batch(identifiers)) - return self._ensure_list(result) - - def get_seem_demographic(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.seem_demographic(dtxsid)) - return self._ensure_list(result) - - def batch_get_seem_demographic(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry( - lambda: self.client.seem_demographic_batch(identifiers) - ) - return self._ensure_list(result) - - # ------------------------------------------------------------------ - # CPDat product + list presence helpers - # ------------------------------------------------------------------ - def get_exposure_product_data(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.product_data(dtxsid)) - return self._ensure_list(result) - - def 
batch_get_exposure_product_data(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry(lambda: self.client.product_data_batch(identifiers)) - return self._ensure_list(result) - - def list_exposure_product_puc(self) -> List[Any]: - result = self._with_retry(self.client.product_data_puc) - return self._ensure_list(result) - - def get_exposure_list_presence(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.list_presence(dtxsid)) - return self._ensure_list(result) - - def batch_get_exposure_list_presence(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry(lambda: self.client.list_presence_batch(identifiers)) - return self._ensure_list(result) - - def list_exposure_list_presence_tags(self) -> List[Any]: - result = self._with_retry(self.client.list_presence_tags) - return self._ensure_list(result) - - # ------------------------------------------------------------------ - # HTTK + functional use helpers - # ------------------------------------------------------------------ - def get_exposure_httk(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.httk(dtxsid)) - return self._ensure_list(result) - - def batch_get_exposure_httk(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - return [] - result = self._with_retry(lambda: self.client.httk_batch(identifiers)) - return self._ensure_list(result) - - def get_exposure_functional_use(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.functional_use(dtxsid)) - return self._ensure_list(result) - - def batch_get_exposure_functional_use(self, dtxsids: Sequence[str]) -> List[Any]: - identifiers = [sid for sid in dtxsids if sid] - if not identifiers: - 
return [] - result = self._with_retry(lambda: self.client.functional_use_batch(identifiers)) - return self._ensure_list(result) - - def get_exposure_functional_use_probability( - self, dtxsid: str - ) -> List[Dict[str, Any]]: - result = self._with_retry( - lambda: self.client.functional_use_probability(dtxsid) - ) - return self._ensure_list(result) - - def list_exposure_functional_use_categories(self) -> List[Any]: - result = self._with_retry(self.client.functional_use_categories) - return self._ensure_list(result) - - # ------------------------------------------------------------------ - # CCD helpers - # ------------------------------------------------------------------ - def get_exposure_ccd_puc(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_puc(dtxsid)) - return self._ensure_list(result) - - def get_exposure_ccd_production_volume(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_production_volume(dtxsid)) - return self._ensure_list(result) - - def get_exposure_ccd_monitoring_data(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_monitoring_data(dtxsid)) - return self._ensure_list(result) - - def get_exposure_ccd_keywords(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_keywords(dtxsid)) - return self._ensure_list(result) - - def get_exposure_ccd_functional_use(self, dtxsid: str) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_functional_use(dtxsid)) - return self._ensure_list(result) - - def get_exposure_ccd_chem_weight_fractions( - self, dtxsid: str - ) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.ccd_chem_weight_fractions(dtxsid)) - return self._ensure_list(result) - - # ------------------------------------------------------------------ - # MMDB helpers - # 
------------------------------------------------------------------ - def get_exposure_mmdb_single_sample_by_medium( - self, medium: str - ) -> List[Dict[str, Any]]: - result = self._with_retry( - lambda: self.client.mmdb_single_sample_by_medium(medium) - ) - return self._ensure_list(result) - - def get_exposure_mmdb_single_sample_by_dtxsid( - self, dtxsid: str - ) -> List[Dict[str, Any]]: - result = self._with_retry( - lambda: self.client.mmdb_single_sample_by_dtxsid(dtxsid) - ) - return self._ensure_list(result) - - def list_exposure_mmdb_mediums(self) -> List[Any]: - result = self._with_retry(self.client.mmdb_mediums) - return self._ensure_list(result) - - def get_exposure_mmdb_aggregate_by_medium( - self, medium: str - ) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.mmdb_aggregate_by_medium(medium)) - return self._ensure_list(result) - - def get_exposure_mmdb_aggregate_by_dtxsid( - self, dtxsid: str - ) -> List[Dict[str, Any]]: - result = self._with_retry(lambda: self.client.mmdb_aggregate_by_dtxsid(dtxsid)) - return self._ensure_list(result) - - -# ---------------------------------------------------------------------- -# Utility helpers for tool definitions -# ---------------------------------------------------------------------- -def _single_id_tool(name: str, description: str) -> Dict[str, Any]: - return { - "name": name, - "description": description, - "parameters": { - "type": "object", - "properties": { - "dtxsid": { - "type": "string", - "description": "DSSTox Substance Identifier", - } - }, - "required": ["dtxsid"], - }, - } - - -def _batch_id_tool(name: str, description: str) -> Dict[str, Any]: - return { - "name": name, - "description": description, - "parameters": { - "type": "object", - "properties": { - "dtxsids": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "description": "List of DSSTox Substance Identifiers", - } - }, - "required": ["dtxsids"], - }, - } - - -def _no_param_tool(name: str, 
description: str) -> Dict[str, Any]: - return { - "name": name, - "description": description, - "parameters": {"type": "object", "properties": {}}, - } - - -def _str_param_tool(name: str, field: str, description: str) -> Dict[str, Any]: - return { - "name": name, - "description": description, - "parameters": { - "type": "object", - "properties": { - field: { - "type": "string", - "description": field.replace("_", " ").capitalize(), - } - }, - "required": [field], - }, - } - -==================================================================================================== -FILE: src/epacomp_tox/resources/cheminformatics.py -==================================================================================================== -from typing import Any, Dict, List - -import ctxpy as ctx -from epacomp_tox.contracts import schema_ref -from epacomp_tox.validators import to_serializable - -from .base import BaseResource - - -class CheminformaticsResource(BaseResource): - """ - MCP resource for EPA CompTox cheminformatics tools. - - Provides access to ToxPrint chemotypes and other cheminformatics tools. - """ - - @property - def name(self) -> str: - return "cheminformatics" - - @property - def description(self) -> str: - return "Access to ToxPrint chemotypes and other cheminformatics tools" - - def __init__(self, api_key: str): - """ - Initialize the cheminformatics resource. - - Args: - api_key: EPA CompTox API key. - """ - super().__init__(api_key) - # No specific client for cheminformatics, using functions directly - - def get_tools(self) -> List[Dict[str, Any]]: - """ - Get a list of tools provided by this resource. - - Returns: - List of tool definitions. - """ - tools: List[Dict[str, Any]] = [] - # ToxPrint tools disabled: endpoints not available on new CTX API - return tools - - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - """ - Execute a tool with the given parameters. - - Args: - tool_name: Name of the tool to execute. 
- parameters: Parameters for the tool. - - Returns: - Tool execution result. - - Raises: - ValueError: If the tool is not found or parameters are invalid. - """ - raise ValueError(f"Unknown tool: {tool_name}") - - def search_toxprints(self, chemical: str) -> Dict[str, Any]: - """ - Search for ToxPrint chemotypes for a chemical. - - Args: - chemical: Chemical identifier (DTXSID, DTXCID, or SMILES). - - Returns: - ToxPrint chemotypes. - """ - results = self._with_retry(lambda: ctx.search_toxprints(chemical=chemical)) - return to_serializable(results) - - def batch_search_toxprints(self, chemicals: List[str]) -> Dict[str, Any]: - """ - Search for ToxPrint chemotypes for multiple chemicals. - - Args: - chemicals: List of chemical identifiers (DTXSIDs, DTXCIDs, or SMILES). - - Returns: - ToxPrint chemotypes for multiple chemicals. - """ - results = self._with_retry(lambda: ctx.search_toxprints(chemical=chemicals)) - return to_serializable(results) - -==================================================================================================== -FILE: src/epacomp_tox/resources/metadata.py -==================================================================================================== -from __future__ import annotations - -from typing import Any, Dict, List - -from epacomp_tox.contracts import schema_ref -from epacomp_tox.metadata import ModelCardFilter, ModelCardStore -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore -from epacomp_tox.resources.base import BaseResource - - -class MetadataResource(BaseResource): - """Resource exposing model metadata and applicability domain definitions.""" - - def __init__( - self, - api_key: str = "", - *, - store: ModelCardStore | None = None, - ad_store: ApplicabilityDomainStore | None = None, - ): - super().__init__(api_key) - self.store = store or ModelCardStore() - self.ad_store = ad_store or ApplicabilityDomainStore() - - @property - def name(self) -> str: - return "metadata" - - @property - def 
description(self) -> str: - return "Model cards, applicability domain definitions, and provenance metadata" - - def get_tools(self) -> List[Dict[str, Any]]: - return [ - { - "name": "metadata_get_model_card", - "description": "Retrieve CompTox model cards with optional filters and pagination", - "inputSchema": { - "type": "object", - "properties": { - "model_name": {"type": "string"}, - "endpoint": {"type": "string"}, - "compliance": { - "type": "string", - "enum": ["approved", "draft"], - }, - "limit": {"type": "integer", "minimum": 1, "maximum": 100}, - "cursor": {"type": "string"}, - }, - }, - "outputSchema": { - "type": "object", - "properties": { - "modelCards": {"type": "array"}, - "nextCursor": {"type": ["string", "null"]}, - }, - }, - "responseSchemaRef": schema_ref( - "metadata", "model_cards.response.schema" - ), - "outputSchema": { - "type": "object", - "properties": { - "modelCards": {"type": "array"}, - "nextCursor": {"type": ["string", "null"]}, - }, - }, - }, - { - "name": "metadata_list_applicability_domain", - "description": "List applicability domain reference definitions", - "inputSchema": { - "type": "object", - "properties": { - "limit": {"type": "integer", "minimum": 1, "maximum": 100}, - "cursor": {"type": "string"}, - }, - }, - "outputSchema": { - "type": "object", - "properties": { - "applicabilityDomains": {"type": "array"}, - "nextCursor": {"type": ["string", "null"]}, - }, - }, - "responseSchemaRef": schema_ref( - "metadata", "applicability_list.response.schema" - ), - "outputSchema": { - "type": "object", - "properties": { - "applicabilityDomains": {"type": "array"}, - "nextCursor": {"type": ["string", "null"]}, - }, - }, - }, - { - "name": "metadata_get_applicability_domain", - "description": "Fetch applicability domain configuration for a specific model", - "inputSchema": { - "type": "object", - "properties": { - "model_name": {"type": "string"}, - }, - "required": ["model_name"], - }, - "outputSchema": { - "type": "object", - 
"properties": { - "model": {"type": "string"}, - "version": {"type": "string"}, - "criteria": {"type": "array"}, - "policy": {"type": "string"}, - "errorCode": {"type": "string"}, - "references": {"type": "array"}, - }, - }, - "responseSchemaRef": schema_ref( - "metadata", "applicability_detail.response.schema" - ), - "outputSchema": { - "type": "object", - "properties": { - "model": {"type": "string"}, - "version": {"type": "string"}, - "criteria": {"type": "array"}, - "policy": {"type": "string"}, - "errorCode": {"type": ["string", "null"]}, - "references": {"type": "array"}, - }, - }, - }, - ] - - def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: - if tool_name == "metadata_get_model_card": - filters = ModelCardFilter( - model_name=parameters.get("model_name"), - endpoint_contains=parameters.get("endpoint"), - compliance=parameters.get("compliance"), - ) - limit = parameters.get("limit") - cursor = parameters.get("cursor") - cards, next_cursor = self.store.list_cards( - filters=filters, limit=limit, cursor=cursor - ) - payload = [] - for item in cards: - data = { - "card": item["card"], - "checksum": item["checksum"], - "lastModified": item["lastModified"], - } - payload.append(data) - return { - "modelCards": payload, - "nextCursor": next_cursor, - } - - if tool_name == "metadata_list_applicability_domain": - limit = parameters.get("limit") - cursor = parameters.get("cursor") - defs, next_cursor = self.ad_store.list_definitions( - limit=limit, cursor=cursor - ) - return { - "applicabilityDomains": defs, - "nextCursor": next_cursor, - } - - if tool_name == "metadata_get_applicability_domain": - model_name = parameters["model_name"] - definition = self.ad_store.get_definition(model_name) - if not definition: - raise ValueError(f"No applicability domain found for {model_name}") - return definition - - raise ValueError(f"Unknown tool: {tool_name}") - 
-==================================================================================================== -FILE: tests/test_orchestrator_stages.py -==================================================================================================== -from __future__ import annotations - -from unittest import mock - -import pytest - -from ctxpy import RateLimitInfo -from epacomp_tox.orchestrator.ctx_data import CtxDataAssembler -from epacomp_tox.orchestrator.identifiers import ( - IdentifierResolutionError, - IdentifierResolver, -) -from epacomp_tox.orchestrator.models import PredictiveTask -from epacomp_tox.orchestrator.predictive import PredictiveCoordinator -from epacomp_tox.orchestrator.workflow import GenRAOrchestrator -from epacomp_tox.predictive import ( - ADCheckResult, - PredictiveRequest, - PredictiveServiceBase, -) -from epacomp_tox.resources.cheminformatics import CheminformaticsResource -from epacomp_tox.resources.exposure import ExposureResource -from epacomp_tox.resources.hazard import HazardResource - - -def _rate_limit( - limit: int = 120, remaining: int = 119, reset: int = 60 -) -> RateLimitInfo: - return RateLimitInfo(limit=limit, remaining=remaining, reset=reset) - - -class _StubADStore: - def __init__(self, definition): - self._definition = definition - - def get_definition(self, _model_name): - return self._definition - - -class _StubPredictiveService(PredictiveServiceBase): - def __init__(self, *, name: str, ad_results, payloads, ad_definition): - super().__init__( - config={"name": name, "version": "1.0"}, - ad_store=_StubADStore(ad_definition), - ) - self._ad_results = list(ad_results) - self._payloads = list(payloads) - self._last_ad_result = self._ad_results[-1] if self._ad_results else None - self.ad_checks = 0 - self.predictions = 0 - - def _predict_impl(self, request: PredictiveRequest): - self.predictions += 1 - if not self._payloads: - raise RuntimeError("No payload configured") - value = self._payloads.pop(0) - # retain last value so 
repeated predict calls can reuse when necessary - self._payloads.append(value) - return value - - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - self.ad_checks += 1 - if self._ad_results: - result = self._ad_results.pop(0) - self._last_ad_result = result - self._ad_results.append(result) - return result - if self._last_ad_result is None: - raise RuntimeError("No AD result configured") - return self._last_ad_result - - -def test_identifier_resolver_caches_and_sanitizes_metadata(): - chemical_resource = mock.Mock() - chemical_resource.search_chemical.return_value = [ - { - "dtxsid": "DTXSID0000001", - "preferredName": "Example Chemical", - "casrn": "50-00-0", - } - ] - chemical_resource.get_chemical_details.return_value = { - "dtxsid": "DTXSID0000001", - "preferredName": "Example Chemical", - "casrn": "50-00-0", - "synonyms": ["example chemical", "Formaldehyde"], - } - chemical_resource.get_last_metadata.side_effect = [ - {"status": 200, "rate_limit": _rate_limit()}, - {"status": 200, "request_id": "req-chem-1"}, - ] - - resolver = IdentifierResolver(chemical_resource=chemical_resource, cache_ttl=120) - - result = resolver.resolve("50-00-0", identifier_type="casrn") - assert result.dtxsid == "DTXSID0000001" - assert result.cache_hit is False - assert "Formaldehyde" in result.synonyms - assert result.trace[0].metadata["rate_limit"]["limit"] == 120 - - cached = resolver.resolve("50-00-0", identifier_type="casrn") - assert cached.cache_hit is True - assert chemical_resource.search_chemical.call_count == 1 - assert chemical_resource.get_chemical_details.call_count == 1 - # No additional metadata calls when serving from cache - assert chemical_resource.get_last_metadata.call_count == 2 - - -def test_identifier_resolver_raises_when_not_found(): - chemical_resource = mock.Mock() - chemical_resource.search_chemical.return_value = [] - chemical_resource.get_last_metadata.return_value = {} - - resolver = 
IdentifierResolver(chemical_resource=chemical_resource) - with pytest.raises(IdentifierResolutionError): - resolver.resolve("UNKNOWN", identifier_type="name") - - -def _mock_resource(resource_cls): - return mock.create_autospec(resource_cls, instance=True) - - -def test_ctx_data_assembler_fetches_datasets_and_uses_cache(): - hazard_resource = _mock_resource(HazardResource) - exposure_resource = _mock_resource(ExposureResource) - cheminformatics_resource = _mock_resource(CheminformaticsResource) - - hazard_resource.search_hazard.return_value = [{"hazard": "record"}] - hazard_resource.get_last_metadata.side_effect = lambda: { - "status": 200, - "rate_limit": _rate_limit(100, 98, 30), - } - - exposure_resource.search_httk.return_value = [{"httk": "value"}] - exposure_resource.search_qsurs.return_value = [] - exposure_resource.search_cpdat.return_value = [{"fc": "industrial"}] - exposure_resource.get_last_metadata.side_effect = lambda: { - "status": 200, - "request_id": "req-exp", - } - - cheminformatics_resource.search_toxprints.return_value = {"fingerprints": ["FP1"]} - cheminformatics_resource.get_last_metadata.return_value = {} - - assembler = CtxDataAssembler( - hazard_resource=hazard_resource, - exposure_resource=exposure_resource, - cheminformatics_resource=cheminformatics_resource, - hazard_data_types=("all",), - exposure_datasets=("httk",), - cpdat_vocabularies=("fc",), - include_toxprints=False, - cache_ttl=300, - ) - - bundle = assembler.assemble("dtxsid0001234", scenarios=["genra_read_across"]) - assert bundle.cache_hit is False - assert bundle.hazard["all"][0]["hazard"] == "record" - assert bundle.exposure["httk"][0]["httk"] == "value" - assert bundle.exposure["cpdat:fc"][0]["fc"] == "industrial" - assert "exposure:qsurs" in bundle.data_gaps # qsurs returned empty - assert bundle.cheminformatics["toxprints"]["fingerprints"] == ["FP1"] - assert bundle.trace[0].metadata["rate_limit"]["limit"] == 100 - - # Cached execution should avoid additional upstream 
calls - cached = assembler.assemble("dtxsid0001234", scenarios=["genra_read_across"]) - assert cached.cache_hit is True - assert hazard_resource.search_hazard.call_count == 1 - assert exposure_resource.search_httk.call_count == 1 - assert exposure_resource.search_qsurs.call_count == 1 - assert exposure_resource.search_cpdat.call_count == 1 - assert cheminformatics_resource.search_toxprints.call_count == 1 - - -def test_ctx_data_assembler_marks_toxprint_gap_when_resource_missing(): - hazard_resource = _mock_resource(HazardResource) - hazard_resource.search_hazard.return_value = [] - hazard_resource.get_last_metadata.return_value = {} - - exposure_resource = _mock_resource(ExposureResource) - exposure_resource.get_last_metadata.return_value = {} - - assembler = CtxDataAssembler( - hazard_resource=hazard_resource, - exposure_resource=exposure_resource, - cheminformatics_resource=None, - hazard_data_types=("all",), - exposure_datasets=(), - cpdat_vocabularies=(), - include_toxprints=True, - cache_ttl=0, - ) - - bundle = assembler.assemble("DTXSID9999999") - assert "cheminformatics:toxprints" in bundle.data_gaps - assert "hazard:all" in bundle.data_gaps - - -def test_predictive_coordinator_success_flow(): - ad = ADCheckResult(in_domain=True, confidence=0.9) - service = _StubPredictiveService( - name="Stub", - ad_results=[ad], - payloads=[{"value": 42}], - ad_definition={ - "model": "Stub", - "version": "1", - "policy": "block", - "errorCode": "STUB_AD_FAIL", - }, - ) - coordinator = PredictiveCoordinator({"stub": service}) - task = PredictiveTask( - service="stub", request=PredictiveRequest(chemical_identifier="DTXSID0001") - ) - - result = coordinator.run([task]) - - assert result.succeeded is True - assert len(result.guardrails) == 0 - assert result.results[0].prediction == {"value": 42} - assert service.ad_checks >= 2 - assert service.predictions == 1 - - -def test_predictive_coordinator_blocks_on_ad_failure(): - ad = ADCheckResult(in_domain=False, confidence=0.3) - 
service = _StubPredictiveService( - name="Blocked", - ad_results=[ad], - payloads=[{"value": 1}], - ad_definition={ - "model": "Blocked", - "version": "1", - "policy": "block", - "errorCode": "BLOCKED_AD", - }, - ) - coordinator = PredictiveCoordinator({"blocked": service}) - task = PredictiveTask( - service="blocked", request=PredictiveRequest(chemical_identifier="DTXSID0002") - ) - - result = coordinator.run([task], require_ad_clearance=True) - - assert result.succeeded is False - assert result.results[0].status == "denied" - assert result.guardrails[0].status == "denied" - assert result.guardrails[0].code == "BLOCKED_AD" - # predict never invoked when AD fails hard - assert service.predictions == 0 - - -def test_predictive_coordinator_warn_policy_continues(): - ad = ADCheckResult(in_domain=False, confidence=0.55) - service = _StubPredictiveService( - name="Warning", - ad_results=[ad], - payloads=[{"value": 7}], - ad_definition={ - "model": "Warning", - "version": "1", - "policy": "warn", - "errorCode": "WARN_AD", - }, - ) - coordinator = PredictiveCoordinator( - {"warning": service}, default_require_ad_clearance=False - ) - task = PredictiveTask( - service="warning", request=PredictiveRequest(chemical_identifier="DTXSID0003") - ) - - result = coordinator.run([task]) - - assert result.succeeded is True - assert result.results[0].status == "success" - assert len(result.guardrails) == 1 - assert result.guardrails[0].status == "warning" - assert result.guardrails[0].code == "WARN_AD" - assert service.predictions == 1 - - -def test_genra_orchestrator_successful_bundle(tmp_path): - hazard_resource = _mock_resource(HazardResource) - exposure_resource = _mock_resource(ExposureResource) - cheminformatics_resource = _mock_resource(CheminformaticsResource) - chemical_resource = mock.Mock() - - hazard_resource.search_hazard.return_value = [{"hazard": 1}] - hazard_resource.get_last_metadata.return_value = {} - exposure_resource.search_httk.return_value = [{"httk": 2}] - 
exposure_resource.search_cpdat.return_value = [{"fc": "cat"}] - exposure_resource.get_last_metadata.return_value = {} - cheminformatics_resource.search_toxprints.return_value = {"toxprints": []} - cheminformatics_resource.get_last_metadata.return_value = {} - - chemical_resource.search_chemical.return_value = [ - {"dtxsid": "DTXSID0000001", "preferredName": "Example"} - ] - chemical_resource.get_chemical_details.return_value = { - "dtxsid": "DTXSID0000001", - "preferredName": "Example", - "casrn": "50-00-0", - } - chemical_resource.get_last_metadata.return_value = {} - - resolver = IdentifierResolver(chemical_resource=chemical_resource, cache_ttl=0) - assembler = CtxDataAssembler( - hazard_resource=hazard_resource, - exposure_resource=exposure_resource, - cheminformatics_resource=cheminformatics_resource, - include_toxprints=False, - cache_ttl=0, - ) - predictive_service = _StubPredictiveService( - name="Stub", - ad_results=[ADCheckResult(in_domain=True, confidence=0.9)], - payloads=[{"prediction": "ok"}], - ad_definition={ - "model": "Stub", - "version": "1", - "policy": "block", - "errorCode": "GENRA_AD_FAIL", - }, - ) - coordinator = PredictiveCoordinator({"stub": predictive_service}) - orchestrator = GenRAOrchestrator( - identifier_resolver=resolver, - ctx_data_assembler=assembler, - predictive_coordinator=coordinator, - persistence_dir=tmp_path, - clock=lambda: "2025-03-26T00:00:00Z", - ) - - bundle = orchestrator.run_workflow( - target_identifier="50-00-0", - identifier_type="casrn", - scenarios=["genra_read_across"], - predictive_plan=[ - PredictiveTask( - service="stub", - request=PredictiveRequest(chemical_identifier="DTXSID0000001"), - ) - ], - ) - - assert bundle["status"] == "success" - assert bundle["target"]["dtxsid"] == "DTXSID0000001" - assert bundle["ctxData"]["hazard"]["all"][0]["hazard"] == 1 - assert bundle["predictive"]["results"][0]["prediction"] == {"prediction": "ok"} - assert bundle["evidence"]["confidenceBand"] in {"Robust", "Limited", 
"Unavailable"} - run_dir = tmp_path / bundle["workflowRunId"] - bundle_path = run_dir / "bundle.json" - metadata_path = run_dir / "metadata.json" - attachments_dir = run_dir / "attachments" - assert bundle_path.exists() - assert metadata_path.exists() - assert (attachments_dir / "ctx_data.json").exists() - assert (attachments_dir / "predictive_results.json").exists() - assert (attachments_dir / "evidence.json").exists() - assert len(bundle["storage"]["attachments"]) >= 3 - assert bundle["storage"]["bundleChecksum"] - - -def test_predictive_coordinator_records_prediction_errors(): - ad = ADCheckResult(in_domain=True, confidence=0.8) - service = _StubPredictiveService( - name="Error", - ad_results=[ad], - payloads=[], # triggers runtime error inside predict - ad_definition={ - "model": "Error", - "version": "1", - "policy": "block", - "errorCode": "ERR_AD", - }, - ) - coordinator = PredictiveCoordinator({"error": service}) - task = PredictiveTask( - service="error", request=PredictiveRequest(chemical_identifier="DTXSID0004") - ) - - result = coordinator.run([task]) - - assert result.succeeded is False - assert result.results[0].status == "error" - assert result.guardrails[0].status == "error" - assert "No payload" in result.results[0].error - -==================================================================================================== -FILE: tests/test_predictive_regression.py -==================================================================================================== -from __future__ import annotations - -import json -from pathlib import Path -from typing import Any, Dict - -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from epacomp_tox.metadata.applicability import ApplicabilityDomainStore -from epacomp_tox.predictive import ( - ADCheckResult, - OperaPropertyService, - PredictiveRequest, - PredictiveServiceBase, - TestConsensusPredictiveService, - build_predictive_router, -) -from epacomp_tox.predictive.clients import 
PredictiveClient - - -class StubClient(PredictiveClient): - def __init__(self, *, response, in_domain: bool, confidence: float = 0.9): - self.response = response - self.in_domain = in_domain - self.confidence = confidence - - def predict(self, request: PredictiveRequest): - return self.response - - def check_applicability_domain(self, request: PredictiveRequest) -> ADCheckResult: - return ADCheckResult(in_domain=self.in_domain, confidence=self.confidence) - - -def _write_ad( - tmp_path: Path, name: str, policy: str, error_code: str | None = None -) -> ApplicabilityDomainStore: - directory = tmp_path / "ad" - directory.mkdir() - payload = { - "model": name, - "version": "1", - "criteria": [], - "policy": policy, - } - if error_code: - payload["errorCode"] = error_code - (directory / "entry.json").write_text(json.dumps(payload)) - return ApplicabilityDomainStore(directory=directory) - - -def _create_client(app) -> TestClient: - return TestClient(app) - - -class _SchemaStubService(PredictiveServiceBase): - def __init__(self) -> None: - super().__init__( - config={ - "name": "schema-stub", - "version": "0.0.1", - } - ) - - def _predict_impl(self, request: PredictiveRequest) -> Dict[str, Any]: - return {"value": 42, "identifier": request.chemical_identifier} - - def _check_ad_impl(self, request: PredictiveRequest) -> ADCheckResult: - return ADCheckResult( - in_domain=True, confidence=0.99, details={"policy": "allow"} - ) - - -def test_block_policy_returns_error(tmp_path: Path) -> None: - ad_store = _write_ad( - tmp_path, "TEST Consensus Acute Toxicity", "block", "TEST_AD_FAIL" - ) - service = TestConsensusPredictiveService( - config={ - "name": "TEST Consensus Acute Toxicity", - "version": "5.2.0", - "ad_model_name": "TEST Consensus Acute Toxicity", - }, - client=StubClient(response={"value": 1.23}, in_domain=False), - ad_store=ad_store, - ) - router = build_predictive_router(service_factory=lambda: service, prefix="/test") - app = FastAPI() - app.include_router(router) 
- client = _create_client(app) - response = client.post("/test/predict", json={"chemical_identifier": "DTXSID1"}) - assert response.status_code == 400 - assert "TEST_AD_FAIL" in response.json()["detail"] - - -def test_warn_policy_allows_response(tmp_path: Path) -> None: - ad_store = _write_ad( - tmp_path, "OPERA Property Predictions", "warn", "OPERA_AD_WARN" - ) - service = OperaPropertyService( - config={ - "name": "OPERA Property Predictions", - "version": "3.6.1", - "ad_model_name": "OPERA Property Predictions", - }, - client=StubClient(response={"value": 0.5}, in_domain=False), - ad_store=ad_store, - ) - router = build_predictive_router(service_factory=lambda: service, prefix="/opera") - app = FastAPI() - app.include_router(router) - client = _create_client(app) - response = client.post("/opera/predict", json={"chemical_identifier": "DTXSID2"}) - assert response.status_code == 200 - body = response.json() - assert body["metadata"]["adWarning"] is True - assert "OPERA_AD_WARN" in body["metadata"]["adMessage"] - - -def test_predictive_router_validates_responses(monkeypatch) -> None: - service = _SchemaStubService() - router = build_predictive_router(service_factory=lambda: service, prefix="/schema") - app = FastAPI() - app.include_router(router) - client = _create_client(app) - - recorded: list[tuple[str, str]] = [] - - def _fake_validate(payload, *, namespace, name): # type: ignore[override] - recorded.append((namespace, name)) - - monkeypatch.setattr( - "epacomp_tox.predictive.router.validate_payload", _fake_validate - ) - - resp = client.post("/schema/predict", json={"chemical_identifier": "DTXSID3"}) - assert resp.status_code == 200 - ad_resp = client.post( - "/schema/check_applicability_domain", json={"chemical_identifier": "DTXSID3"} - ) - assert ad_resp.status_code == 200 - - assert ("predictive", "predict.response.schema") in recorded - assert ("predictive", "ad_check.response.schema") in recorded - 
-==================================================================================================== -FILE: tests/workflows/test_offline_workflows.py -==================================================================================================== -from __future__ import annotations - -from pathlib import Path -from typing import Dict, List - -import pytest - -from epacomp_tox import PredictiveRequest, PredictiveTask -from epacomp_tox.orchestrator.offline import ( - OFFLINE_SCENARIOS, - build_offline_orchestrator, -) - - -def _sanitize_bundle(bundle: Dict[str, any]) -> Dict[str, any]: - predictive = bundle["predictive"]["results"][0] - metadata = predictive["metadata"] - return { - "status": bundle["status"], - "scenarios": bundle.get("scenarios"), - "target": { - "dtxsid": bundle["target"]["dtxsid"], - "preferredName": bundle["target"].get("preferredName"), - "casrn": bundle["target"].get("casrn"), - "synonyms": sorted(bundle["target"].get("synonyms", [])), - }, - "guardrails": bundle.get("guardrails", []), - "ctxData": { - "hazardEndpoints": [ - item.get("endpoint") - for item in bundle["ctxData"]["hazard"].get("all", []) - ], - "exposureKeys": sorted(bundle["ctxData"]["exposure"].keys()), - "cheminformaticsKeys": sorted(bundle["ctxData"]["cheminformatics"].keys()), - "dataGaps": bundle["ctxData"].get("dataGaps", []), - }, - "predictive": { - "service": predictive["service"], - "status": predictive["status"], - "scenario": predictive.get("scenario"), - "prediction": predictive["prediction"], - "ad": predictive["ad"], - "metadata": { - "model": metadata.get("model"), - "model_version": metadata.get("model_version"), - "identifier": metadata.get("identifier"), - "identifier_type": metadata.get("identifier_type"), - "analogueCoverage": metadata.get("analogueCoverage"), - "evidenceQuality": metadata.get("evidenceQuality"), - "predictiveAgreement": metadata.get("predictiveAgreement"), - }, - }, - "evidence": { - "confidenceBand": 
bundle["evidence"].get("confidenceBand"), - "scores": bundle["evidence"].get("scores"), - "recommendedActions": bundle["evidence"].get("recommendedActions"), - }, - } - - -def _expected_snapshot(scenario: str) -> Dict[str, any]: - exposure_lookup = { - "acute_toxicity": ["cpdat:fc", "httk"], - "exposure_prioritization": [ - "cpdat:fc", - "cpdat:puc", - "httk", - "pathways", - "seem", - ], - "genra_read_across": ["cpdat:fc", "httk", "qsurs"], - } - return { - "status": "success", - "scenarios": [scenario], - "target": { - "dtxsid": "DTXSID0000001", - "preferredName": "Offline Example", - "casrn": "50-00-0", - "synonyms": ["Formaldehyde", "Methanal"], - }, - "guardrails": [], - "ctxData": { - "hazardEndpoints": ["Acute toxicity"], - "exposureKeys": exposure_lookup[scenario], - "cheminformaticsKeys": ["toxprints"], - "dataGaps": [], - }, - "predictive": { - "service": "offline_genra", - "status": "success", - "scenario": scenario, - "prediction": { - "prediction": "Read-across suggests low concern.", - "confidence": 0.82, - }, - "ad": { - "in_domain": True, - "confidence": 0.85, - "details": {"analogues": 4}, - }, - "metadata": { - "model": "Offline GenRA", - "model_version": "0.1", - "identifier": "DTXSID0000001", - "identifier_type": "dtxsid", - "analogueCoverage": 0.88, - "evidenceQuality": 0.74, - "predictiveAgreement": 0.85, - }, - }, - "evidence": { - "confidenceBand": "Limited", - "scores": { - "analogue_coverage": 0.88, - "evidence_quality": 0.74, - "predictive_agreement": 0.85, - }, - "recommendedActions": [ - "Seek SME review", - "Augment analogue set or supporting evidence", - ], - }, - } - - -@pytest.mark.parametrize("scenario", OFFLINE_SCENARIOS) -def test_offline_orchestrator_scenarios(tmp_path: Path, scenario: str) -> None: - orchestrator = build_offline_orchestrator( - persistence_dir=tmp_path, - clock=lambda: "2025-03-26T00:00:00Z", - ) - bundle = orchestrator.run_workflow( - target_identifier="50-00-0", - identifier_type="casrn", - 
scenarios=[scenario], - predictive_plan=[ - PredictiveTask( - service="offline_genra", - scenario=scenario, - request=PredictiveRequest(chemical_identifier="DTXSID0000001"), - ) - ], - ) - - sanitized = _sanitize_bundle(bundle) - assert sanitized == _expected_snapshot(scenario) - - run_dir = tmp_path / bundle["workflowRunId"] - bundle_path = run_dir / "bundle.json" - metadata_path = run_dir / "metadata.json" - attachments_dir = run_dir / "attachments" - - assert bundle_path.exists() - assert metadata_path.exists() - assert attachments_dir.exists() - - attachment_names = {path.name for path in attachments_dir.iterdir()} - assert {"ctx_data.json", "predictive_results.json", "evidence.json"}.issubset( - attachment_names - ) - - storage_meta = bundle.get("storage") or {} - assert storage_meta.get("bundlePath") == str(bundle_path.relative_to(tmp_path)) - assert storage_meta.get("bundleChecksum") - - -def test_audit_bundle_store_lists_runs(tmp_path: Path) -> None: - orchestrator = build_offline_orchestrator( - persistence_dir=tmp_path, - clock=lambda: "2025-03-26T00:00:00Z", - ) - bundle_ids: List[str] = [] - for scenario in OFFLINE_SCENARIOS: - result = orchestrator.run_workflow( - target_identifier="50-00-0", - identifier_type="casrn", - scenarios=[scenario], - predictive_plan=[ - PredictiveTask( - service="offline_genra", - scenario=scenario, - request=PredictiveRequest(chemical_identifier="DTXSID0000001"), - ) - ], - ) - bundle_ids.append(result["workflowRunId"]) - - store = orchestrator.bundle_store - assert store is not None - runs = store.list_runs() - assert len(runs) == len(OFFLINE_SCENARIOS) - retrieved_ids = {row["workflowRunId"] for row in runs} - assert retrieved_ids == set(bundle_ids) - -==================================================================================================== -FILE: tests/test_domain_contracts.py -==================================================================================================== -from __future__ import 
annotations - -import json -from pathlib import Path - -from jsonschema import Draft202012Validator - -from epacomp_tox.resources.bioactivity import BioactivityResource -from epacomp_tox.resources.exposure import ExposureResource -from epacomp_tox.resources.hazard import HazardResource -from epacomp_tox.resources.interop import InteropResource - -SCHEMA_PATHS = [ - Path("docs/contracts/schemas/hazard/search_hazard.response.schema.json"), - Path("docs/contracts/schemas/hazard/batch_search_hazard.response.schema.json"), - Path("docs/contracts/schemas/exposure/search_cpdat.response.schema.json"), - Path("docs/contracts/schemas/exposure/search_httk.response.schema.json"), - Path("docs/contracts/schemas/exposure/get_exposure_httk.response.schema.json"), - Path( - "docs/contracts/schemas/bioactivity/search_bioactivity_terms.response.schema.json" - ), - Path( - "docs/contracts/schemas/bioactivity/get_bioactivity_summary_by_dtxsid.response.schema.json" - ), - Path( - "docs/contracts/schemas/bioactivity/get_bioactivity_assay.response.schema.json" - ), - Path("docs/contracts/schemas/bioactivity/get_bioactivity_aop.response.schema.json"), - Path("docs/contracts/schemas/workflow/aop_linkage_summary.response.schema.json"), - Path("docs/contracts/schemas/workflow/pbpk_context_bundle.response.schema.json"), - Path("docs/contracts/schemas/workflow/comptox_evidence_pack.response.schema.json"), -] - - -def _load_json(path: Path) -> dict: - with path.open("r", encoding="utf-8") as handle: - return json.load(handle) - - -def _tool_map(resource) -> dict[str, dict]: - return {tool["name"]: tool for tool in resource.get_tools()} - - -def test_domain_response_schemas_are_valid() -> None: - for path in SCHEMA_PATHS: - Draft202012Validator.check_schema(_load_json(path)) - - -def test_hazard_tools_use_domain_specific_response_schemas() -> None: - tools = _tool_map(HazardResource(api_key="fake")) - assert tools["search_hazard"]["responseSchemaRef"] == { - "namespace": "hazard", - "name": 
"search_hazard.response.schema", - } - assert tools["batch_search_hazard"]["responseSchemaRef"] == { - "namespace": "hazard", - "name": "batch_search_hazard.response.schema", - } - - -def test_exposure_tools_use_domain_specific_response_schemas() -> None: - tools = _tool_map(ExposureResource(api_key="fake")) - assert tools["search_cpdat"]["responseSchemaRef"] == { - "namespace": "exposure", - "name": "search_cpdat.response.schema", - } - assert tools["search_httk"]["responseSchemaRef"] == { - "namespace": "exposure", - "name": "search_httk.response.schema", - } - assert tools["get_exposure_httk"]["responseSchemaRef"] == { - "namespace": "exposure", - "name": "get_exposure_httk.response.schema", - } - - -def test_bioactivity_tools_use_domain_specific_response_schemas() -> None: - tools = _tool_map(BioactivityResource(api_key="fake")) - assert tools["search_bioactivity_terms"]["responseSchemaRef"] == { - "namespace": "bioactivity", - "name": "search_bioactivity_terms.response.schema", - } - assert tools["get_bioactivity_summary_by_dtxsid"]["responseSchemaRef"] == { - "namespace": "bioactivity", - "name": "get_bioactivity_summary_by_dtxsid.response.schema", - } - assert tools["get_bioactivity_assay"]["responseSchemaRef"] == { - "namespace": "bioactivity", - "name": "get_bioactivity_assay.response.schema", - } - assert tools["get_bioactivity_aop"]["responseSchemaRef"] == { - "namespace": "bioactivity", - "name": "get_bioactivity_aop.response.schema", - } - - -def test_workflow_tools_use_domain_specific_response_schemas() -> None: - tools = _tool_map(InteropResource(api_key="fake")) - assert tools["assemble_comptox_evidence_pack"]["responseSchemaRef"] == { - "namespace": "workflow", - "name": "comptox_evidence_pack.response.schema", - } - assert tools["build_aop_linkage_summary"]["responseSchemaRef"] == { - "namespace": "workflow", - "name": "aop_linkage_summary.response.schema", - } - assert tools["build_pbpk_context_bundle"]["responseSchemaRef"] == { - "namespace": 
"workflow", - "name": "pbpk_context_bundle.response.schema", - } - -==================================================================================================== -FILE: tests/test_cross_suite_handoffs.py -==================================================================================================== -from __future__ import annotations - -import json -from pathlib import Path - -from tests.interop_test_support import ( - build_interop_resource, - sanitize_aop_handoff, - sanitize_pbpk_handoff, - validate_portable_schema, -) - -FIXTURES_DIR = Path(__file__).parent / "fixtures" / "cross_suite" - - -def _load_fixture(name: str) -> dict: - return json.loads((FIXTURES_DIR / name).read_text(encoding="utf-8")) - - -def test_comptox_to_aop_handoff_matches_fixture() -> None: - interop = build_interop_resource() - result = interop.execute_tool( - "build_aop_linkage_summary", - {"dtxsid": "DTXSID7020182", "max_assays": 5}, - ) - - validate_portable_schema("aopLinkageSummary.v1.json", result) - assert sanitize_aop_handoff(result) == _load_fixture("comptox_to_aop_handoff.json") - - -def test_comptox_to_pbpk_handoff_matches_fixture() -> None: - interop = build_interop_resource() - result = interop.execute_tool( - "build_pbpk_context_bundle", - {"dtxsid": "DTXSID7020182"}, - ) - - validate_portable_schema("pbpkContextBundle.v1.json", result) - assert sanitize_pbpk_handoff(result) == _load_fixture( - "comptox_to_pbpk_handoff.json" - ) - -==================================================================================================== -FILE: metadata/model_cards/genra_read_across.json -==================================================================================================== -{ - "schemaVersion": "1.0", - "modelDetails": { - "name": "GenRA Read-Across Workflow", - "version": "2.1.0", - "modelType": "Read-Across", - "description": "Generalized read-across workflow combining analogue search, evidence weighting, and prediction synthesis.", - "developers": [ - { 
- "name": "EPA Computational Toxicology" - } - ], - "organizations": [ - "US EPA" - ], - "releaseDate": "2025-03-05", - "license": "EPA Terms of Use" - }, - "intendedUse": { - "summary": "Supports regulatory read-across decisions for data gap filling and hazard assessment.", - "inScope": [ - "Organic chemicals with available ToxCast/ToxVal analogues" - ], - "outOfScope": [ - "Chemicals lacking sufficient analogue coverage", - "Mixtures" - ], - "limitations": [ - "Requires SME review when analogue similarity < 0.7." - ], - "warnings": [ - "Document evidence narrative before external submission." - ], - "regulatoryPrograms": [ - "TSCA New Chemicals", - "OECD Cooperative Chemicals Assessment" - ] - }, - "oecdValidationPrinciples": { - "definedEndpoint": { - "description": "Endpoints inherited from analogue dataset (e.g., repeat-dose toxicity LOAEL)", - "unit": "varies by endpoint" - }, - "unambiguousAlgorithm": { - "summary": "Analogue search using structural fingerprints, evidence scoring across data streams, Bayesian-weighted prediction aggregation.", - "methodClass": "Read-Across", - "implementation": "GenRA Service 2.1", - "references": [ - { - "citation": "Patlewicz et al. 
2015", - "doi": "10.1093/toxsci/kfv169" - } - ] - }, - "definedApplicabilityDomain": { - "summary": "Assess analogue availability, structural similarity, and metadata completeness before generating predictions.", - "relatedTools": [ - "genra.check_applicability_domain" - ], - "references": [ - { - "citation": "GenRA Technical Manual 2025" - } - ] - }, - "goodnessOfFitMetrics": { - "internalValidation": [ - { - "name": "Coverage", - "value": 0.78, - "dataset": "historical read-across cases" - } - ], - "externalValidation": [ - { - "name": "Accuracy", - "value": 0.72, - "dataset": "case studies" - }, - { - "name": "Precision", - "value": 0.69, - "dataset": "case studies" - } - ] - }, - "mechanisticInterpretation": { - "summary": "Evidence weighting prioritizes analogues sharing mode-of-action descriptors and toxicity pathways.", - "confidence": "moderate" - } - }, - "trainingData": { - "dataset": { - "name": "GenRA Analogue Library 2025", - "source": "EPA CompTox", - "description": "Curated analogue relationships with experimental endpoints" - }, - "records": 1200, - "chemicalCount": 850, - "descriptorCount": 60, - "preprocessing": "Harmonized identifiers, removal of conflicting analogue evidence, assignment of mode-of-action tags." 
- }, - "evaluationData": { - "datasets": [ - { - "name": "GenRA Case Studies", - "source": "EPA Internal", - "description": "Historical regulatory read-across decisions" - } - ], - "validationApproach": "Leave-one-target-out analogue removal", - "metrics": [ - { - "name": "Balanced Accuracy", - "value": 0.71, - "dataset": "case studies" - }, - { - "name": "Coverage", - "value": 0.76, - "dataset": "case studies" - } - ], - "applicabilityDomainCoverage": 0.82 - }, - "applicabilityDomain": { - "summary": "Composite checks for analogue similarity, data completeness, and evidence diversity.", - "criteria": [ - { - "type": "similarity", - "description": "At least three structural analogues with Tanimoto similarity >= 0.7.", - "parameters": { - "threshold": 0.7, - "minAnalogues": 3 - } - }, - { - "type": "coverage", - "description": "Analogues must span at least two evidence domains (in vivo, in vitro, in silico).", - "parameters": { - "minDomains": 2 - } - }, - { - "type": "expert_rule", - "description": "Mode-of-action tags must align across selected analogues.", - "parameters": { - "allowableMismatch": 1 - } - } - ], - "enforcement": { - "mcpTools": [ - "genra.check_applicability_domain" - ], - "policy": "block", - "errorCodes": [ - "GENRA_AD_FAIL" - ] - }, - "confidenceBands": [ - { - "label": "Robust", - "minConfidence": 0.8, - "actions": [ - "Eligible for automated dossier generation" - ] - }, - { - "label": "Limited", - "minConfidence": 0.5, - "actions": [ - "Requires SME justification and documentation" - ] - } - ] - }, - "ethicalConsiderations": { - "risks": [ - "Analogues may introduce hidden biases when evidence base is uneven." - ], - "mitigations": [ - "Require documentation of analogue selection rationale and SME oversight." - ], - "humanOversight": "SME approval mandated for final predictions and evidence narratives." 
- }, - "provenance": { - "sourceRepositories": [ - "https://github.com/epa/genra" - ], - "build": { - "id": "genra-build-2025-03-05", - "timestamp": "2025-03-05T09:15:00Z", - "environment": "EPA CICD" - }, - "checksum": { - "algorithm": "SHA256", - "value": "3ce4ec4983d3e7c6b2089b967679f5fc293096750293eb98d2b211f780a1f95e" - }, - "reviewStatus": { - "approvedBy": [ - { - "name": "Regulatory Affairs Read-Across Committee" - } - ], - "approvalDate": "2025-03-10" - } - } -} - -==================================================================================================== -FILE: metadata/model_cards/opera_property.json -==================================================================================================== -{ - "schemaVersion": "1.0", - "modelDetails": { - "name": "OPERA Property Predictions", - "version": "3.6.1", - "modelType": "QSAR", - "description": "Predicts physicochemical properties (LogP, water solubility, vapor pressure) using OPERA ensemble models.", - "developers": [ - { - "name": "NIEHS NICEATM" - }, - { - "name": "EPA Computational Toxicology" - } - ], - "organizations": [ - "US EPA", - "NIEHS" - ], - "releaseDate": "2025-02-20", - "license": "OPERA EULA" - }, - "intendedUse": { - "summary": "Supports exposure assessment workflows requiring physicochemical property estimates for organic chemicals.", - "inScope": [ - "Neutral organic chemicals", - "Screening-level exposure modelling" - ], - "outOfScope": [ - "Inorganic substances", - "Highly ionized species" - ], - "limitations": [ - "Predictions outside training descriptor ranges may be unreliable." - ], - "warnings": [ - "Verify units when integrating with downstream PBPK models." 
- ], - "regulatoryPrograms": [ - "TSCA Existing Chemicals", - "REACH dossier support" - ] - }, - "oecdValidationPrinciples": { - "definedEndpoint": { - "description": "LogP, water solubility (log mol/L), vapor pressure (log Pa)", - "unit": "log scale" - }, - "unambiguousAlgorithm": { - "summary": "Random forest and support vector regression ensembles with descriptor selection.", - "methodClass": "Ensemble", - "implementation": "OPERA CLI 3.6", - "references": [ - { - "citation": "Mansouri et al. 2018", - "doi": "10.1021/acs.jcim.7b00524" - } - ] - }, - "definedApplicabilityDomain": { - "summary": "Combines leverage statistics with similarity to nearest neighbors in descriptor space.", - "relatedTools": [ - "opera.check_applicability_domain" - ], - "references": [ - { - "citation": "OPERA Technical Documentation 2024" - } - ] - }, - "goodnessOfFitMetrics": { - "internalValidation": [ - { - "name": "R2", - "value": 0.92, - "dataset": "training", - "description": "LogP" - } - ], - "externalValidation": [ - { - "name": "RMSE", - "value": 0.31, - "dataset": "external", - "description": "LogP", - "units": "log" - }, - { - "name": "RMSE", - "value": 0.45, - "dataset": "external", - "description": "Water Solubility", - "units": "log mol/L" - } - ] - }, - "mechanisticInterpretation": { - "summary": "Descriptors capture polar surface area, hydrogen bonding, and fragment counts aligned with property trends.", - "confidence": "moderate" - } - }, - "trainingData": { - "dataset": { - "name": "OPERA Training Library 2024", - "source": "EPA CompTox", - "description": "Consolidated experimental property measurements" - }, - "records": 2500, - "chemicalCount": 2200, - "descriptorCount": 45, - "preprocessing": "Standardized structures (neutralized), removal of salts, descriptor scaling.", - "classBalance": "Continuous endpoints" - }, - "evaluationData": { - "datasets": [ - { - "name": "OPERA External Validation", - "source": "EPA CompTox", - "description": "Hold-out dataset of 
curated property measurements" - } - ], - "validationApproach": "80/20 train-test split with 5-fold cross-validation", - "metrics": [ - { - "name": "MAE", - "value": 0.28, - "dataset": "external", - "description": "LogP" - }, - { - "name": "R2", - "value": 0.85, - "dataset": "external", - "description": "Vapor Pressure" - } - ], - "applicabilityDomainCoverage": 0.9 - }, - "applicabilityDomain": { - "summary": "Descriptor range checks plus nearest-neighbor similarity enforced before prediction delivery.", - "criteria": [ - { - "type": "descriptor_range", - "description": "Each descriptor must fall within training min/max after scaling.", - "parameters": { - "mode": "min_max" - } - }, - { - "type": "similarity", - "description": "Average Tanimoto similarity to top 5 training neighbors >= 0.6.", - "parameters": { - "threshold": 0.6, - "neighbors": 5 - } - } - ], - "enforcement": { - "mcpTools": [ - "opera.check_applicability_domain" - ], - "policy": "warn", - "errorCodes": [ - "OPERA_AD_WARN" - ] - }, - "confidenceBands": [ - { - "label": "High", - "minConfidence": 0.75, - "actions": [ - "Auto-approve" - ] - }, - { - "label": "Low", - "minConfidence": 0.5, - "actions": [ - "Escalate to SME" - ] - } - ], - "references": [ - { - "citation": "OPERA Manual 2025" - } - ] - }, - "ethicalConsiderations": { - "risks": [ - "Limited coverage for highly polar or reactive chemicals." - ], - "mitigations": [ - "Flag low-confidence predictions for manual review." 
- ] - }, - "provenance": { - "sourceRepositories": [ - "https://github.com/kmansouri/OPERA" - ], - "build": { - "id": "opera-build-2025-02-20", - "timestamp": "2025-02-20T14:30:00Z", - "environment": "GitHub Actions" - }, - "checksum": { - "algorithm": "SHA256", - "value": "79af18b3515e9a1d69037e2a154c7c6088cf3fae8c388ff901abdadf5a304a52" - }, - "reviewStatus": { - "approvedBy": [], - "notes": "Pending SME review" - } - } -} - -==================================================================================================== -FILE: metadata/model_cards/test_consensus.json -==================================================================================================== -{ - "schemaVersion": "1.0", - "modelDetails": { - "name": "TEST Consensus Acute Toxicity", - "version": "5.2.0", - "modelType": "QSAR", - "description": "Predicts acute aquatic toxicity using consensus of TEST models.", - "developers": [ - { - "name": "EPA Computational Toxicology" - } - ], - "organizations": [ - "US EPA" - ], - "releaseDate": "2025-01-15", - "license": "EPA Terms of Use" - }, - "intendedUse": { - "summary": "Supports screening-level acute aquatic toxicity assessments for organic chemicals.", - "inScope": [ - "Non-ionic organic chemicals", - "Screening-level prioritization" - ], - "outOfScope": [ - "Ionic species", - "Metals" - ], - "limitations": [ - "Do not apply to mixtures without expert review." - ], - "warnings": [ - "Use applicability domain checks prior to decision-making." 
- ], - "regulatoryPrograms": [ - "TSCA New Chemicals" - ] - }, - "oecdValidationPrinciples": { - "definedEndpoint": { - "description": "96-hour fathead minnow LC50", - "unit": "mg/L", - "speciesOrSystem": "Pimephales promelas" - }, - "unambiguousAlgorithm": { - "summary": "Consensus of multiple QSAR models combining regression and classification outputs.", - "methodClass": "Ensemble", - "implementation": "TEST v5.2", - "references": [ - { - "citation": "TEST user manual 2024" - } - ] - }, - "definedApplicabilityDomain": { - "summary": "Leverage and descriptor range checks against training set.", - "references": [ - { - "citation": "Mansouri et al. 2018", - "doi": "10.1021/acs.jcim.7b00524" - } - ], - "relatedTools": [ - "test.check_applicability_domain" - ] - }, - "goodnessOfFitMetrics": { - "internalValidation": [ - { - "name": "R2", - "value": 0.81, - "dataset": "training" - } - ], - "externalValidation": [ - { - "name": "Q2", - "value": 0.74, - "dataset": "external" - }, - { - "name": "RMSE", - "value": 0.45, - "units": "log10" - } - ] - }, - "mechanisticInterpretation": { - "summary": "Descriptors capture hydrophobicity and molecular size consistent with narcosis mode of action.", - "confidence": "moderate" - } - }, - "trainingData": { - "dataset": { - "name": "TEST Training Set 2024", - "source": "EPA CompTox", - "description": "Curated LC50 dataset for freshwater species" - }, - "records": 580, - "chemicalCount": 560, - "descriptorCount": 35, - "preprocessing": "Standardization of chemical identifiers and removal of salts." 
- }, - "evaluationData": { - "datasets": [ - { - "name": "Fathead Minnow External", - "source": "EPA AQUIRE", - "description": "Independent validation dataset" - } - ], - "validationApproach": "Hold-out external validation", - "metrics": [ - { - "name": "RMSE", - "value": 0.52, - "dataset": "external", - "units": "log10" - } - ], - "applicabilityDomainCoverage": 0.88 - }, - "applicabilityDomain": { - "summary": "Combines leverage thresholds with descriptor range checks.", - "criteria": [ - { - "type": "descriptor_range", - "description": "All descriptors must fall within 5th-95th percentile of training set.", - "parameters": { - "percentileLower": 0.05, - "percentileUpper": 0.95 - } - }, - { - "type": "similarity", - "description": "Tanimoto similarity to nearest neighbor must exceed 0.65.", - "parameters": { - "threshold": 0.65 - } - } - ], - "enforcement": { - "mcpTools": [ - "test.check_applicability_domain" - ], - "policy": "block", - "errorCodes": [ - "TEST_AD_FAIL" - ] - }, - "confidenceBands": [ - { - "label": "High", - "minConfidence": 0.8, - "actions": [ - "Eligible for automated workflow" - ] - }, - { - "label": "Moderate", - "minConfidence": 0.6, - "actions": [ - "Requires SME review" - ] - } - ] - }, - "ethicalConsiderations": { - "risks": [ - "Model is biased toward narcosis-class chemicals." - ], - "mitigations": [ - "Flag predictions with low similarity for SME review." - ], - "humanOversight": "Regulatory reviewer must approve high-impact predictions." 
- }, - "provenance": { - "sourceRepositories": [ - "https://github.com/epa/test" - ], - "build": { - "id": "build-2025-01-15", - "timestamp": "2025-01-15T10:00:00Z", - "environment": "GitHub Actions" - }, - "checksum": { - "algorithm": "SHA256", - "value": "4a2a288f4f9b15727ea63a2c70a786844bab608d75d0d70fd0d0d7e0dad32f90" - }, - "reviewStatus": { - "approvedBy": [ - { - "name": "Regulatory Affairs" - } - ], - "approvalDate": "2025-02-01" - } - } -} - -==================================================================================================== -FILE: metadata/applicability_domains/genra_read_across_ad.json -==================================================================================================== -{ - "model": "GenRA Read-Across Workflow", - "version": "2.1.0", - "criteria": [ - { - "type": "similarity", - "metric": "tanimoto", - "threshold": 0.7, - "minAnalogues": 3 - }, - { - "type": "coverage", - "requirements": ["in vivo", "in vitro"], - "minimumDomains": 2 - }, - { - "type": "expert_rule", - "rule": "Mode of action tags must align", - "allowableMismatch": 1 - } - ], - "policy": "block", - "errorCode": "GENRA_AD_FAIL", - "references": [ - { - "citation": "GenRA Technical Manual 2025" - } - ] -} - -==================================================================================================== -FILE: metadata/applicability_domains/opera_property_ad.json -==================================================================================================== -{ - "model": "OPERA Property Predictions", - "version": "3.6.1", - "criteria": [ - { - "type": "descriptor_range", - "descriptors": ["atomCount", "bondCount", "polarSurfaceArea"], - "range": {"mode": "min_max"} - }, - { - "type": "similarity", - "metric": "tanimoto", - "threshold": 0.6, - "neighbors": 5 - } - ], - "policy": "warn", - "errorCode": "OPERA_AD_WARN", - "references": [ - { - "citation": "OPERA Manual 2025" - } - ] -} - 
-==================================================================================================== -FILE: metadata/applicability_domains/test_consensus_ad.json -==================================================================================================== -{ - "model": "TEST Consensus Acute Toxicity", - "version": "5.2.0", - "criteria": [ - { - "type": "descriptor_range", - "descriptors": ["logS", "logP", "LUMO", "polarSurfaceArea"], - "range": {"lowerPercentile": 0.05, "upperPercentile": 0.95} - }, - { - "type": "similarity", - "metric": "tanimoto", - "threshold": 0.65, - "fingerprint": "pubchem" - } - ], - "policy": "block", - "errorCode": "TEST_AD_FAIL", - "references": [ - { - "citation": "Mansouri et al. 2018", - "doi": "10.1021/acs.jcim.7b00524" - } - ] -} diff --git a/triclosan_partition_distribution.png b/triclosan_partition_distribution.png deleted file mode 100644 index 408c2193cd04b03eaa0620fdd2d5c0664bca17c7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62475 zcmd43byQUU_dY68BBde%0)k3+2+|0OIHb}@2@KsWF{mIQDj`FsNJ&Tx-O>y#H4Lr9 zkkVbh*YWfDe%HGH-Mj8ucis2my&Oko&N;8MUwc3Md7gbjpQ|a7U%h?x(xpq}uxC#+ zE?pw*ymSfQ;|dXYN4b=C1^gl5uAt|x>165dW$J2iN!8Td+1|r8b zPf&nQjEC9U-QC$uf}h{ve}99|$<>O#$-tuzd<%*5Gkv#9muPN4|KnMu4T59sc)^~? 
zX}wNfn<9?BIdbd!@4$N>KHyPt1&~n1%k_{@Q@g3@w)&~k3V!-{9p5ZGelL&Xo@I~z zP2rEEo3Y(zt?!}K0J4>Iu{Cu^Pyi}|eMn;tj{1?Cii~9Ft zVHtired)h{iCPK7bN%mY4Sk_UeE)sTrA*Yx`ad5)U#kA^hjV%8nB)HYI))X0{y*Os zm5GACHT>^utyr>1<^R6!iu!-`<(UDZQ|?0pnJ{GW?w~RTrp)O46VK&L>Va?7dc}lL z$8Hdf@s)*@wbK5+vX(d#fBl)H457GPg~Dp+_~)|V{DwQPSu0;=6!9L`{%P>*R#+mLBuYnNT;6rnCOPh0Aoc45j|REUiT_ zT09}sZAkTDn5=rdz};lOZM5J}@|v5!R~xl1Jl3jOgMBZPq5EzK!wnr{?}aKp)D(?A z{7dn#r}RNX>ax;)yT24irvKUuWd|8J)r;&d{kqRn(t0D^duK7-V|CPYy&OZ4lj8aE z@NezL$HF7YyE3DKi@f~8tF`MD26`@S)Gc*8eFnLW$zIdmzj0}A z`i{mY9BX+U55#fFn#+!%YoXRQ8;0j+2Qy=Fo@1sV0~HPvV}Ex;Msy5#8N4Q4Oed>c zSl3&?k}-OY+vJQ9>nYZHt~VR|u2X|$jd@P9G&gLN)9B12##Tm;b1X8hZ;rhC_)o*a z4v3}Ti&LCz$&bYZtJHx!?fkL*kUKuswBoMtn3ekec)|OVemlLUNL5jiPkc4jQcMz_ zYqSw)r^Yj#_{(9^n?SaJM7c0!aC2`?pB>>jYF1gjlt*5S~*Wn5qZ{ z?usCkaWcKM?>?E=l!tD-l(#Ev6$yg)cGy+epnaBAf9ey>KXXU8w6x`e^@+-e#tAr5 zl`9p*=11_RY^mQit-z~Cmfo>*;>L0Pt)W!h71KVe38Tof&hqfx{Y7EVwXu(CDH7C% zUY~C9a{aAYEv#Fu0+GO-DC+Rio&I3TYbL??1cc-*gc0fuv_{ho&o;OnD!{KU8tJZ% z{@|YW#-)5rRAOrUa$ivg3Tfeb-Hqvb?)t@#+=bt*r|Z0%5bFc!kK5SO{GxWGo$B{S z0}z`aG77B+Ghx5>Gs#oIssya^?|!W>MQk<^yG)SQJA%!bR|O8TU9tR`PM9lPYP++p zD9t=voQtn!>D$K!#EE4p{X<7X1yw^3!3q(uhW+!MepznM35UvnqPf=$zH24aLpkdG zN9W6kqos@pF3}mkox0VRh`kZ>i}rGianeCuMhcVv;qSVWWSMXfd5Q|9mc82zIKJ97 z4MC5U;ks2bU(xB;a{&%NJ@+?eWH%&~s&6fh@7wPN%^#Gl@>Wi2Y>w@(jO5Syy7cP% zZME$*_-DoFX@4|5yW zv0M|R)2odaXHGrGYa-6CO($>*uwbZ@-I(s1b5=rS(SG}nEXHJSHoRUGrk-3UOhZ1Zhhj9RliKBRS4^2 z;oXiP9k?)c&S#U~I6K}WwMy~OIZaiWx*slfZ~A)i;h#?-Om9vFrS`|ne+Sb$e!y11 z`-U9h!n%amj)648mCP{nsnGIe$_J zZLl4)Aj476+ZW6Ayk!-Rt;k`DV6x0p&wY>{9}ev!SrNS=nAVMD zb@tTPW`>ja{}= ze2u5un}0s>x1aYLaZWnOUED2AuD#hQ&(?w%D>nT-sLUTWQ_8=~IxfgG>a|gS&_4HN zYIJg>sr+|($eoW9ZBvBUs)=`!OiPNdB**RubyPc&G08}g(F!nIzC2hSVt?Q;{$Ls= zz`Z%`RJZkXZ6-Mmj@UUb`3$2kUzri;vrDB4%P3uXJK+xHvy? 
z1{c%e7Ao`nidT%YBEeE6e8tTR&+D?w`r+wLe+!5^hKLl9YaUGGZd|En$@LfN^yUFI zh#IOpO?%YciIZFJT^B;sd5`>CER#F=n`-4_y2be}%0$8aX(^4^~R2*R1)$>fh8Oz7K0 znGMy3W9LW+RK1n-D@S^n*#l+t6(=ejxMj`{=jysqKagyydApPAl{2Z{XXAE7Mq3k3 zjlH`yBvfo#HGFla+uei{iNqaZ7#Ab+FcBf_7%5-XD}{r6B- z1h+SX8*xQHs_E}ejT(}opskZIbFlO)<4@X~bF{Zr(`8bgYjJlqr7^xE3Pj=&y`mF5 zmd;iu0O1Jh?=aMwVP<3y$Mc54Jm1D=6=Sx~mN0tsJfoVcGhAg zfG%LAk5?Hr>}+NerUkX%>hNTvF~5v)_lucFvbfrCDKWMwYuJ?Uy}7)ojAP^3zFwdN z4JsNPseU{-+Z4!smipP_FUO?>%x+H+^k-*Qad!b%Powd6K01=Os0n$6w zadmCY3pHE>;^t8Lq)+7I%L8oHr@QDucJ8#}!_vzREiUP7exiMkQJY(Rk{hkH9)q2? zFJ>;z0<;S69tSu0f!8Wa& zx{xeXDmR|=S|vGDpb5Sv^nj1~xyGcx3aS?q6E;Y?7P%W+9z78f>v&{vS0z*b`;%ui zKd}4mXy&dwmIcN5WF$&Hs4s%$c?yC&m%HqRr_I;0%p%utxcVJs{gnCR&I%m;fV|LH zv^rQSI5qAYNFG{h1#)~ai#&kH#Y=DH2OJM!@rK5CK^}rPn7ldLBjr=>HM*=yP>B5c zO5&^6F9n*m&pH8#1jC&@^W1p#EVM>f-f;+N=i-z_t6Dy?A}^?;JeG(bC18=5-z6|K(%8 zqMSCj>e|h#yU>u-_lo~!q&k1nR7dHL^3j)srHMlhz2cAS?N_XH%gL+mxZ;{YKD@O$ zBHXFHhOo;UV^%s2k6rUrGQoW$KfZY)y*E+q7PJ%S%~GZOwr2ews#gB{C=#rdr7q~( za#g4kZx(66EZO1(iYo-u;onq~L>1z!?!hk4FJoBsTU7WI^{)hU!wK0Fn&3u4roRho z|K4V=R7S-P>9HeWY^)MWO$4byH%YtL@p9codG3H58?Ty1sho_VqS<(1a1!TlqEyI|WvrACUluK65Qo$C=u6wR+ac^E-J5b}F0VmI8E8)= zHbt{MGM*Bh6?4PMmkcV2Xxq{+*yijuP}^mgo%GFT?mDsAS`>e5%FnE$#!v`NB@iT_ zo@>*+f||G6_@JfJ%KjKL5I^Op_rhAXnkjt%M4eRf$|WqoG7>>G5lJ?3~!y?obP;3EJB=$ZfJL z36Q6Sn_N(_oDnU|gkF~-+mfO`(mt?$ZofKi-4DOuFhLGG@F{S5(L4)de0O+$b`k;* zC0i;JSy?>*y)Kb&iL-KF+n4=%>JnKfN~8yx%*^0su+4MF2U(wVq6*RP)7rX71wt8` zdNtS*yG4GneK%F(oy$xWq^s$dJ}~6j**cty=pa)PLX%{&)5Q_|%C#fwDxCxX&U+>e zgAqZ^x~WF$C3Qr-lAt0O4MHQ?J76Tp*+WomlOMh|Nm@sryBu8FFquN!=XkbnuOVC? 
zpD8;!dZazHlM}xybkvuA{CuQB4MSJtNbj6nx>57&*e3mW?Q2hWLFDwqaqAduom>|a zc=2j0mHKDOV{!aIG7h0oZVN9llUys~t|zVFR%n=;%E`er%D?O-G0zoO{HPeWP1wjK zHS9dxnrnGD<44uVURyhSX&%6GIsj8kdRWp3DuXE)6ZP4QT3&-q#hcsbVg5vQJu|sQ znZI7kYD3tvCtj?Cy;Flf8f#o2ov*P1wX=P@NRV;4w1`DIkTv7O#OiF%dT_lDXUIgP z**s|ehG5EUYeYz4(M%ZK8_=QaO=m<`WJhcG3tln zp?7|(MP;fl)&vX0;eG#Qv^0iF)xV2C)Sc~5aHkkx<6l*=b!J;N_hsJlDUrJjRdzue zv5T{3QMXgF;*2FmaYT6c*&=wNmtg~?T%7<1HlF=CAlUFZ_?zM;9`FWmE8c7aNk#$l z08nz#(&AeX+*n4FAkpVh%!tC5V{5u4YuoL!pmXhuzLrj=n*NrHO(p)CWA#ETSG{5g z1*1@t?n*tfJSl9Je#3<4aZ=(!sJiS~*(?ge{cI;EiFSp#557i`ZuB--#2p-Nytzoq z{i0^+a6_w)kNh#+$8l%Sj#~;X9AK@|0y^IRbnu+ru9^$)jwUpXQn-AA<(P2=D6oGw zF?jwxl^RFxkJ6&+4EY)15?sRKJeXiZ42M!^vd>EHmTy2G+HMeP<9ZHgjND)JtwWs3 zr~Mz}&3l3g+;9C|+Ucz34a3*H!2!1mLqz9q>szC=VqvHO9B5vm*wkX6JVa^-I4;}w zJuWB%l1Qj|5!{J?We~?_WR^>G*)y7xPSjqHR?^enPm2)S&=Nu!(KJTySar97a^X2g z#v@D4Rn}S9z^6y|5~2v42Mg=AyJ!t3%|7v(1#_17Fv(@=AxW$jlaL5l(0==LrUnLUooOJ0BfOkf2{!(OWlb9!flNh^^CD2C7 zC{!nhJ)Ew3RUJqEa8iJ3z$%I4@(_R2rF{VCnQoc*Z5>MYUnejGb+@Ic@%;A5H9Q}p zR~+}{Y$z^&kA9nelNh^7K$t6+MnR-ubx}5?Mv6=Ibu3s_Nw|A?A;}&E>oLscAwx21 zY$Si4NQ|8_t*@Jbz#j%?A_bU}IYHC6Z$(c)vt!1KSdkwpgpG=i$*wGFK3xKkIs7c) zc+mbK-iu>y+*pL{Ic=O()YD^^$Efz(cC9E@tV3`4BKT-HhG=&UTdHwQ@>h;}+8AGS zTRLNvC?iNiTUEl3TD+xS{e1gchP}y&=mI4u$KR1ABOfbPl(m6O?_{T!tN{(7U@}V57Pvz%9rP>6R!bUzm}MADwcy zGj+HLcbsbE{}kj%+hHyiSZPeEv~ScO1}4w|o~krD%`_lRr`VG0H3pCM!WX=aWMt2F zH37|L|HXU8FT!y3nUj+Y0NOv604>(!QEC0_+aFMzxvD7J?tuL6*Y^EIhNaILpg}Z# z0fMfs$PRELZK@Baw0>1O&Gfj_FsGXrI> zlUWcDCMAD9axWU;7~FbZxa?!CXpAT+!1~v%rq`E-(Ftdp0bs&qGb2_w*AbCs!w8b| zcdQYDbAQOF$53ePQ`?s<@2@6j%ry=70Qbk4iu{vt=Z_ z{39jS2ghp!on|_S1lAv)=Gs#Tht0F+pF~NgQg(bkV0)ar_S)ZaTR|vlSzTqNa|hfv zgPxyRMezG}DYdtjHYOb&G3D?OBWxn1_dg`;!Fe%k<4H1VqT+~n6~BrE2NRGJ%-Mt& zq{T$B)&{n(-NIeNBN?>94ef(_`oh38P>AmgtaJdI&diV<$nx9VEtx zg;bR;0+JvC!yo?8OTGBVw4xT;(H_isO?m?t_rAG<6iAL7(HuB)bzn>Ao|7Y@o?aLW@X7Evq{O{k3UqD|n zA};)YOT*AorVyKb3&{+Jk+<{s8&8FX)ca*l)@~O?c(AORPuUtm^kgx3>%Hgtc+g^R 
zl2!Nu*ibq))u4pS{3DzJW+LG=sOaJ8@F?Ot*5<=*5XUN^VWVLj`~{Du8Zc+X`{5&aKxK-$VR0532KCx_+spDMXUn{V3NF1Aw%J zyQofMCh0qu9-#T=WPd`eJJcJK@|^(wj(>+zHUi|Ay4h{O{uoU(dz~<8btv{F+Fl~On363#u_&3R^fdup?o*&>$49_AnQvpgVS+y+EySU|VT#pzCrKVK|$oUwmtuPAAG zuAvYvMlPJ**C7!y#~eM5*r3vTR8@kx-@ku&XF49+;13QR^eaO# z1Y|97l_XJKBIct6X5q7dcZHg2;;bsEQsNKXfoNfWI>~(BZ}#WAt(q1b2_;d$4M64j zAMQ^$2^anLLGpBLXkB310XG_A;(x?yoM8-VqBFp(mdED(snRU~uj>|tpw}+2{P{-U zUV9FfH9UNzF+xTs#Ijbq(eN9T&yTWRE5n>?;PM6Sl2x7th@QIn0XE`2f5YKDv7cE@R-qfp6lGZ@u}@vcrJtu_F7txdS?2L-(V3gw`aWAE{&Y%tCwm;Y7k> z3d}BRV>t(_B3!Bf2s`A-KrYGm}F%v1LulcC`daP+*F!sV~Q6O?b@3# z`oD@?b658siUtyqUp1<6N1vW{FuiHTqc4O3;BLDXF}d74gWnQNP6{P|Ku;<5dHm9) zw>uHKCRiIcCHMhoaaxs7Y;=COe!m&&7lhBR3>&)tQuqy^+y}YA2LYaNfmER5{6=;2 zeUjS&I6!7JvE1LHC5FO~=GZD;?NjA6q)t5@^t`sl_Rn@AEhLi17S@?2Plk%ITnZ`HPOB_j2 zdO(O$eiwzi(%(Ky_J-aW@n==~V3O)&5%ANuAl(7tutvJ~p&UjY!>W_{ zG+?|KaE!WS;d6kSB?pBtfm1B?>mBiKXGy6;T)fIN5kp1^_bZU%c$%s~d}M3L-n4m( z_vL$}ngl1+&tm|jUHJgLy3sL5dAauJ__NXwP@=@dVwiz z{t*XTyczURH*rDMog53%=XD$`xvIwY;vzht(2;;J^dgH2?7w+V@$~^j5oBKUU#cE{ z7Rf5*I9YXg2AFf&aLK<_QN~`sgJ{}&6a{RoJl9w3hq>7|`y3_+UdO+81T`|Ea#Z;4 z(&q0L;Yw3>kq7-iiBqC;z#>B|W{!JgmnmgWoeh~bN(bag+Yav5CZPXzj{`vz5jiNXGSJmpu#DT`P0O#lny>_~bq&E=f68kX@QOgEV$AH5o^Qg$KIup*dt`j4 zh__+GHhpwa6-a6=kZO`!zf5J7>TL;H1E%c?sz3tt@p8!5);7?PUWXWLOxwnaZ9t8C z^|$#t6#?R(kYag|Z&d5)!W?+sIoknrAe!!b^GRfaCaPO7BwL%uAdR@mA!DEkENqU( z#7~ehI>in#k`VaT4Uou8pY;ordCRrzsS0`FKllGmNp4(H zZ}90$F2b0+LDbzDWGjfUoBChblq#Bc1yf|QXjMq6(xuPU*Bj0#ISP}^bV-pq-E5ij zKzbDO9Z!P%nCdmlwrsF$hv%h`8NuJ38DSUTCT(K!U1{A%>Am0^A{@TMPx0nJy8)m< z-EagtZ8FX=*Q0>$*<$=U*uUux6BP-fi#}*8t9@~Ui8Nl3deKo)rSpQCmAoxqrK0N( z_!klrH-w{ZJciZw3HM8F(QZGyjQG-!($L^S&OI7}Dq_T9ZuWJ6TE+>HFr!*lkzQdU zqu)KHoxSP&6mmJ3l@eD=tX9+Wl1*Lr-&qA6^?~vWxuFOL2DMe*sm4Y&#nu6wKp>gx@ zHMpJmyW`x2RrhwrL1mJ}2?4D-Ukvo|T#JDB??D~i0WkPeQ8i%ZKk;>E0EIJV+76^u zo@Eb^oBY~<80hNAKM~1;0hswC5Tdib0R`O~GXm;HfWw3T(K9Dg!IspT^uQQ-W19Tg zx^*yscu3ao0C+OSErEhjqID(>q~a}{^!`}JY{?W*h=f=DK$|Gk-w65yR>qM1^$FcN 
zu#bg*#Ch^B?D52{gI1@=8k}wurrdTU&#W`@uJ}PHhQ0e2CD=im$*YHuqLF6?kle*C)l$AP@wVPjNm4>KTs!nraJd)8I1c*1VM?`4G{C^Zy;K9gZ z!?c?XcBHJn$|WMd9ql~@c8~6EV&Xbj@&Y}#UJ>Q-A^@Ryb|eZ-v+CFo>(45g(lo34 zJPOl8cW3TZkF%Wl_O0()ne7Lf8!fptM9b)Xmmcb;ktE(hu+6tvwEHsLH&6&vlu@N)2mVD>ycN z3;4L84ZXf9ocRlDE8G#y;vSmfInL?+cT(ICq-#NX0ZeN!4Et`4r)(kCfX|W_$*Zz> z>tqiUoM1|sLuNy85hd_WaEI<-VkYmTQtGcqv)rwXAX7ovCy6@T7B1%W+6111wt|PK z{`lDk^w?^s;f1=DQ}2v0(W@6{8*j|XZBak&c+Y>SdIPs~@s&sjFS9`m-^~e&BGar8 zKk~UaJHQ#r7`;iJgAk0a1O+_q%d*Nu0ji5jP&sz4m%yJ=YV%R@WK02ENSbpbN+xy^ z@6oL_I;-UCaVM@Hh%0(~z7P*ZfdL^Kf7I*imV-~{EPH-v-t&Ip z@K)klq(v^V8qs}h3;Ykt@%V{R%=_818!-LkOjU^NCumgZRjcqE=P`A)a zIlSU;ra@_aeefjREo~uQ@ohtIb-&=f$B6|>?{BSo_=Z6G4_agNvQLn4-xFn0^h&8C3idl3;zdpE%b1adBdjg=nIcnh4cD86Z zPwU*#1U4Llu%38MZom4XlZVpPUy4-lyO`i{TjKB&P_$@^I+z1fS07?t9N(tuWJ3zc z04;cn!RJpXdozRE=}Zz)^{wqYWivpCrcIlZKub_f!kPnj=wbVU-Np7EC5(m#*bq3v zo+;sifxWOvg%2%Q5Ds_+vFRy6AnhF{$Y0DJKhxpafVE3|{fLxPn^`oR0^FGGS!q@c zroEsr5%jMH=BIWk;4%L_D@m;Fj1dD4oLV(qNT50j)3Q%^WduKhmNKi)ngwM%$RI1y!s z(sn-ACiEiOJitd1uw!^plShUN|P&HQ5wl88#X8X$)9N8pfu#gY*cIHr40+PX&Ne?V8siHJesid^tF8j){_4s>gqb=^sb9^aCJIJi=xBs7L%~H7_h`cuYUg(3apkUIKG|uEb!Ga#6&^L6kio$^la5 z6NG*aeYb!Sd8o2y^c0#WKV6)$h%>s?$ga5tC|cesnH5#djFgw(v7LP^=GVcBtFhH` z$;G-xiK~k;V1ueF!q#$Gl8|IPC?YEb#%+wTFR#;?%Qy*a(d+mC+l|YUZTNYa^k(lX z7Ni*Z>kmgAtJ`!wW#aln@G^@TqsYHSFs4I5I|z$ZxZNdW#XEO*#EieM$tkIArCV=Q1K>K1S@eS`jc^H5Ee3%bm@OnvEt%i__zk!Q}-V7ucAi z)N6*HXiUG-iPMMqt99kSeboj@zs^T%dyDcUm%Hq)MXH74N3eXCH-QDPjKmPU=QP(W zSX1=z<*bdQ#5?tSx1ZRV>Kyx%t#XnbUzA{+T8-WP}{zFRZ^Q?+cSWxh2_nwVVWL&bC0pA^Dtf`4xLdb$S^-z5?u(TN+}Y)4MoK z=q^kjc`Q#t>KvA32y;*_%>+2^6@T)WeEYekj0d$ zYQEw9y@*RL>2fMW0#wyq+9FSuR|yuju5#{e(2jrDHa^PiFIh3R0y^F88-Ugidn8e1 zH1$Shs3;>GZZ_Zfg_ZcZ9aVWdTJ0WH+$FPIY;tu0AnXa>q!Gxw5)zJuw;rz+nK(-i zZ?ovhUrV?>s4Z8abK^t*Q-O$A_9d6Z*LMNt>)0r@bEOhsKaowbNhy?7dmH$m5Pl2!T09{PD2l?=Dw8xky+t+yg5wtUU{ZXrQaTwG-YQufAohzhs4B4?}{q% zfbD%+J%jZ}>+?^9ICQS|xuM?X`~DrhS$>4HV{;?Slnu019uy@z933ssVoT#CM-9y9L?$Tz*E5V}poD?D8s#HYO3;SI&@U3(2`FI6p*g*Iv+1Q7`LF 
zZ$s;g@4d#>=fUlx2VhM<3O?c!xx8%OB7f0C6XkDo%#_5Ee_gvitG*`Q4~e=-;H=!# z8WwajsfP=}?csUL;#hP?qYJ|WmkK={vq%>*Bt~ksmIpKEv=NW}u>q{o)aHN$B&cGP zO4-unC>5d!@PKW;eUlnBtBhdINJ#90eNC099zI~h5Bpq^*-8QgbYD&*$1KB01+^5n zl}O+NT`dWVx^yJ5S|F^+`1<%tlU9HYkK)@pJ?|LcY?7s7m;q+EW;tTUn>r?ly)i|- zH-qPpnQjU6n_&?O5@KqvWO4|##QrXn z<@=)vh`Lw6=tC|rpT)-W*v_M0mNolWxw1f57&6)M7XFm-s(QbRD-{6>pBmAgNXJQ7}x55rk! zzoY8Dkx+Tqco)pbB3#>}D6D*nf-IWN*k?IwKo6paHI);qDF&$& zFtM2qRcbhHE0O$>#cC}o0u&@o^z=ix@P~!h&cjxKA{gzE?8t7R9EHbi@5_U(ZK^Ml zqW8qpdPp?wBayBK|`T2vA&u3o=(a<6P|5Sjn2#bEk5?a)${~ zBw@R3*`&y&0E;8G-%6L~;ae`Bd~42Mr)!W0g%dUrD5(sOZsicbG!71cdQDP_)R@=> zFt;eCX0$q7u4KQctH#zG0TKAw{`^v8bHGLSB#LPj4he z^O+i(E`X=>^ds}Hfe7Z7tsZlHq)m-3CyX<6g;`&*-7(A~`D<<3{A$x%eK}|R zsdyfMzAo7>aUL#Vc_f96L48S?g%YkYI!Z)+@y-$^x5WEr0C!PZ;FMWJ1yN2-7FG*0 zf`Ft93&nHI{qj}x079#1HmSc=bARA*N9S_)@mHcX)zFiRD$iMUd(BbtQID<_rTbf_ zhdv@%gIk?v+fDcb#(?nP{t^YF`?F6$-WmitRD)(7*PRg!)&#*nKN1Fefedc=N=kco% zS$*jvW50$_2=a`Tqs{v98C?JiVn7y6q=lOyr881F>bngx@0K5h^f~$Nj z;>`rVI<^1YxA}Bz9rA#vh5{mc(l~*Jrw3V!PW=-sV$u3t;oHiv2@Rw)%}QC$#$euR zgB+1J$_O55^7?m>c-;L`kbnAdBNY>l1{nvyhme?}2O#>BD+7^qDx%^ZED38-uo9r^ zT(puY*p6)XGhshTrNINKqM2#Gz-27V5h~9*YB;Hy9L_=*J@rmQJ{2gU5FMZCs8WG7 zf_8vKSV-C%a#F|V@~S$Aypcj{+~mAh|4i5y^Gq(S*Ik&dye6j?VqKC3kM9K$?a^RFP~ileI)?~X zo}Cz7JUL3RkBWCwE>&NipM0sBmOhDhq82NT$8&r|&PGWovaSEzskn*g_lT}Z?@0qg zKEW#UX8o{+vPFOZK`ZjBSVsccOSWZ}N999ml0kQ*ecl7giTAuk?NZ1k{*JeE>JA1d z8@RfF698e8&Q`}?~zb=T# zXs?|xLNf(is^Kr=tTm50y)XjbJ6z1=4K6E3^ZrRx?zL0$ah@`xczz`iX^{f#{XI-cox@0AY)}MD-$BdFJAbGw?Z#v%}u*7 zY^3R{}7O-%L!Ze%`lKX z+w>x-GRE1GcD?^~LBi~|HiTgbSSkHhpaC$bISLQA1_%$vex~uyZpc+(%ySvL!{mOE z$Vbj8D34?-KlO{>!k;U?I&CHN`rRD%w$j-4J3L+3xYFmfC^;)OsE(8zr@i6?z6yQP z#>iTLluoD;Xe?!&aHl*83TgZa6_fgbge12*MNW!|4?nA022vWuqI$_IJ|5zvK>TpWLYv6$5 z&ZVrh9pT#Ve-LiI?HlgofCck4SO3}x^4ITbOiufq2mv>eHW|HWfwc*&A_H-<&sx6$ z87h92n0M4|Bsl{2Lkcj_6q@yk<9ccv%FNF2so2#D#()?~dItyjCe97$aWw1gu4U$` zzZ!|jXrGsK-uo0iY+_JhukQjbTNg;HOdf+2@e&@kb>XqD^^3niCQcZA5##oKpu8#% 
zGoO>@Uz1tUQy#8SVGeXdYN&3()|*8f!5qiPc!Y+GL7tlf4;$i6^!7gwJ;VqedTTsO zKaV^&N+&+s0(B$wBxM~8QiS84Hej-g7_$sD^IRDQ8*?t!fmMs}q!mz#`kQBf9M;;L zlmryWoJiT&k;mU^+@g`Oc(6#U#akW7QkjwSyKS*}@U^dC`s6gk>5pvu=1TdUmcH?krdhS4FMl1y1Cm?eCjMlf6JW zVfn^ifoYG=30rWBI1Sg#ipaerzKJjJ{aaBfrm6YouO&zi0l0LLTUktgB%L*!-6_ zzBWXPJWGnwe^$YYeH&f>n?ZW3m1>UhHhf@I6h(uk%`&{<&>?PsTzk}~AmqKgv8GsK z?C9Z~s+s+vmbjB4yIsb@kb_QsBGRQRg%hs-@WHNAVJLi6Bm9HtZ@=`kK@%H0|Lj&` zRvL~M3ZWV9?FffT{Iid%99eW~GDq|A)ZHS;^}9RKt8^H_E^QZ8^)A!{0{Fe5So;ge znN&UMdUp3y{L4RvA>t-^fvYrB`B8WKrnFC0PF%@<`}A4S_q!ETRYd+W(gjgcPzb*m zWBSiv(OS{2qEH92wkq#FIyl?!npc1YQ__O1x34gFQ^-1xWOz5A*Cl7%xcY`LG$-U@Y5gr6ttSzM!OCkxJr{`?M?#Y~m) z4qeBLJVP3bZ(YryWEhWU-w*iAm}sHoE=i#>e9LVbz~)>O+Y@wYDm3*5{n%7kfw6}F zy#D`oqV+!q%|s1DR)C{&n;|x6Sg*_u(`Whb%tW36zZE4gl8^!OG6LcBkp-r0N#u?H z)cF*=@nfSG5^z~K`OlpeE_4B5abLKTzL>?$Z}|I*y?V}nU$HP;Vy zy#<<&{~t{Af-}#QH~>6oFeFnjxDUvgf~i+B$jSmuoI(!m7D#~wb{EPBFs+=p002F7 z{wtG^+zx320@edx1#ro3!Hk|`cOi>4H0TLAsnV6>_8Q+c(4<2rqlNPU0D=h%IU$`va(KUoF_D$p9eDg3q-+u}CA{HbYxXZnZ$ZBm6U77#nW4{olPblfQ^Km+>@Eo4w~ z-syc9qCjKxzz}$Z2n{|k`*6N`T@XWF1u5g->@jF_G zBa8!6vfgI5B=NSP8N|;F;zH0UWeLkQxf3u@)Rn;s3rqgb47f(UH>g791IV>4|FdIn zmS`8CVblg=&Qc(LCUK1HF%yjdV&AYQHkG)Ey&zX5qq z?+C(d=l>@@+JLR31*oqyV9dRlHO??9SAj1b$oMh0?{S8J#;@@vStLK8j1^A9(FQMscXn@11KM8nj^2^j5S0%wbQ45$2Vt}^5 z05r+H(_YKj?Cy$!I$pCuk_%W?ulzt`^UQ zO9o%R|Fkuf0yOy3brugYSMqvq&jU-BKP2cr1`*x@VwnkwK3e>*tz9-bDT{JIuK&r9 ze!T`j7PHMnO4jrEanlgSKlY;loRI=_Lk?U*B#;P+UQH5nF@tQLX2rE&rrkwnMd!!M zpm+Gh?V%5=&iE&QPTvfeh&zE>kWd17hnB`l%xR@wJD<+ZQ_4WeZSX=}U>taVT0k11 zxp?EdlpZ1vCUEWVh*$$-jukXa-3+;+!)1<_Hi0q_48d(^0u{2?@DE1e#mc*Y&vO+yM-{dG!>~x!YcO z_UO+WvB=E_Ao>=5Fx781llK4lp{aVs_$fj|2=m@JeIXEP~u*m*yq)!tR9 zK`ZVJ9CBPH8a;17MTb&V3%>?on#wwnIsW^iFeuGnwgZoW4c zG^*P8Pxd(i)&Sr`)KN#m#0K9KUuUqArxY43{US$UoB1zCpF6!xJ}oe#44ebeqm#IP zc^;Y;G{FICWB|B%zjXtP+sW4IOS>O2I|t|;Ccn)lm7-YNe|}0YxXMu)rMwL(Hrg2&==8bx;g6vKwSRP3>3cc4R) zcYqM1t^f@e_^w`}3!KJu>D>&TWoV9cCh0Cr!#hroPTI$P3=m;vvF-m&GXFjALHi`R 
zbI_o)Msa~jU1i9y`51T?mHpR&(n#O9Si9%a70tP22dvn(yNU8o!)|K}l==a;6`zeX zbmg}x&iN04;qK3YLD~bc9<2S}<7pO=8$%aIl2YDi$1UH*QKi2cj^A%<>>q-m#@mez zAX2n_7%l9N_t)v!kttIM5OAh|Lm+`1RZumNul}kINwmMgQuXg5$Nwru-OhQ+#GLlP zMq>;JmuB@F#`Qiyz&2K5r^ld>;Z70TeqiRnZud5HX$zlbUk_Q5*Qx{$UyFX} zR3FWpGaUH^%<{E?7>CRTwB1M0Z1Qg)_J&2|QAoYWj(gIbtC@A4`qu1 zAbIPNn*zRq6dJ;T-0#ic7q3HUg6qcyXlY2nR9zU}w&9g_CJ4*`tL0}o*UOvGXfx#b z$Xb9nYT8E|z+H0)Zo{XB4Z#iusyNmA1NDoMLWU;ab2b1JE!`i^HVz&5N!K#coP5{h@c;bfhZ3}VHzHME~!f?;)YUr-xzmwcTd{8c%1 z?gZn=EwPP8x(hwt2g6p3kR$T-JsALr6l4j22Xv))P>DMTI8@6AcR>IA+7JT-Xez~J zP?chj7-?d`dU5(+%fZLJa_83%{^=9o0NgQ(e-u7nYWSBG@}#46?m8>Ra_ct{w5v<5 zm-TF*l#5P5<5f~2w!m^F@&@rNpSzOA&211&?X{!mH48o=Aq2@-j*IRQuqjeWfVK=% z54y!5XrhH^j|yZTxq`j=HB@)I#g)%D2zvYaEafpH+%|KnDLgKlV!wN-v%gH86z!Pb7eZmJB2=sN0JVvTyKk=ZRsmS<&5iQ{+ zz5O{r(k0m-q|yp^QrCoI@Br3JE1nG?ZQsY=N&xsL2yz%baGI8KB<>KGe4L1@1PKqEIp53)9-8SmiM>|eu~cYKyZt??(h58@z!bn%g$TNx z-mJj6P|#nC`?GSE-rSX2{4>|&Kks_Bhw0E82OBLZxAXan7hl};_raFd1`g+H4JlPy z``Z>o;!S_=J`RTSaS&9vrQh)uY63l?BjXWRO3Rmj!NWH`Fu6iyjO?3cPH3nwXL@tU zBrrh94A~tD_yYVg=ec=XhQm7oazh$xZ2*z*wcV%x+%_YEI1=LxU8_PvigkOy%u+sem~GInEzGE#wbdnT0M6AdE&e3)RCG=xGCi z`Jj6ElHJtwiP5+61=iB$_}s06Ew*MdhwkXDdn z35H9oe6N~{o?w37%eX<10tzR+EvbdX75}00wTJIs1az$_7uI01#t;Wn=BGh}Ot9Hl z4^7MWYx7iUVs}SdvsJG!unVRW1YYIosMuS>3X_4y0TCSl9(g@kZmAVu;~S+FfLT9t zUMuNnpDP2STCHdolAAfigs)SXrS_U~xgPBwyv4(Bju$j<25`)f)3WU=%wu&PJoMmT z%kD-$S8)6&*y%U=#O6N7mnD#vcZ3>U?~nji#%5@U@`O--!QM_A|BubC-Q~YpCSNy< zzy=)oORRygiN0>AcKT#rK`@LmPlJzx=W*_>SlqXEi6iH%@EbH&{||d_9!>TC{(BpC zvTG+%8H>V3$*^S}LZ!BOh*FecBcjYx5nE`GdD`a65J~1p8OpR}`XIAXri3yVQs;X4 z)c3y6I_KPf-1oiK`Q!Z7`u_1*>)Xdid%xeW*L6Lw=ksx?I~F_7dyNEa{q$Iv?LG2? 
zyr(MxS*xKJS;?gOj&I&wXM}Epy{WcUt?3|MR_7C!>S&#Cc5;+V;9d+DwMBqeLz;kM zv<2YwZ}68>b(OjJtb{{gczhYMnm`Od0H(x%6N~B)H-|=su|}?NHSC;lfz?ZL2CW_HhXD zazxFZ)@3HDjlsy?ebg>mpk9k_Bi+1s64mI4uvVLmEaKRPcd+tMW;!w=9^a-y4sMRd( z*uI_-Jf|}khqL&JvJ|fno9WO-GN!!Cv&(k<%adoHdS{6H`oGO^ev(qXE($at32K3~ zz-;7up07jg?|C%3#f2hgu2o5E4m^#KepI5#Iqud;&u30^D}Xu7Y^~P$l+V={UmhOC z_AiibMaP=qYA@rvGg>qYS%j&)J3NnM%u8a^x(A=_px(Fv5R?x7%`Y9K7WfViU7E_9 zS*zoJfVmH`RnFqJ95$Vrs0A^ivDmn>keupVvdez0AH;~18MCU17Z;S`6Jx}MgY!O^ zPCwI$DAu#_GYYrOF`057w&50JP;qJ14{?)=`;tCD+do1oHhiDGJ4WNcmDJiTUs?n6 zJj8$RkKGj&K+hn>xM17+C7ZNk+YbISdmL4B5`+!2c8jbZl;Z!>WOh2-A<9yQB7M3@ z4DsZa5vZyJdrI4+K~zk5n#6QFN?W@ko>Zhpo`~t^RnZxV*f{v8ufWM)Pn8C!eZl_39{Wh5Rut}j)Rb~XcW7hPER#V?>3R z`3n8yfmwOk4|9wJF#nAjzmhT<>g1ORt63;55Ct05R;4Qq=i%tvKo$3u-ohCZ_(3rDeoRSf>Zl_!wobYrJR&H_`4M?%JHvA9Uyr7DP^if(f z@;c@^){pJ2kIloAE=*?pBTLlz0j{RQMMN)v3x{CpR0xtHjYR#3T z(9d8E74j2c*5>tsVSvE5o&AHr4+~W_C%;3EW<|f}AN;N-D@vsZTrwNduW?qMDR^U< zwClt~IrGuBeAPB(xHC2iG250)Wgwhgg z@+tO)p`knv-5ZQAJo?@2XZRV1O0V=fK`gBP*&PIPuxVjg!h7lTGm-CfbLq}iVe*C3 zRO6%qS^@fWuqP;A8KYe!tIfp*V47>h6I4dhu)O^%;0b-u zS%ite4n^H>@iS-+G#3fz#7rM3Z#Ji;9YYGxS673@7rVP%1@*L55@s&rKaY7W%?`?R z#~t6P1XE%PZ?x|N-3}X+VaXhC3dW~v`FaoBnLWVVvLZoygB4Cefc!R|-kb4I)YerJFti1x!>n)9-5?65EJ$o_{ezn{;Cl{5b_l9>b<&AFveCf= zqA)9-a3`BzSmtjyv{=+_oEH&>jh(YMiMi@A|nfmahyP;W=3n9IuwOp!8>gQF4aiX1yZy*nbEK!+}y51IE`{fTTnY6bpzvegrupzxX=}T)sid2qeAt zaz??NGX&+gvL->0S?a+}HB?^+MO@6!7iiwtQ)c1e)&mc*TmdN7Y#~VUcJDC}ya5WEcRZJ*jTSgu z?ij{kiGBKx}cowN9g;LrvKGm4=FF^>`JAw;Kg2R_b_Hoo}LEfr9&20*9ta0!Xd(1`;Ue zGlVDH@83$+d*!p7Xs2L%@~O>AkgrA(um^$9#cDZ)8Gs<+pTHE6PCl zvhNfqCSxtGQ=?72C}8PV4Ze8Nisbcb#5F44>A<(~@Z1#7N}3;STOjEbRYIs6U$M3f zci6t??f^@@?du4S=S~jSA4idIh6S1d#Jm_Way&Apbgoq5|u~ zf?fHBmkkBwhEmUB?nZa=Ay)#@fm~ON)(oN|b#G?wC@QWJ)JbNCsEApn!yH%@LQoHn z23rquJ@t>~OuVGw=4y+#6!2=&LXzyLGjZo@G$@&$23m-*op;y)`nY0yl{(zN4Bu{Z z=Dd%4uNNJ_M(t6P9)*Kte_#j33*_ZiCphzhk#dA)EydjZF_$xo!iZ>4375d+Gf@@Wt_o4V9hejGq{n5hP`~ki}kXgP&G`H8VM|z6jcFK<{ljjjG?k?eAmd;!P^*N7mJo 
z7B1o#m}A;PXoR0lmLeqOjjyQS@r`(f9aw=Hp=oe0M$-Ek$Z#@MVvStfU6^n4AZ*ra zSq_AZ{6qtmhHxI9Kx##SSa(9!x8iF=6;J4H#QN|R%wx7Z;8E`{7g4or4q~pE;Cx-9 zC5nrfufjBPxQ*O+_>B36;G-A74RMxR6P-M|5oN%Oaf5<*)@z5wMaRk5Jl)>PO-Dd=S#u6DD7ZSQ~W0lf)bM=Ho!TlE40-A4mya zvtx-T;$&u%apq%-)c^!op~8XqymN7eYrVBJWFxb^fG0UH`8rOcb=!Y#FIAf88uozm zbOxBhrWyWe1v9}lWM-&!raOxUQa8eQ_XXgK(y$`)oaCp76%08YMhJ4h#XMnjx(J6; zY1IaXj4cotY+h&Oe)zKnwD#0~9r7e3!4CkzU)l+n!A8xGZ^Hb*^3p*ox!UwyQt8-(S@g~QrBm3S zohz7dIVMbz-A!^orXD*XsJggAx{r6#2Zns51wH4l)YGg2Q6a`v{FeU=Or4Xb_>x^; z9((rS+n8EGl1FBE9_Zh4gJNfs#?<`dMo^}6KZ;nlEaLR6KhQ>c0Ar|b#%Um^sgX=)Ap7p$qrL0%9O{LFG3X& z4T~|qdDAcA1h7p8+Q*7CgU+zco|a=Ok}4jIob`jOoz1ouw(Vij?=)%dr`xk|>w<+X z&JN>16%qKXu5Q1;%Nk#_7v~mp(2R?+F*Q`-I^;(%WF&Q9>6b|5+68|pBSdBttYCKF%ETQ) zL{DFNFwO3HKuP@TP{3zQ(-4^?EZ4f_L#1D0%6~( zhJFTp73SbH-RyB=sd0%;#x{Jl?xUk8BuK5%?-GY6cvVtxSN&`F0%}a(9#rbnd*@+m zen5D8l%H~?w3}#*?E_Or$`VO&(bvpkPO!3vFso*+u-+D6AeblcStBBLRE0NrL=033 z&qYakeGHi+;qjdrkFAq?D9fe8_zFGSWaau3Wtk6e8Vn}&T0i>WdXeCDQpsJTR$N@o z*Oh^_$;+nTR)BvUQgpnTzwV|giza`l9}uZv`o{(*3fQYp$hKS#!eFl1r|jJFNI@ml|XIKyim`mEtKC<+BqWj%JTM;e<&rYBSf$AN2sJUtKLD{$r~;RzRbTzjhuo zO-g0>GS>TU#60m2AisM=pFoeIm^K7mvMy-=-`|F)1+Nie>t>qB);P_0Ltd{l8gYmF zQ}V>PDDS40rH(py<`dZZIX;%`w7#6sC!TUrhem4E*C=vXD_!KQ$u|3hZ2i>08eYm2 zj(l`C-2Eh6ut9htt`CS*4Mg)UO7kk+N20ouM>X~8y4q#7uc_(3{M;D=0H8P*WiGpu z{G3=5pSaOItK%n{k7#UsG1KCYFltx_pX|R6M;AC7{V1P2K(EF3!aM zzGRL7?MsZn@u<8R%=qqesWetA&Xbb{%@4Yz=>{>!umr>H6cxGBjAB7|M=&O}v2YT# zzh*GVULI9rRkGnbyozG7S}IBVB1oTl)P%&|q_BYoByrj{V$nyVp&%vZqVO%>6ZxtS z6CXcaCd>;-RlB}>(iR`7eRB{^UX8tc)HKPZB!y_-v>18n*PEme!yY>rsZrd=eV!RS z6I(6NO|Fh13*WjUBCHzgCq5;>KJ;-i$Vyj56{GWW4B!IJ4U7;67zUA;ji zHir5pmh3tW&W*T$yKcfsFGzpBO7rb0ZD2N+tgec1x z6}goq`m$)a!*>aBs8*96o+JN*(Y4cEiKK4t7yM!v9ia7eJeTywU~&zL<8W4a!sU`$ z2Ch0@1zMd{+w=n!PY&`7q;|3a^xt$lqa~r~#L7|kNB4-$mL10uNRhSp6$$JMW46YW zJexKC#-wElXGdYFuf>A(uRx^uBr)>SHjP7Zr0<_^3)*?jg`}bjN~CUwSdZy?Gwxd;}WXo)Wgsnfmc3v>OIlTo|S?r;& zUn1h~XSVivH2=e_$Y5;()WZVy^sIKmX;r5vs)S~(^8xy_y%v#X5Jwhi*Dw54;=D=iTj1cdLa%_VKLzl&$j;60pH 
z8+(J@)SM>9Uun6CBjMprCq=4%no;7J?spE%xe_|Yp`239Juz_BadHs)qNw9O$-gdN z9>}xMlFbUfHgN1_P-EK%Q9s>RgNY0eza;*y)QIAg{Bt_Ozx4lcw+d3}?8*wJvXUB3 z8A;i7@+U+8;$EZ;5RJe3n>{n*nfL|xrrEw+H{{9$8AuADB?@RcDcXsy+4ew zeh5?$75@QLDqH*OFw5aZw3P=Jo?nek72N(C30`QU2r|UHhC-VQY#op0U>HV<;{YwF z90Zj&h>V6Do!~~;8@?EK;VK6h)OUjmGuHLOduY(u3(`Q_e^SyObtd%h`^7s0;!v!g z-{^fAOMk`nRR@gkByu90YpG!5V+VUwY*8(=7WDC2ppD+2WxDj&pS*Zn#Cs!Ytej%Z{!|r?+x%)?V+c}?1KMacXK&Px$~&y z+y+XI?C!Q1e)${1MEi8$8S2P4tDJ);j1+g^oHDp-**{l-X1lo}=GXV?-}V0D0b>q8 z3_XQoL5JP^_2#>C`|Vz`S=*?Gvg;dMSAtLEBghZiQD-c5a(EtCL3OIZM+udcZc{7> z=~@68j9W-$t7R-J-3@AzFZawyr1}LkbaiItPbm~7me&87faT~PNbmtMAp|T0c_S{} zWq7CSVCg!0Xx+J4IC~7IBe>}h*!K%W5|T!!-g_(fBtvNzk?lem^?~&&-+|hi;As}a zCkLY4A*JVFoC=1PLm~f|!lUb8RK_7+TxKneU)EJmcRiMIglbu@Mglt`9-iq%CRSJ& z#4#PhN}~&F2*f0wY`}+gpVb8@MlwvhhkzZ5RADA?ex9whI@$9R#LC?sP_rUOw(;6N z>kEucY=;I95~VEuRv%}CsFBwQjU}ifgG&B%(tQ{FYEc_yCYsmj#V_gp4urkO*~2^g zZCs|DLL*<-xtkG-GV&gqiV(uIe*HI{ZYG>6DiUpoC z4)rFA6H#Z_{#b}J)U83f7qL^*`4SqvJX~ToCDVayxDuD&P8aF-kQzccP{NHeGl5RQ z9vMX0d_xpW*?lF<*4E6h#~KVS_f*ZB!$pc)2=ksXK2-Gy_W|HqB~Ux_4;9s8MGY&H z0M%BH*Pwtf&!53Zjy}=Wx>pd76<|CDu6S8zq5duu+WDEt8@&dzio2ALF}n^Z zq!0vn`%;fo)}4N+M@+cU1}-Mq1eGkgT9^GX*+d4qA9B6YAX1=38e#qKF|WHA69uDV zY_9!AW>T4&n?TRFh05w46C@%Y-Uu@|Qo^dbKpON$;KaE5AJ^TCU-28oOZ`jQ>~ye$ z#M8ZKdjm^;g~v%dIo@mSUY*KLYg?*>X=`KNXBg(>8aO4mh$#w5sdXf?2qBpqx6P0~ zg(!_R7Tgv~%F)J@=6p*L-4mUEpQ$X;K%#yK6dsAwX=WF<1A_EEFgMtSwtbm2YdPEj*6#thHEi-ZG5{iUo4X_GE&bAkv=ASf&% zL}_=A6&cl!iRoc-JDUrcTiQbgK8VDxKpHLxbwoPSFMtLZ1WJFyr10V6dH}#uEGqo= zl4m__d*&H5PeH|TW6NB|guJ!M?wdd7E$VLXa(?|5`~tUNOo+G9L(W8R`nQ|VY4f;8 z!K)z5Gb4B=B7m8M)rgI2_Mk(P_zTPzjpqkW?Q=i#Qz^iW(Cod#WF|)8L0vPPnf4`@ zFh8jpNOaz=(@i;?kV3lW35(ElPC)KgdeEV`1JG#+v}ya!WS6ZW%aEr_7o1%padC1` z5{`}~V!9A!XAVB_4@_O5*i>C_%L!JH7-g^*p zvH$ryo?!S6a^il$9De#Wgwo^qfC<}f&Eq-KC($#g47uJnXf8bMhMZyyuB(Y?kIQSm za7C8D_n3C71HN2mH3IYPIsk@3kcH1CDmvvCoOmrq4_F^p-z$skcx3mFIb1q(Nr^MK z6M7y&xL;txw2iwC*Qcbl4NB3rlGz==gW`rhzevi8WvvN@|yzNJT zQKR$iVCH=Te}I!d7!BMI$+0#Qd5hsjR%p43vH?IHRH*Y!K}nH_CAh&vR+kYJ9C53_ 
z9Rp)>H*^`TySx{WxqZ4L!)$a;~Y9M(ADnPaj^WC z^DpaTC`^6m`7qe>_g#XLW^v*t;$yR*EoDEh=M#Ugw%>dWsf`%deuPy8-A~y9c7*JJ zpeG}uaKb2ZO)HSTqRba9-ijT4Ut-(?fkXJZ&TJQjp<6;d$^~uimb(ED$44+IF`<(d zUS%1L=CBR;C0`@dBBJ;fpo|K{k&iIgHSK-{@97&fgF&U;K$)Y$1@2<_kI6g0(9Htn z3Io*02ACAIG%xC+?#Nx~kC7gbUkDb6J9iLOn9k{+!1NH>NOmxzTfh`+oGgF};nGg` zeuhK37%vW-4_N44!+m%0LsD1zQy8o|M;vy9vhQ&x+F=pi0b?;)f6DgWefgD8P2GeX z(DqLYq)FEeb_vT9mwWO|Z2Dp7EXHIhS}_*oT**E+GSIT`NI(17OQ;SHD>Yv4X83#A z)NVqE;mI--zOA-*5pA49qZ(wH*kjg+F2cZ3nEx;$YHsF-WhfYwFMU7`fbww2I=4_x8}970SA5Y7Q`;&+>He~H{1Fp0HH0XI6@xIPGV|-<+GkLU$R)!#_O49T z!E@daAO%24`FSRmuwC@mz=x^cA|kl`Cp`3GKFUt_bEnECt^TZO>^9GXc2aVSLdAG$ z)ZwABOSt}fn=O6=X=|edik-3QDmrJ)I5!77as5W?m5N!wdk*8_y}s2@$u>BA&nTfH zU^3cxq45?bVA~71YkosL>4X%2-aT(wh8pO1Dl*fzV{Y%g==G5uvj*RvnlQW8rq~C+ z6i?tNCW}$gXp5?7^Al3MW|L9Zb(qkFUDA}>dk6AYjgAk8pV&j(PvSFi(5Z#7ZG52G zRH_N9W~h*iGx_oa5qsy`=!&L`hvE6OK$o#eE*jBh`rW0#IzEJanQ)IO7`giIv^eZv z<8im4Zctl_LZk14aO2Y7Xw``G2BM_*SH^TxW9{y379`>(f*bd(BJR(UYA!=@8a`MJ zVf^}*k(pWD=1(2Ej38nDd^Ym zE~UDD5L)hn$dyLBbJb2@1~4h}%ck`+3?z)(Nq)VZo~-9nk-HKIw6 zzMw?VCz??-lA-UZ*j&C?Em-U900rE<;uurvk{J(=?~?vZU^1)eHR1b4jMcjNNu_^J zYWogGn_U>ZmuU+;3cKjzNqFNe?rnm)sobLQZU0cB%}4pTkt=<8x;2b)IJA818F(Sh z5f=b`N*>V_j~x*Vuq+bcU>x!FI!5>X<#(`ovb>E%+cX8XG@+~ndnkXp&Paqm@3G&| zhTI=Czy(N={bLG*w+LL*Yc8Z2XKDJG?WSWSndL1;ei>sSJ04^B$sZ!{L%SH5xX&$> zVmcW+opBsD%AxgCgA~deNUZD)bm_0{huAtlH>?DdpwSiHPd8Oh4kLB2e$Jd_EDE z*>)cjh@%|1Wtx+V9_@m+WEDwX$3{p97!J-%ZgJV3UMWwUs5Td5G^x>LEJHh z0pahG01J_InunL5!J2)>T&aZB+9!(l8$t_w7sv@|U0sLEzr`)@^OKfS5h=tLiHRE) zTt(}&j)lPft)I1#qbpJ@itXHa?pPo@j`<%!yGwK*O3d=D8vOp*|B`2tU-jn<=TF=x zW@bV0wzbP=8T4RJ&69_|bMU<1=UB>QdMu$*n~v?pGF;V=i|>@(Dkq4B;t|^i=ixSN zPz`h-MN0ZdRe1!c(#B=)>z7{ZgEq;=M~T)g-w|g{kX236uga7@X9+BKxG|(I$1L*B zsZH2Q5N#v{OmfLH++u%)~CQ_Eb2hprkU2dMCI_6`QxtM=K^QZGPWG(P05rWc7- zi&l}S6{CwMjaNZWCc(8Q@7{=LZL_7S4?&)TdE^z7{O5FbFaFU4e?i(eP!K=AOp`=2 zjn>T?DEzY@vq()pvL(~#@}Q~esKNVt4j0HzFuf|?Y+r?hf>qYw%HxtW3*Q_;Vx)?T zL+t1WoHuFjk{v47S;{A5$J9Ea6K65hYcHmj9@g_B!qX9m2 
zl~j4V<89+(O!qKSrPLUmm3sl$vY-G?x>rlr+DynFapoZIs_qzw-59?bH=t5&sn9Q$ ze+}2RYYx@I+b>SC;MxkrmNss%&(L^YA&ttdiF*olc$7$ee#6a2-pET@8EIo&)9<|c zGEVtKTJ{dP_@)=O-ua%ZMg5xAe|z2nAs&^i5ong#0w2}$KmQ=8T4x|Yd4eTaU=Qwj zk&;F|gdNJF4<*?cSjEp6lHKhbZpz0gQ&`oWTn9otnDf=#JE}<2_*!SZTuV6gj;x)3 zA3q|f+T%d%IHKC%=JQ)_CH?5B2afasGbJRtx*4lRniC!-5xFZoVa_iuNuE|65$}&% zlJDf`8Oo_y&-flvq)gi1)ea2d1aE$+mvSC1#zggi>s36fp(!q;>^u zSCr_ZBx{netjPkO+el$0djeIbqPQ{PSRGb>U52AxJDXeP_V=@%z&D2bHpR8|=``2t z+3-`%MN4^@hXm*qDhB!OB-;*z9hp4uftP6{41YK$xIPaRY(3lpWkD+LKH|4j=?9+o zN%?zZ_`>GLV1hV?V%G~7sW{WQf?>qI{0HmXug>6Bu9FYp2Jd*yuQsO{c6LTxF)h-H z+Rzr}VxVhEwq=e{aAZ%n@IS2DksQD>kXkFflB94*$l{GDR?7O1Ur_3FOdsz#t%6Wq zM#@_(L44JB;q+Ci;A0HkIipCXwgcE0{rrPGWK)ur4c}bCPvs*DnhLAF`R>f+T7J4J zM_xTsWIT5OTW(mzcR!sU1=xPXt`MtJS2W+Ajh0i5lcY3Zx*4_h99I|TctMh2r8&%h z{X-ND=X%;<=CmiTiu7%h$2F)TPrnulrV}^59{mz%*jUw9_)O%rEpC2P_WKCxxc240KqUu|w9W+n6 zPfJ_$V(L0koD#ZjIvySME9un-?niDl`>Dz{&o31E)tJ%18E#cHctp70AaA&+m-3CX zCY@n0Wq7MStK-Rz>wsF3*j~xZAkK~x@;IgGqSTyk6`nC>Sfo{Wg-vxqtiE0a2FjoA{>V<{Z#uTjT-H%5Cn|p@7lc$M!qEVgl-C zXby8!b$eu)F}fL8$=x^LnNs1Vj1%wN<4;Nv(b7=Kh<_VIVCa65<+g=>PbMZscwTkb z(O%eg>p_l}BXVVt4xeURa3}zoZ0>BFLoIA)YU)`jeD-AHfVnou!J-gXi`Q+r`NfAA z)A6b%43dobU2L6<$*=%Y6yKHn45t#&ntEiaI>x%g;gM>naopjTj82zV6(-vmr93XqeLStjEPK`@ zVeEVT#hpEoF=eOcOc$<`$y!Yb>KnG=Q@quh_$e*Dv+ShjWa{ru+H8K=lfj1r@OVQ2 zBnm7CV9)C}2QKW(cM27Xj3V6+ zoZhA(tQi$Ydf1ibvSsQCpYM*-wl7>m7}w8S_!g~epllQQ+cTvpXqutc$-Z&1sbK`f zL|J8o*)e4se^liBT*ap&>I&QPV)5Hf(xu7;V)M}M3;DbM3gJ5##!o!nqGiYYh2QYH zA{%)E6CSm-b}#cSzT4_bBz$g*dQGUgb75j-W*M#>kM0S$*!@BcPbCMSfB2ad2;ucj@%xGt(_ZDiWHUgka-CImeZp8Sn`= zq*V1%99+J#6N?t3udtOtyWZk&mmg))eJHz?z2=GVpEW!NfvPp3)UhUB!N>27u4+dvH}jJ*Lf&G{TlH3gT6FPuv~*v@Z&#Hk zSX~E3hGw1Ky?FmmurS%mKZdii9_=43^jid^f#s2p8Qr*bV#lG%quQdFT%`7&lRtp9 zB-gNVWRa#NsRzR`*1Mu;_6psJno$_k$w|r>(#{VxBsAXp=!;aV@_jMg7MAMdM$)h! 
z3_-SB(%i78Yu0*5WKN{+!mhcTqRTwkH<`#Am`0Ft*vTUDUGHnFRKgP|ZMr?WVmtfeM?7<;F4_-nx}4hn z?8#IG?mrrW7f>WVou#vZik*d5F#R}hXeSO6-gGphL88`DgAx&-CG4Mg@N~-wmzdkU z7g9!e#zp(S8W3(rV$z$&NJ^Nk;A);^m$fZwG>8b$$=OYJ9`l@$S2AZm$BRq`O>7U# z3C-u%SZ3ZD83*_~$*brvhZ-Eoyy`04#4qTDl)%Q72k*sc&BFLtgZI9@jQ3Y(j#J^K zwY`{zk-0)o$;X_YvMo`dJw_Kty#?UPn~aJCapZw$2t7|o$7a~{eihwP>q(cq6rC}& zW7^^-qkWcURLtEO$BFIfJNnd%$XE@@rr4n)HOhK>gE#PAI|vDKx*(7YCup}RTkrG` zKNt0k#G>j;_o~1DC-E4sO53gUyh@`d#`8azW1cbKu-wc=)pKWsceJd zN`ax7>R_uE8W6FW3CX|mM3LDy)p7D%#FS2w;n!QW5yOTi!=(Y0Ugcc=wZi#_U$Sjb z0^Vl)^7<4i*>*cHe(7fZ*{O7I@|A6Y$vAR{E_;)O`R$bl`)?EmG#MPtxh!P#Q7U?@ zj2SWW3o-8SwX@hS|b#DNz;uprY~XF@r3Ec?c3M=j~xh3?Cmc}~}N2mQWsH|E9{=5aL%pQ|#o&qu~Ok+k6M z!!kty7!CT#oCI=s%#X)k%`n|!@%D?NA{xK+EJI$5ty=oWv`QQ|wdUfr-FxK=LBAkn zYtZK|N#*#vHJ()?-;ruTKc;m?Wk;6jp z^t~2_ejxgpaU8s2QA+P&^BhWzPdL_~Wlh`w%>Q_sFYLCYRQP_S7iHo9Iy)1$G)Y@O ztcF&s(pb`BCrrEb*v^SfUpqYrs80YCaK@djK(kFKsrOCCqJ{4$0{jucAUL!*W#iw^ z#ejmz@rJSGVabiuqU~sL&CdotX-Pw1gyb3BWsX@$olpyCue-f*XPx zXl;~+j}mJv2wFiBRWJluyajv=0?PJSfKs*Pk|#`&#%7}t_80Lp)F4^;iLg3RWK~41 z@?8&xFIgV#4HF%VSj#zDXx-To%!|p3$2W<(o%QS zGb!(HsAqlCN<1pOk{Rp(y8|qMte3aE+=ircz!zYVe?M~)2v7BM@UMp;yx;ZVpC1r* zLncAQo^A}qT7D{DnmZbgw%~vc{)p*$p7XUxO;?IAAtxeRg@;#t~r(J_32Jw=v7PD zS8^8{^%F%=lBU9f|L-n@){6HavrF1MYzLkNrS_RAVqwjJ{2m0>hPreGT6dy*JB{~V zjYm$?n!@8o3)X?(U<7~?P4lnMHD7kC8gz?laZxWt@1s-|O%J+ws;?dM4(lkP{PxQ^ z+VyKyrpvRun4)s|xG`J*C|d6I2I7{8-SGy7PpoO{a7cdoJvj7Adqk0|zgtn)M#|GV zTIwYtfg-D}WHq~|{Z07fV58AFf6{X|?`R0eG-SxoC6PR@mA*6n2$n${LN@&LMF1Z_ z{_G8;^sCy(S#KbI2XP$)&Pxp$3K0;BjWVPUjZk=!OV@$>n#sM%;R`P(~mC*y7bj7@53*1Q@p!M+}FFW9hrs;1rSTN?FcZ zhjMc#vb#|R03BlL@C0p^_?7Bg4Yrg~A2c&~8^dghA4$%S*J*HncJY-ZM+zFGU%N00 zkJ3FyZJ=apIem2)kynJ-Y>frNrgG7Kfq$0h zWZIc$y<UhRlA z2qauXJ`iM!BUB4M2s7Y7k>ku>eZOUZ06Z~RAq1FfIGKaa-R95*qZ9*SO1SSinDM`X z>x0t+_CCFwR`{GA_!@>zT!BTHTwU;#om+$X%loGFL^QE&Q0e3O8U_c{qE$pPMukC` z1*aHsMBDf3K&1IE&MSGl(8`(?!nIe+#$F&ueUh*&_yjLucz~5gBQs6Aao*EfX+-sp zEz4wnA5tS*>&8GyI2^Y2zMgp|`5{)`n1cwLrOuStB7MG^2eDBIYu*4g;;I)h`c&VY9{!a_G9d1fhNLP4s$e}B;% 
zPjDKY+kY$1`>RLxk;Ma}$s5E$us4mGZ>4`z%lety;aw zbELuSx67o_t2AYiey>MV$haHiU)x0rR5HgQ7hl~XT6r+XSt3v|=%2a=$3uFARN6xJ z^01FsqAj1qsN9f>TE?(r;JK2x5dUmy1R~rQ=yEsbzPhQ3j90;Z^HRdgG#3>%tOeTk zdWJ=O!mVlrvXmAH;kUHpMUthUfT2Ar?UE(q*P>yPm96GC-7?5qD58l@EdE4h>*cM@ z&k;s#UXHGgWE3rss`~-mv@zWyo z03_naFw13B`79jvndY(6w;v_j=fM2)?9^n2pSMe+Y(h-rh_As)VB#p>sj$4B8#eV3 z%UTUS65VEkm8nGsja0>b!19wk<=vdfU%hqw1YD0OS-vpgB}eza1nS0>UFm{@DJlw-MbZb!xte|3kWgZS@VJe&5o+UC`K6fd zm(^`E8Q54`e*3fK#6I5gl+(KS+Q$++#z9}mg5ySaeG~KzLwBl)25ULh2K@r}#eJSz zq1=lP++nuI#+`x|WAhX1BPDbycK%|sEoz$~uwk3+n7IY-Da0>jguhGiI%<}6OMZxy z&8o?(UejySnpod5KA@gXp}FSEXRhNAq~)g(&n%mD14}zka?!WH%X&JCNj|`)XzfmT zgkF^c&xA|e&+JhXghgwra7b7(t1xj|YXRFIdY$+Ndc0SQj zOfM@7kv4y*O;>9~J&5)^<8Dm82vlD6ePJW-%(|cdFo7Z@pjB>_MpY5E>MXWxm;J|@ z+{sUh=w*xIASR1QpH_3?9?FT>mp3U#H^P3*?*;$`PD{-ruo#O1OjmCg@)37j+Sqx_ z*wEl+ZU>)P60BWo4{O)Ie&_=~lj-O3a>?9tjL#1{Ke!H6&u9qmvyYaV407MBoHipV z83U@saIg`H{;JkCIIm2)%37!O!7+8@_lBE)9Pc$pi~~%Azp)SD^{)aA`CV$e1I(lD zT)Of?J7%4CC6eSCpBVWHy7WyXpzJA2s3JCRQSh#~wZv`3lI=(>iwZs3ujcvr6xUzn z109b-ZZlGyVkIwEXq05^x~CUo%|OSZHes~6#c zr9pLarx0ei9gY>*0RBu9MjzunwEE=yytGa+BBeL?;Ao19B*y~Aj{55>BF~+vN50Fq z4G8YyT-`6-ocIz>@Oe+H88z^PCd9d&A1+ZwWTJ{xKI9qmS+}JUDD97Wk@>m=M zXM!?A^);9?Izdvno_d7pk1tM2ze-N*$Ed7-|HdO3hRr`KsFE6E@>xF%xs?X+Fdp zyP9w*7@EHuyK7vaYE12SDfO{PEuH{zLDs#6lx>Wg-N!O$Aw8}HK zWd;7ispySvzdJM_w<8dY=kEi4&71=!i*hY?p0*?K|OCv zGX{=1Mqn&h{v-dDR419B(je#hLE}#@kLX5ssgmO`OJ>L z!ej11yvt7rF<1&SHQyCC$>z_Q|7$fnb5aXK}GdD?*GB<}AY`~St^{a-))e-GdJ|HLme=O2cqQ(#3$#-IW)a@|7k z32>>^opKj>wbV)Jo%9LauOvS?ou!+$AFWI<25w#lG%A}$X+TEsfq_a;YDi(Tctq>q zuW)kaAjko#Ex*VE&!4im3Lcr8S(7%f<$2S3OlpeQj2j?Z5xC)kksTOJNFs{R3ZTG2 z&b!&a)bDqx>Rh~+papmhDXhIOz@^PY3eZiFFIAs@3D=s&MURejRSDVm@L!ul9bWz3 z3nI!WqO^(lKpVRyqF>r|sSnouc~_5KxKi@ZARPY{7eD+)N8KejNYk)kRv`NAK!^@6 z;22s4D;)cmHvwl+syb*s%1HCE2{H6c?mX1r#M@Y4@#c%g^k(q{Ke*VP!P~5SEJ_nO z_ID-9T-z`2F~g1w^|JFeWvBu5n}y3O0bQ1B8=wjAsYV)gvH6pK)h&MFI}3lsOeo}I zIVYic-wt#_BZPN6taJ?a|NMZ}Hf3^LQr^FwJgds>QTC<<^$@<&H756La!$r7HwHwiU$u-mnI|$zP6pPYe!6q1TOuZ;Fz)Xy%r3= 
zID{!~IUzFV?t&^fa9?X2e!80pyn(U@F3bOM@B$?ECkP=<9jaS{(3NrnO7D+|;nF{p z0P^rCVoOTGBL>^xtIJ@+ZbqOuxTVOH@$F(Soru%uuiNc3ceD0?E;*ckel?{+v*H2a zx@lB*LaY7!^D9lg8lV^PY_`mK#K=JN@zlyN0n)e?3XvThTO9XI5 zA(v#Kap-iDrraSb8+Hkr{q+~J|MKDCGd-^-O}hYJo;*VZq8Dkj!`CtT5&GyPyH!M+ zK(gBdV1RH#e&YckO=JDQCEVEf3_uv0)a8j)Bqbs)l?7-Vo|d|QUN`$1&RG!&($(+N zy<&@K|0QcR;+Ik;+klqc0Yc=Djk-ImR?=QC;$$HLu?r-#_uH8807asVB-Ava*0Df0kNvNLB&T}JY{l|tOb?9t+N-s1rTQGBeX0{t80cjasm#} zC`b*k{W(uN^Lj7SkAV^0|JN(ikn>LuZ`vNTo$<#q z2rEy&Y5(V9E>cfYRX!XFN336~k(DAs(gYzHSSw9gNoSz~wiAWK8lSh@d-Xr3oIKnr z;+xAHz!ut4*#fJbr3|Oykq>#kR6A+l@3Gsh$5Golid1MA*=WIPfGwX+B=`S6&05f! zbaba!pn1!_TX2j{-1r3%!`so}I?1wyhZ@hoH)TLS2VZlLPKtc}H@0h1d@cQM=Y6k5 zao(9~wI!{!fq~NUl6_P8qyN_4n}<`qw{OFm?Okcuji%#nTL>~B3YGCM43w>!y*(Ru~>M|Yt`QO@Ao{v<9*)eJ$}#o$9vrOao@+? zWUcS;`FyVHI?wYuuTw<2&_t7DlUqBY3J}Bv!D|SGNipVA3uZ97pF)G1hsd5f;^kf7 z*_nAI^Hc@rS!f*RkOu#kwR@8qb^duya+{yu^#seC8COEVi`#*Rm}{844sk$u=DrWBGl7*+fq2pSW=F z8#R_Pb+^R>7pk$>oBYZ8V=U*#KhZ0nb!CEi`ovJr`_{PYFyRt$cPcZbjvbPb4^xh( z@1gql9?(lIS-ZVFE>tuvOIH?sFrr&0kP)A!{1RW@`Vs=DcVV%t4O>R4+Ps zvL2BC0?>FFvT_}GcEVfGcOn1w@1?3+CS7lA`Mi=m^V^TEt$9WWr+W{%bv<#|8zH-q zvaHQD)2Q{f07!!Hx`hm=!DIo+JLNZ_%Osc$sXucL{^uiBSefJ8!Abg6% zgc223+(8-95?S&`OU*R=`{Rt`)l^he3dKBR^wl7CYOSMA9Vh&)?yAigG5;&D;G$lS z?=99U-1Yc3uRT1r%!f129qO#UToe>>d(TYgzEOLzwaew zvvc+Pl)wLh$hBGAIYJafn#m55+8j7?i@Sm;I%SW2`}=0gMtIo7>S6%LqHXvqSitKX zfBV}%DgRY|uhX2kYg#{3TP*f9*v#S9f}cfi%|%Go4|iDc<5B7s%)COgG${l&gTDL; z7^RzC`ajR$yn@=g!6LXg+k8iwaoKw2llm#{;PlAcVz}X-51Ml&8dW~`su84<%4+^T z*Q#&ZUdhO=HF0z*r-qRXqKT|w*bWQG2FH_KKRg>zy?vZQ-rz2nm!p=Hl*Ehk&D^uC zz=@O$W%vI!08RFu+(uI!W_kWyd+QlY>L)|iEr#pVLafHd zHwd?Qzs)LH-y0rbwe9cz`52{1{ajT}zNm<_GuHtrodMPyi2z#PZER$MQYYYyEyP&mmTQKJIV(yRL0So$~yVE9W z5nL-j#h6cS1HrFy9rc}8Ubd75HL%~yiZd?h_FtVLhsCB_Q#S9&O>{P_i8%iBr)I$B zGvPd4e#_JL$D_X5C2__RE|_Jb4+%V=0a%V#pQ2;_EXwlRTmI8-@U@rz zUuhml-2FCIK`QN>Q=;B{5v+|S9zmDuXKV8|paowE1zx>E356yuQ&UNb*rb_QN zWE{mant%k=_PPDDS58doM>mE9w>b~q%gxYXaQlC+L(I`SM3gz_JxmKM-Z_Z=y?}5& z^r8->T#Fm 
zItoY%An(>ANCpbaJ{41x2O475DNQSxOf;5k`fU&8h&v$%ZaGCGotKDFdvNQvT4jp2 zhWB>)y+h|TH+4=M-=Jphwp!X{Jnh=MzzzE?lhwY;j#a$QGQMnl%C!2V=06zx1a&IH zH01Ra_V=krCgGyggIfTxPn3)-pLg-5jJ$qzzsAd_zx{*T;Q;tL!RXa`oF>ve_B)ez z~6E(DgEWeQtNg;4Bo{bmLO-#2$KyTGO>P9b6{o6#r+9Rn$s3$ zOfH`ivwnKpT;7Wx*h%i5Jc9l$GmCc|i3Ysv#eQ<~Od!(cy7a}3@VUP#whVB`+PMNy#)CuzN7617o~g9$O0A25t~_vdfH&|RcSP4F8WMUr_~#pI;xEHPK@_Tc@BSZ!N%$6|8)(N z!etp7^A`ADAm(T``rzLqP~@ER;Don?$~hn52eatctbd`0RL=v+Cbe~**6rF#rr?%N!j?3y?0PS;@;4!2QH{? z2&y;XQJvZUE7Hw{Uw*RoNA@0Ank@EtUE`Dwxl;mHHM!!R^rdXJ3k&I$U*EW@ZiWw} z8#aIXVDULEy=Lv$Ki2D`Bt1iC(OS4)->>b9Z>IUC8_Qs~7(_LZyyg@h{O7TVsU-^~ z-YYAW@o)E;@tD7{X-1)rfFcdmqt@s9MgCg9(uA05L2B7$qft3M+h1~az@Wo(snk2x zvxT&$Z{LM!ixSz^6;F3RbG@+esq_U_^FzMD?5uURlX#4#P3d1R*N-Kc2!(;DnEZoA zY)boq(ke+G?Q=&TV}wsrN5?AN(po!%Zz4Kt7l~hw7S3y#kk5vW)m1b)|Fo6k`zO*D z7QT?aFf!s6K9 z0pEp8>9rQm7wgNP!Y8|YMpuRJ(JT>*w`4cN;qTT9ul$0@FKrzbbMbhdzOB_Jo;sm} zHCUddhYxkLw=Ch=zWKh%uC?P>EYXX^jIPk+Df#SR{~=S@6X$r>FB|`Ip<0k4B9r`S z=*9QK_1=%s;y(^7T_=%2i-^bQ+~j`d_+UmdJXbqCnnukd6PNyaa6A0h_MgJI6kX@r z&~nC6P^1~G3^;$(6nWvCV_?({+c7gGa-Tg+vKf$eJl`<^g`&+%XIL<>vwbGUML+z) z{fgU9r}UG&#H;&2lSxl!j5!;!>tKU8B4KHa40s9Sn)7Zw4RS&-AkJCC`DtCml|>Sq ziLIs4jzvou?Jq++q0ZcEG1g_6swo2qwPZ+db}h)2&>jTWESNSWU`pN=cE@D;zJqJ3 zmk01&hA(LvD@r0Ebn{WIU5g?Q*pq%SXu|(AsE7i-paN1iny%XVGE7?ZLO%|ZjExxt zBNH0|g_W6{asZE#eEFaAMd0(ghuRG`mZ4;>0w5FJ158=549dvRkc~JXs=@5giBp{t zq;-;e#|AGJwISqdjT9@5XEsz7N^P{;rCi$6*IH~iuBWHh#yxJjECJ_e6V@P)(aFxQ zg>Mc)Cl=;CQWf3aTwy!h5+hcM@El5|d&Aq0uqp}xeH2KYk6#g3;*ntI?}(m5UwAs9 z=EmA`$`S3|g|Z*S*0`9xj8u@>iUO5^U}gI}rV5<^{n}<`C=%I6)mBbrqWT@sHdC?d zU!Scja-aL)w(l_EChs{fOCVU)?O?P#nwQl5cs}^8j-1SsNsvRXEO5t~d;3Q2YYZUf2~oJ$fHz$UOe)$Q7T+$=y~ZJ~ClF9Fq%8D> zbXs-YQP`?B$XBD*CQSSORkNOx4^<$IVRxz4QN^}AwE^UqzI5aMuI+aLb2E^UB!knL zZ6?P3!dnfe6+H*SoA$!RULEJcL4LTUp8}Xs2^ZB@isTqq+B$^NBMXaJs;GtgLpR)~ zwyl|WdghZ!^jjmALg!Ta#c8KC%V*FpZd{q#M*q*V;Oc3FA^gqG`B%7y%)UL7o#+pE zepcI#Xu-p?eZ^1Dxu3Fr^&~m^1D7*Kc158u zTUb$+ye(X#iji@KAvw)_8Lc$dsa=yGAZ7hpx5)=)BhpXSO{aBnBQs0g%4VFyW*~jt 
z1Kdm_Qo&6Ntm0l0=h5u>O4{fb9FV}4M!W)a6-NHvCdFRBoEVUgvxk5o$JY*h!pYZZ zYcXo#*9>6_+sS7weLRmr1U9N93@ilyAm^J-n5JqWiFj22ur&;uDy!it=&xlK?Vc-# z&2@*z9yX0h%xEKD7G$EWkOluX$ChW)?+Z5pKC6UX z)7@u7egM(vh-Os8s4$Q^X`xuu#z2-^Lr&kf^2U-)%ZZ&FK*D3j{S1ugTPvC2grCwM z9YV<->fL`WI*zPxP}v8EFQ%`)F}GyXlSo7hZ#(>0S}RJfG7iz~r^+VqBWg4|!31I7 zd1imTG9%>Uau>Y9=f86xd%i9VcFzR>ZXET{g&d*&g08GQ=$vZgIan0x0(MzC!1jOS zL)3N8@#x^QNC-|_l>SLSX0Mn@YiY^hj@W>0CB^fr&vjF7v?pDzy6%q}li{enEArPB z|5AfF*rsn&C-D>}?`W)6Pb-f7c5>?bY-oS?SK^gMBj5ZB?EvAWsZ@4$`mT$5UZuP) z2FXU_&*si_hdc(Ii`r;4WMEiOnk#HPy@D-}{M>FBFn@6T&G;C&XJNWpt zl;yLFU+*)=#AowMS%+5T0VdLjFf>9F?|$b+?>sO)ls#mB*H9zPB!qRSQ2qAtCyRrM zwG=aTPK`_nE_9tyCup#`PO!^%JmTl3Ya5;o{gzfdY5h^Fw|@}G^D=`F3Rh3YNz_ll z9*qENkB7mHq#pA9N~}}8jKRwWtV3u<7z`7pOFx}p7(B^6dYe~&H-Jxz?%_%r5K#9p zuLR~7#=V0y-(bMjnOVvWi2HuCT6zVGx}1@gU0V`qM6C+aMXK)CUNe0rSUsEp1aHQ^ z?Z{)I6E_|%YHK1zSB5)%5zp(dSUv&BoH2KfGm6J(WbCYnDp5)k&@a&~c6ZCR1Qo7AdjhR?pH`@ zTfGZ!0b_iP8X~@e4q^T30hxp*Ix{pe@?2X;;ly5e%cp)D2W}P*S3kRC*C!e*{ooq8 z4q9)7ue-nsFAiw)7Rg@B@!11-We-%8S^%Xqeb}WIx)?ga-&w_t?62RH+?nQXjYob8 zhi$b?YgFJ?ts<)BZe|J7AYJ(UwTeQ;Z=^fNMb znh+dpYsTHB*5)PmrhN~VQfL1iVE>&j{P*Ng|EnlnDjiKD)=}|V`YI6=nN|hotM!y# zb_z5N@3sLD)rH6=4-c*?AXt@qqUX{IKb=r15|qpy=tv}K7F7wZSlY9pvUr3o(Fvqm zjT*Xy^IxnD7pqeYprCz$5M1f=G>y3P$}T=N8YO4~&ajG#Kghj|S-z-%~khhTs*2M$^jv`-qoW=n|Ky@Bbd)9sjEU&kE}$ zTj$SBN~3RIRpRbLx$gvDUBfJ@XMr$y zUxoIQYX|-W`1CI4mvYJs;34G5K;oi-?CjVbma6Swy|V2$T0CW_8KsY62zhB*=dS;~ zFfNI;mVNB4lU2rao(1ccI~=34-S6=Ttbz6-h1m=6Y9fjxMm9eR308*Klzv!_cGQ7K ztd$=J+FqIg$kO_?6U~+4CLQ<;aax5r(r)|3CjKiFqj!r(c6ExYv9Q6dsWTUQ>bQTp z1q>@l+v4$^2R$Fc3NE#U?eZ#%-rwx8>D+MCtwC8H*0F58^IR-K9+Z$tamabM?%K2+ zrPQ;f3vPCx7d5j*bLsa|sS}U45e93V5rC(4^7&GE&@<0Aucc`q7O9PylHy7|AAOM4 zY36=k1!235r&HSLj4D0bX08%4k2N{F>H*RTYT~u;UrtqWU08$P$5>s@+ zhN~VctqN#K)o@?yejvY95HO}Z@`0aRj`YbIlNMr#)IhZyN?$A-zdt5>7-eW=X`Gnp zgI^)DwC@DcoSMmjP_yIs7|8PSC!_xbuz3f$`Mjv^cyqpLKP-UTLtiRg)x%|bg!04a31RO8&00;Tm8 z{dsYK*tk2zeRwT)+DkSEz`G`Mq$a~DX{^k_d-IIe%}2}k(rl4aAWErgyAthufz;X7 
z{S~K)+em#~zQN&P?83(N(Oh`+bb`*kNo)XaS9g6u$l17+^Q9meP`CRq9LTE}iIKqv zu9Bt|XaKG~l#H)kFk~o!+Ao}jQE&c5LYA*^$J;8|UB?`EO{}aCCmy+X(H2Q3?=e>d-#8lu54uuX-eO>9`JrHiwd=l+^s=1II z_xBRZ66?c-FL(^nap=Z!UJz0V0x{|yC{zAcKyDv6@+d&3i_`Eq_8(}2;dTZ~J zJI_j>in?w=MHUrqP^NFOmP0{*!aFUk=;RCZdRGBZ4*~-ksKEZz#PsQUd-v#>;F)ys z+p9!NPjhfQm1Ur`NkhpvjMG*iD}rJ)bN7rJ8vjFC-^MtQEmB&>{o#GQR-qrtLpQHu z-m|T<_Z4zDj}NZ%SW0jF6y)fYcyy5pEvBY{Cw%#Kl5@D=b04gm&fjMU@1d@CVwUzJ z@Ku2*)=#t=?5atvZKf=x-a6K=5|%K!&RYNqWdR^(H7U3aZ^}NFecUQzzLjMKzlDLY z0+}an3%(&?>UO;Bkcv~;&HP2TYM&1M_H*A>xu_)jw|7`2=CgU$Yt8OY_=Xn#I1p8PSb6ZfhKW8BA1n#IfyExN{gy zH1r+#nUoJU<(3}BCuY7sk`Zi@Em2xW^D2SdrUU=?#edpvvo99o z{UM!X9Ugfp%ce#|2o*!NL@#Dcq;bYzi@O&|jf+I#9i$ZPEH89GFyPP5K!TeL46IWU zfWW3RC%Ct25Cw$rT^55|bhyAQIROdDRzH4ek)fWguugYDmez6QlzZ1X%2i(-RZE%P zMr{!CP`p&Z3frl4zz25F3_t1G=LNC1G^TbC)3M4^o%`uw>TjpF-9%GGCz$;brBWx@ zDz-(}|IwS$>}Cd8Mf?uelJ7Y@yX4OFALW1GzsvuBm;e7CmH%5a$h zzhz3>%+SM2ylKuFcQX}@KFa=u08w|s)V6dW3t$ulIvH-RaYg2K+GYBn%cra4$d2t>4k3g&>=lmhN7 z_@D_0gI~Y!?G5BI#^Sbe}}X9dvRV%*@?Yy|NeSL%KAL z0X5`{#?apM&BCt5v03W7g1N}>;}RWE6aP~>@*+>Y>KKer7&yHhUjKj@rr{AWu)9Xgq!|{ADP{3a>&7I%H$#QLF&Bq;S(twFa(v7EjbV+~GN1dw{Ff$e&)HCV zIuF?=8|YS!hSyh`QSkjTPa;(4>I9mUF7Dhs;PYwJ@=nv`(tJsY(fkL0|7ONP{ffNk zDw97d4_EvLct7knbHAML$)JHh;_T?x&TAI%pJo%P04U+1TDyMO5%39}crDidJbYk0 zO(;>TfDi=PMm^#p_yJ2cu4I1eZSn~ns0VxDbYUGvvnf--p6!TFbmzy=o%GB{nD%bK zM`Mtx79BYD7fDcnvQ^#qfdx4MtPmDDP3l?p5b(0hJMC^X3_A>+dR@?blb%H60(@Ww zlm8X%VF#XwIIAd*#UBZNh0*-`z<5Gn1BbQ9b@E%MB=LcaJ%OhoaH;gOxq7HYc zM-x~TjQPSpRDEZU-)PAf{739R9%}VCmDG0&& z2XcXdQ3$T2WO>(D!v5V>E?uK%**Gs((jI<7oe=SA6UrEM1SR~-(j^KfjuD{A536c) z{~eRzl5CGfG9V6CQ4!g%)6$0S4owKiJx-A*g@o=J38oNAP})$_=z-`R2TkIyLTLK$ zq-0}b2Glj{nMc7yb}pM3RwbQ?Jla$GrEt8RNGr9-vewDPluM^2%UI^rJ3(5T4hESZ zt{rXQo*)Ypzfy)#rgj&UOmygy*x6UxC;WnvY?kwgwax*be-wuL8x8KKtM3c zBjie@ng0psP1#!WCxJtB4koJx2op1;PK`2M=*E*rms}Ne&A}etl%UFkQ|nh$AK>OZ zs3$8|cfTrW>MdvYe|zOKzS(Oq)uODfLsRB`mifT(#>Eayh2GLqB7CZQk5Db-RQ5{F zB5$lGLQ+L)%?|#KggIzg;OVq!|5~ z`@6sJwpwrV9_zM9q*8#2Jg-)PrsoBuQ*^lR{)WH%JlEOp$`blUylK 
zR2?4bHR3kELTAUi>knbREUrLgHrfadxuh8#QdYvk#Y=gl)`_<$i7Lb$t(hJGHNa#=vogWx7y~i>h+gHVgCeQOh2;OQmviSdOb)Aoa=7P9`$v<0GQF=f@zN=nEw3=5nw&y*u2s|LnBh~F{p|3> zvQr;#=3FZ)lJ7XvJA}7C*@@rGC4C9a+!1t<3_#mDKgEOkPp&=c^%<#dFv z1<^FTse=_U29beHt8HkbsEbj~%(9&K(6h_>Z;p;@t~ERr9wPNHhQPlV+7 z8o&1Dn-_%DNhDSU+{E_8hlKJ6<*LHFNHs%UCD6P(1v~nz8?97~j zvt^h#|8D3T!b?}G2=${$q%HA$OZ%R{gJ9ndSCW|iCAR9M^%|lNL-rX6k#f2a7}qtA zwM>Lk0jsD9cJxGO$9=>QnG%jQKb`%B6P#BQdY_OfyZe~I+SN6zN1=(WIA=ayfF(M6}<)0_(I|-v6D7gU$a*IGM~>MNbcoLyB;asS9nY zV?7LYMlAATL=2Q^cRoB}I;3#olhI+$ZUMVMi`Y*kcVw{&RKXMNh$(7dj=f{D{LLr= zxS*LN5$BPVnr5EvR-3T%&3Ozo=)tJ>K5^` zkY{ppw#!xi5pn9Wq&6wPNCHYB(s`X1BtFNg#rcJJcDn~zm%T_Kg9_=L+3%N#&la85sc{!HVj@^;QMa!q2NI(yUU+;& zJr#RPfk$fkB)P#m))g^aTpcB=RNZo6$X=koAJk3E0hR3h+za-{`U1j(mk)2d9FA;S zzU|7c{>9lyEbK!EjuxIIE~@t%grip3%@mAtF_p2BurksuY1oU|HjxT=nW|35x3j-! z?U}h);+CKrW(Bn=75T%W!Sq5Dww*%9WyG(&<)iuqq#&b2klCxhaO?@7%}F@{SBR%Z zwPxV&Tpq$KYUmxeu<&kq=th&{QiCdduZ)6geaTdPn|7598+%6)IdkhhTO95I{7b&0 zbXhlgC$$T)9r9qd!Wy^bxskKa^4uw3$%4N4Bt4TAPs{3+xAyCHo`W=pRr}#`o%o-N zK5_{@uf^hv|0eisl@Ia{YGSDACXbX*R3b7?Y3w%{eT65Gt(1-NcooRCtQ2v1Q z1W5qx#RVfbsakAC>HU0-D-CNc?b1hR@bSxO8e`^jI5<%Dkiu%Db=kyq%Ehzb*2ual z>e#wE>Jz6Tz&lVtGKjib-J6R0?mU16$-1rR&~mRI-nMLZN{i7YKJ(ZjHjn~?FNsr5 z$NzGr>PGJGHx~rfwWHlTY1Z?Q)g^cHQ0sIerj@1XwU*Wz=Jy9>K_?uy%3pS+kB-Bq zO7UuotYKNS-3`PBS4>_#{_CaSm3GFZgt*gc_UOK$SM7*#M2~MJ7 z^aDr4mrS~K#q!Y>M??pt3fsoo)+xO>WZeEJIYf;Lm)RB z7RQRYpMXQF%}y`GE(h9(cvu;4EsIpRUpw>_ss5{8s5CR(-}BYs1@DdnJ5VU=Oo$jmg)WEwtm5#=hTNy8OSWE54R%c3d4rNTHDYLKdz zp_>isbInkcCX*7IVBmL-5RIk^%3vm_XVk4n-|B;k%fRhl6UM{lcnBKKrXfcxV(nA; zAZsXPA-*5rmc@YTub)Yz+Lx3x7w)2uD4OJdfm-l#`HH63E!L%53~tmkq(QF0KtZq< zXTn~{W>YtgqlZoF+B)rWX7Oy!U3!=zt=E%r6zcgFeO>%#ERNonHruq-n=8K24F7ZSaQVF6U z3hYj1W$kA)w5pS7KvzS~z|W!fDyZgWRPLA|6c}xDabO6jXZi_$LiA?Cb}kZT45IN1 z-OmoK_%_7(D9QzDu@LA!m`Z%ahbOsrks}av~#$j&JAP_YpSL+>o+as0t2&8RX zwfu5$JdHBU{vZbCP=c#ICanUs{-ue>BquL8rn)`?b$}d7#v(35{%=Y6+z3P_}*7o5*i>M8CMzW(nsylhoTZe2iUaBofGgq0r zXAZHJ5a3IPQdiA6Pc?>|%ghQ(*%HQCrSZOiny!6@Y28AT7-wBGU#{=F16-p$xRx@& 
zDzq--SJ!{gxj#BXX?O>p>0zOms1c-RA;URT+I)@d^;}nH8X&+!!b$rb6JWcfhIvJ% zz0+ZO_01Qlkmu0BE#Ww?+t^0jJo`Kweh(36O0CxjkTox7X{@))UV&EFfP`siL8I;+M2+mx|1V~(kCsGG%onRwbUjRrm6b$lz0{>-gt8f_fn&w6CZ zK151$NKs~QTVxroe7OhZB`df}=joavB7K=MUEPN7B{Wjjn2Y?LnO7xlJ~|2n*_~u9 z+sb36Y6s5s70}jrSLE2hX_sND5e?K>ByjBZ4vBNR?~YnrwRROsBO~uo!9d9}aL#d{#FIH z_*0$`{Q?I2!|i(Wex7q^4^hI}^=mTPK$nU`58& z40QQQZ}ahNpIdBL(`utHxeGEBO-a%~ zN#{oC+6YV1Csbp_vwqvu7eb|@wfFObWfI;xQNsrq%inI6w0tM2=?w@JxifcNp?oXhux z1H$L^xvvbISur9bulwic`30+7F3%l9F0Mh2F???idz^b9H}8q%*@jGQDuqkj4ed4v zWHVc*KS6cw{89MbieQ0ei>WS|QK-Jn{9Y{@Rk~+;EcW`^;D;#VmUo#7xJ64f5;LG#ftoBIVn^%D(a0HvhxR+n27<3GNUKa2L-Qx#iJt{SELzr`v*0 z20+I~4%sQX&oAaIG&KFI^kZQ8=o-6sl^X?R&bM$b3apCnR<5xvzDhmZ?qe43%yw0H3=APcvJ)po!OV5?K3>Ugs{|5 z5HZI+3|*>8wX6uh;|pC~0V}sNC~TD+0=H6DVELZxRm={Dk8?VdUKM!vUAEBU^FZPz z+6au0zm$B%y?YVw%nzH+g3Z{w`E?G6Hf6okCD4B~Be&@us3^rA$QzP2z8ZcDXj(Ku zGu$9k(UMu5l_|d14ZSKXM+?A<0xToh&bVtLizG~q4$F}Zax1Ve-ER4!@iaRj9MB9i zpD$;Y=@FkER1l3u-%BSx*XzS0=uV5z#E?&_=Khgc2aaW`6ru-F^g{7C+3nj3N^1P_ zZgbc=^@5R!erpB}J1bTXu&ArL56YXHqXhjk&FdcFjnu&bK%{|C=f65C1Z7v7fKv zX>NLqzDJgpe{gL@<-KZ~hgnVI14VVT7nux_7a7^0P&bo>Lpv$E-_+al+;oV=8{ zHh4|RO>$R?gi?Xs8JZH~vj1z{T=8eNwl9{kPPfSG4n|z69qL=HtoU}m_Cw@4ni@EM z4KDIpt@?5)EKf%(E)yN10&;nN{XZk}JpD9bYc%T9YV(WL`jqo0*!i6Ier%Cr^5#(1`v?May)x+XrXO{elV$IN^k ziP-#=)dA6kDr$a7lbT*1UZL)(Ji}k~Kyqx&`p9|Nsf*nx<(Yf`-fe}F-awmgh6@Lu zYmDaQq!D`QZp>4gF@=5Fh=Z~KR}_^tTkRg$)#_*sn=A1iJ24HpU- z)UGuVypSR!u>0bo;03xt=T&F7$I~%0Zfu$yTa+{;^iISkL-p}P2+i9@8Y^tJJ{_Ga z-VnRFKzF$u5SoVitu_*>a6O?C{Q{ILh8p!-vNvQ~`kiX*ySrLv^P_v=y!r=~H>G5MvwNO7cQ7IR%z4416;65g7JbPUdQ~c4e0bckL0RZ?g6UA@ z#)UFbHV@f)n8XynAuwIdF zMocZs^Un`kcD}1u_mVnK_g=q9g_EL5!S2*PfEJQ7So1_w zb1oX~c6odfe2AFiyXv(jmKTgdSdX6QbjfDr$6R}AE#9P8ymMEbgNkvzW35rKbA!vp zxe8J0)-U~+*&d*~ODhklePY)+{eZK@L~P!<$s1+xm^tcbUeM zhxuVgk_-DZr{M<}Z0YUc?bp95o@f2j(z)?l`FB;1Nz^=Qga*K-TG?KVm>n2usey(l zOS>OZeagjH`ko}FOL6W>&2v%RE-e0R)ki*!pP#pGjSp_qpVH4;7OTAXtSjLPepb~- 
z-vyfF?Dh>p*)A?EBidzTt?n3cPPW`;e_iXX6_)S(;HJ>Do!zPJQy2yyak9HxrmK4$_^bG+JMc%R7t!lO(GnE6xXgej>DqeRdE(1s&vjabnB99eq_S^8 zWVy;4DKuEyxH4mBJ;SorOXL&RkbW9#s$r+T#?(es=Sv8avqm~tcUbUdb4$!ZH_xcy zTcsiyw-rk-eNgzL=Z$#p6Lk?pVGw{#mALIg<<4MhA=B^&&6#m_W?f0Q7bwff zXu67tbuT&gvQu4Vr!aVnugH+AmsoUt$n46uKSpXMd-r8RHMQTe3R=*`|HRF!_K}?6 z{&}AqFq+sslFrjpua2sC1&;^^Alz~FGmBuz)kivoDJpusUY*mtP*xPxF zR6715kw-mTCQ{Dn%zLa7+4b?@MiV(hX($m)_do0!)%e}RPF-m3ZriRD*5;K){)>A2 zSPKtUmVbjbif^485+ii&lnVW>SPLza*=so}6D0dBrcGP0x8jaMNsX9y z-ZQyn#pYRo64TlSjfbqIg|&a<)cK4V5{6-0UGG1(r1(IMmTmaX)B6)jmZT#ac@p$reI!sv;8C9^2{s8QoYOjxzx)$8verhvwGW)dt-TbQEqMf|g zQ}0#!`;Sq3$*xXF)Fj5fTu^#>^F2ZL#i~DDd&xbyA*W@0 zv^QEI_s=oroyq+P@~D~4CcX2faZPsfl{BT_`*4bV+Qt@4auGD_lOx$}UI2Ma2|hOX z9Feu*ncPyVz?dUmXN%@>imFy+X2hDG%=$Gzz;vE)@q>|c55aC7t@R1MCTz!9g^mKH zZwc}(j&kV)x7xpH$3sbl%kdix9i0?5@#}Y`Q&WoKG)9L+d52?R<}&d|!7%0{#4^ z!P$Dk!1!`k-rkD2GftxY)^=tGI-H?Hdft3;DhoQJ5^>K}U;Km1=dD%wu)l^R2HobW zejj)nQ5Gxd1@roTk$E}*No6A8o^0j!J-l7~cc-}%q*p~f%TwxjKgGD7aN(RJ#MYo! zqx-kx?)tOyLKa=A`!;@+x4UKf?y1gm6u)c_11Fq6Z(jW7l`m7v(5@0j9UOvJRlh8I zCvSf~PFSps6b5@Su)9L`_9YIeq8BKmIQ@)d*4m diff --git a/triclosan_tissue_distribution_httk.png b/triclosan_tissue_distribution_httk.png deleted file mode 100644 index c0aad87cae06a220dc45647e35b4880601ac4d1b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 247522 zcmeFZWmuG3+dr&Z_7(#KK~g|L2^Elrp;StxOQe;M28p4=*dmQ=5Rn$71*9d%prnWH zQ4oe6$pHqK_q@38=l{m1=i~cfa~vL`2Qjm*wa)Wb=L*%*P@y`>a`MQLBUE>8E9x9M zLMeLW2sI7mU+_O=)y;kI7s^BFzK5>MBM+~KZq`TCA9_4?bn$Ssv$)`C?dERh;w&N{ zbX`F5+67w=kH_vPK|!bg^A!RvZZ?8njL}{2Q%*d-ZRmdF2;*7g&rzGSUY#RHj~uz9 zcvH_id1?Q6oc_R$c5}fn5VS;@25xZs{H$-kNmF}q+FUO#Y1PI=beI)Lt%xzXNTmy^CD4H z*1}e;ug1kK`3f@pwFmEh3#KYCXT;@Xd9}rDeLi;Pm&34aW~9CE_|@V%VGgf`ommGg zs*Sj09<;x?bH8?_#z8&m_>m)8r;tbcgs*7siCyPeHmf)T>*Hwm6@6M>oe^!5y}`uG zC=KIU7weCAD{w39+GY!|B(GUg2O`fOm!7VnaV%!5+ktt4?*y$#%*=keW} 
ztoO@NVlce4xj5u&Bn7r05xK4a)lPYzbRHf<$&7k*X;`=EgPW8l0oG}qrXJyQzk1;+Vi$CTJsy~D=pC0s-u_-!t{UTGk$Bx}E`V5!_Jv22;W5lSLp=4htfv!Q)Bimi6Xm&5DF6W*A(-7#i_ z&3OYU6W2FGV&+Q}k5Ay!f>NdZ8O^+ZCO0>@_4``+hG3FdzQ|iNhHTX0_5}(*O@;Y{ zJ$Ig{nb9V(I)1!aGnWzGo2#8^WYW;e@^L;pX_k+|0`>QH1m^e|_VHhV#7RGwQTu{I z9b7}edZ0+BsBU7=!H&COmmFzn14G_-satM|V2)k+G#yn=XdumE%iO!9Y$jbQM;^7a z#Rri|I9*dy(*bv-iF2FxtyqU0itBr2);nUelbmZzg3S-s-uSc~YbG|*X}(7m*+y>; z2W!4m(&F~x%P)E->FiVYJx8q9akM=>A7w@Ba;TJSJj-8C8HQG}6aBJY-@Dtw?ziMU zLN~?CTl(PdsZUL%@E@YP$rEnv{MDVx(h>ceulw}%t01dA*8Alt-8)A3s3t&Sknk&=w=qt!s4omh{{@H8cNeYNCGJt$ilQ zIE>Gk#eTa^xpu?{pW>br&F9WiS>i2c89%b?4>yi!YO zs0+g0VM_7G9%V?$#+ESW9b_s9p{f$6TR4I`*xn*l??4(gHsx*Zzp9iPbs;_>RtA;L z8ZU-w(2!Ye+{I*mlX^yLk@)m}#x>e*3du5eJ*RVgCTLe)rGwCF{ zv}IUn<8=8((PA$-0=-ehad&qfC$(=_y;=vgxXaJ%;OG|gLoH0sn|gjmt~ImKrDi_c zpC4;za@Exd7eF*^nB;Xa@yp^>aNl2z4(dQ_QJhth4Os7#nE%d`*3o{>z$E4Xc8vJQ zBhF(g6z+ahS^-5e7EXylY+PLECHKKYGN?{fL#o`5V>7-amAThDBf#sV_X!hk|;o2`E-7>Kvk@TFbN;7Ad3*NO} zp-Jf!`bYLzM8>j;@s&~zQG@b%raSCUsa8)n3S}rrmWnnCRlynE^DERd)AU#puN&lw zlFTq{sfJR&w$94dvg*7;2qGt?;V9{AV5_aK)9b!=EbA1vTN*CWkEZ={oA!MvX{8q5 zH9IU{B$oPfE;;)2F{Gn~7rV~&yxXa#xp)8J3+1XLhoWk?2l{p7j1-js71mG(sqG)n z*Tr2UR}YeVxd$6EOk38O(F{k0TsyV5a>?E;&C;Ty0ao@Qiv{#IxoEG`bbEMgr@1yW zOOZ5XxU|Wu8c|OY0dx4*?Q^z1d}HeQy?ZdfpeT{xQm2r)!s(|`$HB?O^DH#6?J><4 z3Irkq9&hoJX*0Fz4|$TAuzRtqq+drhl4vpxWo##ahWI{_SH;RTM4Wjh;=*Ujo)C9M zpA7aWXotzm8Hu*;O24&KCnaF-#452tbGwu5JrybxCn`8VzxXK8F1zN8-1dWv7DM+= zF`oNX(3ob>r})dYCg3b2nN~{PTViY}+W7gBV-USEE>+AbYO6I6a7KFRV_k#l8v!&& z(4Ol);jlUd{CvS!RFDN-h>yeT>7NV<4<=x*>X}DFgYujhF0tepo_%&opmh;2#%_&} zWpnx5IP|v_Z=xOacNe!%l--hd4k6->m z1CB&b#Mv$mC?-WN8y24r5CNd{1Yn?vrBbI)KZ7@kzmi-U-agoer)WSX zd^NO58JJtI0HCJlB=V5FPjp$U_THI!vxrNhN*jPRLy-rexiDb9u}-dw4c^bxCjm_9 z4?7K1=d+&8kf~SadSq3S(Fig#59_x)q^a ziQX`r|9a;pMkPK}E25_GwYvDo`@ldVlig%OE4#Ln{SymRI1~SQ^q`TDg_N?MM1^8X z+{h`xx>q?_Lk6E!)4CCyK@t@UinSZ zS>8b!choD2AASqYI?myLT{EjkPdU;eKQtjRLs+$W=8bu(yu_sUbe*V*)id7g)sSn) zV}x?vS-&wA*VGm%@na_$IEc_5(^GrcKkQDci=*#WQfR{P$GpvG?ZN6OCGm~Yz#%X` 
zq?2lB&L%Gh=$>>tBB)pPVm#h4Uu9;`3(lsv*a8yOXcxrnd5^uE`xT*)Ip=5w6vW+k z2l{7yVAp!KB26Pave$zq?w#W!=hmBA;uuTxJU=Rpt~EDGv8u|O+dAWY#E_lb5T6~- z8}zm*Aad*M{AdtBeEfxoyrzvl&9p&NuZdm0s~VC&|4y!yLrFQLnH8F7 zPxCt6u&#sbNuF3$-Q`7KLfvV!9`t>;I)qxz>%XxjU;LvbjQW65F`VySSbFSi*gSWb z*M{$6p#;`%QZLDA30LZc3#dAn7)!=yy!(^k{sn%(vpaRGsnL0&oRS44e46=uqiWb- zs3)Ug2oZmf-#6OT$7y_%g0|oOeLzPQ`)sP;3U}32;k)#(cA^p3+JN1SJH-vV_NnVK z%9$!h6gy4?b&3t?EmQhIGk3>NXsfWj#s@}I=OnDlM2P^R!ox$KeaZC2T-~FKp{?I% zoFRjYShHBvjy}t^Is|-_yUa{YL+LpFLs{48QUGT`W3Q@U%2+F3PVztZjO?=faR$at z73XP0_>NwEVMHOKw#0m8P>IKbzs=-o)B1TrT?Mf8l^qW$E z8^vp7UK2m&i5rivXOB8s4h{CYi_(ALjp=+JL_V0WCr-AHJm+*!q?k}jZ&G436~#Jo z(9-CS{6E)FmtV=tz47*RX((-j5DRgkpjZPip`VfMyBBUYQ&6$q<_v~tNFDHx$r**`2SHvHZ%xrF0mz86yl+FdDAsFU9s zwv0x=`7c_sLh$MDDHE&gVGo{o=QR3d7m5xvghd>q^$;p?yxw${&{?|YcU>wih0-dF zsIL}&NL~1@Sg^cH@SGmP9H6^?{00g)B>Dtotp{#2h@7J)!nx@kf~~1wu``MIm~#Kh zmPJ4XZFLMk=QGBly0y0CfQyxp^gkzR&;{M&TF`aREaY^BXlx}Y)IUV+RJL8{NmY_< zN#2S%F&!x}*?N;o*Ufo{@-{EZWqO<+3+z+Iw0Q@gmX;$E64eG>Nc_08q6$$%dRcGX zj@Kyj;SY@pG_9V_$Yl7^pX!mE(NR0!v%FKMQ_dbeTIh4YlhZnSHnGhfu;(6EB)cri z=3&T<`~*}cQ;PYPSKIr#zDzdQOq}62-SsEGWlxftYJ#0Av+%ZE?%0HcFx!H{%DV`s zq&5pt4#F0{zlJ}+xQ z`kjK%xm#8|cGuE>%;%&zZ+vR1sN6z@9J+MVrATUFxr= zTc^ulXcZ&+?X-xRJ|Rxuz_en3ut6yb(COS7k@A;q(M-H?QNsiX3CALx4f+kO?&bmt zqt#WSze%Gm%gEnZbasRxaI?UC=1KMMz4dPVX|@5y*>sw|3kYGlj3{}ZMY(zxi%XhW zc1R^H>`5w@HS<9%L3u?qQLbjh=DwSYi0T@g>knH#r5-s)-NZZJMm;;%BT%4hp+EGsQR(`ZP6=GHiX3Kh&{!(>p~Z+4x6=EN==mcV(?X|NM0qzC1_}&G z8rK$Y&A<|gi}$ICL^4K*WHq*~GTGs@tQtd5XU1GG``(KgWuGW-BUNKymo&{m9-fm5 zcW_!pD?d@96{YDmjZw;c_}AZ@xfVc3)pSuISF^=J?3BaO^Mk{@?o!O_wR^07Jtg=a z(IsAMt6_DmuI((l1W98Z&r^x$FG0=NB=W}3hTsK-Q~w6-=c;BmR;w<_GXLe^)%W;lm4B)Z(dC&_!5Y9(-BQ(Lw_r*7A>1&-*_~cEJ@{vYh zA#n#=RhY;jKK2ytm=o#4F0tz3dFG6n)|UBCJ`S1rWs&#r@|%mP{%haIs~P(!ukScQ zd8#fGe#SBhij@ie^;;2ceGfUzTHDomC|~Nw{yN>N8~%4f{3T&8Q3EygvurEacjQrr z;+biN6mp3(Z1L_OB1n5K2KPZcTCaGaDp>CkW^EUOe+-IIclQJBE^q40b;;CFpV_xO z@=32Vmf*x0?XRC3W4!t2s^f>MNna^H1xtj!?lFH+U+t(+rfuDr%Z#4QyAu|(>?$i% 
zp@MzK_T|smMAJ;M5y`)(&ktK=hf#?SU+}V5u_1#nVUmWv^oK=;hDE^noDT;ZIr3TT zvcA8bM7+Ld#5VzNYx>Vejhl_qo)hRHbrp_)4R2Dj$hl>nQ6q|^Ch00+?+e+lL-9F3 z18M8d9<^WHQz2awPHq%ocuLFaQs>$=TJrgiQ@Oa#PKc$5GUS{ zmu;_QHg67@r;!7~^SBR4IPxHwy}KFqMG}Zz346r0tHma1Gw*-Wa}hA)zykq9{I+%Y z&x3CRXcjA7XQAij!J?9*MO>NCB}~?@=MLPy$)0i}GRwy0+A)QQ?8vBW-2RG}RH4`V zQ_Aes2<4A>^r`nDS|*}LZNMV_N)OIB_AzTV!7^(1GKw)X z)8qT)HyyfBig@qxPNuAY%>e^^%osl{TOk(9VMxLGiYMyamy_5L=0#40n-LAs+Iuf- zUt+PWoKHBR5yLtvUYB5E8M>aPZndwlf#`@O8ULfWX8xA$W; z<-U)#1eP?G(YAzlEoGNhDyn#e*a$??4KcTapfj{oNz-jRo`lF5o$}z2f8-F3Aag`teNf9&H>DfA2su zkhsr_p0$puWOtG(;+ZTN$hHX~+FEVhhqvfXqgYNHd6dMD!`MEj{m9U*S!ckiAx*n{ zB{d7=wQI*InlOB;>`_O`DgO@f@>R2@wYH0-Gzou@?cJgnACXXWY1Af}>lrWSgX}e2 z@Li=I9f~7LT1qozn4iI{vqx&GXA`fQ2JX!069q@_?)ljv8fkjr3B>!zmJ*th6c$ws zTSGwdaT);r64mQhP+=LCXyEa7TTnxJpHXt|dr{n0)Vig%Pm{Ll<#ARy$Mu^}KWNvu zjUX~HsQQ}_Y9?cyA0?kD!jBrxP}l^uyqyhAC9TEb^DAcDm4Pi<=&U5F;1HZ#sh>-? zUJKie?&_I)9HHGq+fS21;NJxQXXdi-iN^W}5*t@1*YjKDc}x!{6SXI?M_IU*rX_{$ z%8%YIFP$f?3gMAVIUp9YUnOJ4AzVAQ=O%Du4Nqj0A4Enh~gWRXaG zyqW?nNo9oV$wgGDCCfAK;>Lgo+JhK0C+gMMSCHdU2|gZ=8ArHS!j^<-uhV_Gq`i{+ zl5#5~&&Q%Bw(^RfyV4U4D_cpxs_W&qR?Q+vj7KkFFLDqsms;zc(h03ZToMlexxa|$ zG`WB^%<)yZC_Hy5vto~t0__s*P3u)h<(?Z>w4p~rZKFZtccV^7>IY=i(ug^#G#2%N zxvX2x4+TAQJzyT>+e|~NslaB;2_b$}8BN*?j5k?aqq#l_v^9X%d~ZU2;Yzu9Ed}rz zespewe&Oh?R6DjGcn4M0S-rqmJt$T24 zbpk_sB>-wpf3Z2nqxIs;+)J?S=uuT@G;j*D-zF(QBEwIn`CDaq$9tBqn znMtD;2i5uEtFn6D39PjqemU~1Lil))?}U&4z2#^0^L?zwT{e`*Z7{0t)fQm3{GD_Q zE-D27@;iHKK*=w1H+0Kb6*#eqoU^}%-HJ4*`-OA}04-1I8oK`|;bHIw5G9G4<&O`) zEmTEDWf;{!n{qd_4;VMPT7;2G2~<*XyN4uTw&`Q$x|$551i}b&1X~ z^wO|Uir-4LojuJm&x(3E&nPB_OJ$bCrs@xt!jXftu5&nem)XA9(15k?JUOZY)mC9? 
z(x`L}P{#J^WuqMlBV{&;0b3(H+X0|Z&4YE*in9TMMic}I&5xbe$~tt=IM7>!0zehm zAweHcuJK$p+N|po^WB&$hvJf@!tQekz0l9MzgZll4ll+g(dZAB460(N4od*r}EGMTJAy!HnsfT@ABVYPgS9{N3+(u<>dtj zO3gP2I1Grv9?dE|2hgTdFgRe1vGg>;{@cIHp$KIIdKFLyJ~9tfL(|x?M5!S9xb;WS zGu|VVxiB1g#=3FH^L^rJnd&opN0q-qZx>)@FMztS!KQaq{=dH*w}W=j{pGZ*@5XKN zc8ffP2z=KOwXFZVI^~uqwmy!7-!J#}SC!3FhNwF7h%#TjtuZ3 zr4o$2rGZ)*9$?jqjq2{{GfdIA^xOyIniHX$bOA8fzF`*AY z9`7`~XOaIx(uU$%VR`Resrz|ZZ%gd5_G?ytSUr~x?9j&0e2 zoz#bPvA<3;y{hpdq`uh>?AKPw>Xsv2tI)IJ&5DZ>439DJFF6;I@FoRz5of0(WVuF_h3id&7lSwdBtlglt-ChFoF1< zWHXR3*Kxq}wkXCxva9tmy$0@K55=NN)Oo|IK_iC&m%cpRoEo!w-vKlzya!bXh^PxF z!W`_@Kx?Wm0!7m&0N~O(?sYpjmFAdzkS!OfQFH0L(@nkLRrnW85y1C-?>zkRp?-1I z@FVDtF8TrINGkQ}YRQ?U&Ph6kRfy&j-bPQ%$RqV?#pRSzgC0^{BNwpy3%9$qY%Jk1 z*HdFuQD713*#qX|UO=SSU@oGo9GG(i zBi;YKf95M%DpCOY?%XdC^=Fu3Cc#PK9+qDTvX%n(%TNLcS%g6vV!aHw<3Q7yHdX0Y zNUn0j&6ZCzj}C$5l8{!lEeEB35Z8BqHqoJ|=FqPa3O>y&Y$i1&caC@^K9>xzwp?ju z6uiGhU*d{sPz16+ECVVWa+Ol*p+dlL;DIGSp&#ec$=bqKe66Xg~N;6N4mj$7ci#{o>Q6T#8f&@xBr*2E{* z>3sHScu~v*_9h~{wT1(NZ8+dN0tIPcw*iz-H76?s-HfJyHw6yowOci|0Z$zeM$Fd<7tIVyckBHxk#T0xfy?a0k;fiVvG7u25sC*2Y zY1Xn^e9UnkLAXfl0DObuURb<0?RB90&R#nPvXVagceOrtz&tYl1Z@BQY4`30!`*1v zt^4dxW%&@(c3160%v&yHO%>^oEgIsrQVtUjtWx9(N9OpnoIC&qDdR6l;qphET=Fb- z_$L_Ds@?Kr|9v^Eaz>#d=>#^Fr83jjA=WJ_-gmW$u@9uX*f;PMGDBZLmG3*jll*E( z#jhR=e(qrx5GuQdXo;Mqk&SWyjMW39GR$f{=Ktve@@CRawTuA?3U)zj=B7E|!wsN! zv+Kf*=T8?qC8r~*{Q&^vqh8;!iVs=5@4wo# zSbR0!HYogB>1_eah=a8I@h|TJ$6E9@q?z)Y*^k=n#3rJo_kMp{7jNgUGVz5j=RO!? 
zs}O^p2F~39AE4q4UEUeu3Dq%ZD zNLWm7{II61+tw_n4(rZMERNP=48T{`InjXf_Cu_JEOdW_oTWNDUjs9#A%VI zd+p{2!kx0j@j|&4?j*;5qAW^bVv6`A+6#x%r~;*W@QASECpnCE5uY>DiueTY=k#Mb`xMQHpt+X z)nnVG6!XqFZ={vA9p+7X1Q4O!u3MJafs2+u*iP06u;4@o-G^LKfv0zHLRliY;!@?6Ubq z;e3K&)Z_M?7q~}Klb0nG(&A5OqhQ9za)hF&75fM)~JJf4goyZQ`ManWG4ZFg_v{oSniA+^y6oPs_C^shTy zDdN%l>xRA@UcH|6%9z4ZA0~(_npK|Ng&>8EtuYroeg;5qrz%Awe1q_g&nWB<9==m| z6lm#n$_Lsm#Pe+))IziFL*%~ZNY*#xC{HFs&M+GpVfP4j*_!WoYl}_VMXn1OrJeMI zJzHJcUm`+E&2MKD#=AkTzroyk$d-(;)p`^{$-WDD^I%ImFLDm#49Uat@g%Zcj0e6| zj*d}v0W@*57PHTKjC+tABnA#;kGi8$7oQj{Zg zC5IstUm-=nP~I(g;mvU#L7Ex}U)X@SfV5@~-P3becFWKwQTEFv_lDTs-b{4wgK7^Nu{J8|=d)oS3ZifZ!(W3*3TIym=tF2yCwf0hrvYGW4y|$CUh!r2qk^PVc}Z#)lk0E z&h?vvdtP;!oIUT>0!Dw!No>I8G}x~P#BcZF+`U#D7p2YJD{VC!y{gd$IDV&C+Uq^{ zv(pk9GAZr(CNI7w>`^9!?DJGr`XeQq`#54t+^A*j(`ek=idmDJWpe4DmR@=#&-LKq zEO+tj-*g(SY2emtKtgU3yaF!SMqYe#V+fup>wG3z6WTC$J6ooG`*ro^eyf?+23Gh5poBkhxqys*W5mU_99t)N@zk^ zm7eHlL97!h%qycDw7X_oP!jMJObHXr3b5<94=|Fm2-k+<4OnW>hAA|QFUBr?&`Egm z&w*#k7AGTS)d+NkR}oO1zi45UN%KVzNvrG=R;k1I=dxf#g@$iIC(FL2`uiuMN!d%sJR1vCM4l!zw)Y{fh^`u(U%00y|Zc z!0-X6@jP(B_LPK}7)$70cLeJCS`sVah@VrS0B>Z(2Lfk!;nkIXe} zRai0Dwp^9?X_At62W+7PGiczu8brx^AI5^n`|CK`bow!MK9GDJ8*th<+EzUubM~OU z)jeYhE=3#97~1UWsZUDW;fQdDIcAh0hGtmy0j$-Q@*4=io;NJ>fLHM|)Za3Hq&xczr&4{5`m#@&j$b ztj_nzYE32K=}#riDKd~ylvG)BW25xt$$i>#+M{k4pY>78ZU68X&^MUDOz)fRxF}G3 zY?9`Z;~HyXarh7;5)i>Gk{y_Y@ED(@*4_0tn7u5+A-%!F);qfZM(&GSnm)S`ai5ZV?hcL+-i_itrMhw z?n@x40O6)JXkh&=5+FO&0Feo-v($jN4jOujgK3UNO7qoe0CM7Ae8SKOVJKKA7?xjf zvY0wgS1|S!{>M0I3Dz}3$vVsr+aF-Z+a^+i@&^92;0w9ss_+^l!DOVDjRUJ|dLrER zIF@~i1O(7b!nP-UPj*yQ;XfYF*3w8BA{FfTkq`D4iy#%fi>8W%1#ym1VqhlJDM>?+ zG0EO7YF18npr~+E)&W#n0hVG%5VdlH#1R_p1+HL!ak!`MazY6>NcC42WaA;rp0@(x zj2(7URDbZUdc_w+!bAyF&O_SVEDV!bSW#~B0)gZQl88Dm0sSYIK!o-$k9DwBPtlhv zC~5Swi8p+dw>Vniu3Us``|%2a&F#68lhK{C)E38A^mis?sZDo5szflt#S9kj%; zX|SUau~wzGx>B^|JofM0lFY9Uz(79H(3T6U8?o43$e(3Mhq0kF)D)bUI6wVQ;kvlDxmwY1J zqU|^kFGxmMO8HD0a6NruByD$z+-y3bFGmw0-H1xOrX%$SN;ya6uVAL{dl1e%wzQWmHf{E8`(jakCteNf!PqUu?D4;<5XwiXI^ 
zR5_YGMWs#i}y7FyjVU!ZzS-i59Jcs+Pe8m=j>zc?E>TpMX)^@={(02~c; zE{pl|qA+jcevx>+692-rl_oZ?@tB+&Arah2{@d9YyTt&!DY@x_aEs#XvtWxxB%V?N12zACx2rYY!$kR|(>Gl9hK@%nLrM$Xq823R{B#kE z*Ao(mx4{;VsXTW5EjcpO+Ww>`vxKt=3URQAkQ}T29wZT-W!VyC2V=%}14vuyh+&8V z=cX6QkOuCpw)C4F3Ji!YcR17vhR|4zB)pD+x$(o1_JnvOj?oDUiT+R|i`W$;xm%qH zv!`-&AILcJ!5ev-x7P9bi_I|bS>rLUgAQBeVB!ix=9UHgBH`tA{wbgz*2!T%=of?1IKdG_oD;hzgyuh92hE!VT6 zHgfxZxeuspyqV9;>vG7l{=yZ5ljZ>qMOC;F4kVN{JW#jZL8YT$TKy;Alphv1T=F-t z`Kv&T)a`)7JGeS2d4^h~ZfqZbL6!wJm*ge^ClApj{+#pC8U@XzXH4Md`{1&B3ngS! zbV)6@akn>R#~jPD!05?A;&!GGC!AiUd&-I^V=Hk<$e=fp=l1wUC84%q1eV_FxFIro zI_t0q`5%pH_NNz-7)s__$qfHhLj!|10`(ic<;Yl;-r}lXT#&`k{gBVG3G7x77-?l@ zn+j|aJAGdAI$OX7w)*+UY72VTVe+ zm5L0-v9%n=K@#b~ue{e#*$_aQwIsrHfz9nuy*;X?b><;vYr8hYExn*)?Pu@oyEn`u z-0-)pF?{o*hBvOuuAM?*a>@e+>&sQRY#4ne*umt2D$EtBIfp9+D6!T$+h!F<4fjkOY`^iBJev^`pC6?#6MXJmfgfJDi2x2@;i&Qmqh4T{F~ zkW^paP^D@jxH@6uE$wrfVIXujR9uJepfl_V#@1bX(EFvLau#n35|x_6*sUI%(q2K- zx59i@IJMdR5U%R6M-;aWni*@0NyrfIIw7Wbs51tcadbj*vkY$>5T}9EjBivl40BUr zhBTFN6{78uJwcWjQV(leY=A)e!lNXo%DsSK=0~}p!_F@Zb&B?>p#|%H-^!eE$Y;OO zO^ah4BNqqlJD9>xSVVcuX$_UxbngaOfK_U_w#AeD_|}1RrNgIAu$T2V_q1(b&ZNhC zYzuZm5nO?=86*g~Gc;Ts;jke`fK@_+MTp+7hB(#io?-dx>>{Dl9CCHe;3P2>pN6bj z+YIl)0fdOOhfE-lb#KxJJlR|%@4_;1Se?c|bTx{UW7R(6D*Ku!z48TQPV>zK^@;V2 zhN&_kItLLr1wlng8P`l}Jn|MnPTR-9RT71@hY1S7Q5zGLZ;N3Le1Q)6CIfntLvU6b zoST_#KDfvuF7AGf;iI=%D~S2zO@cKi7s?Yeqvb~Dq%tO&#j&Z#^#_mM6n=WS$}Y8# zo4G(R#)^lR?n5BHm~=oQB>7j-cbmu%UptkT5LIVnW#F_g6pKXB5(`RG!-=M@1jJTd zZDS;f-ji=?K&0Ubh(Q?#MYKVFWa_O=Y90x8_(D9?l09PxtTorlubRQpB<81brIL;w z`%6uW@%83M5YQv5H0v}Ur3Gy5ZffIKy-Kb@G;`VsxvJnqDmfTG41IJFQN&KPF`~~e zLh5U&2Cm2W)Ic3(^L~_+o#;B+w z4+E#Uj9i9ZJRXG(zJ1QdxJAA=NUxb0ksImN|?8M@oi8a`tYt_K;B6o&*F> zt!i0?ILXL89}u2<&~;YG?N_TPyt{JP<3rsfc2yJih|f_mpG(vuBBEn(KOx(58??^E zAH&Ger~#*NE#&Yy?tB7CPJb6V&sysm)5ASPIY*)w(c6WyFz&4m#=li*8AP2|wCAEZX&W?hOw|;v&VOnlQTg){829s3} z*fW1$ZQ|X_eFKW3c(i@=V(7A^qMOwmWz7Ch*r1xay)a*xEDo6+f0skkeq~K=^hTxbX?Yy z-}h*6S&bNnts$kVkj#@E5Q;lh;)In8i&=$X->ID>aOjOqiV*uqp0%lr&N|drawbl9 
zt#y-kI{oJJ|AvXWJ3vH_UzAvoDdJ1b10W=Yp6|V!0wUx*l7rtEcWuZedTVg?-8#>; zG6ZQ#bBwxGPzYXN<>m)uqxM3K?A4-B>V$U`XZWoT4r*4KA&U`eqS}MGcqH>A<)E_1 z<|ICnVHO|H_Ljd7>7qdrC5?}98xG8L!rUxx$y zq$Z;G))m|t*xGuJ{s=wIyy_f^mtUAdvPAgEX%%L!_M3G{8L3>laKXW`n{_YY#*JiR zxWhhl!?lJ^+HRdX(k-j(yw|xHZ^jo$Hm(jG^C#z|zN=G{pG1+g98xm(*uy@^VD^_? zTE+xRHR*NbMe!pefH&Q?UVMe)oHlmf3KQaWw@w6#%cL@QvMo;CI>Zo$LkrB{&KNBv zV--Ev>1v%fMb&;(lJHg^jHw$rX5H_tE=+&)y8RRJ4dyHXc2G{uL3)$mQ4u z2ZQ|1b2!tTNn#ukXL(f4tNcqI{4I2aj*q#e2MO7_U&N7+4cDwCGCu8IhlUnyyf3>A z&MVI_l5r}OKY-Dk{srO~U{)h^61*XSV#0W}`vjbi{)}lx!yB9;w-jD?FW)nE(jPKf z^%tz@*NKd3Ebds*;t&6J?2L>SMs8snnHhPI)XJjufef3f#TYF6{h|ZY@yDh=;YfCl zHDYKX>cs3~>bmlh-8UEOPzUL}MO-lq$$h9%FMsYWuPALl+zbQJtL#vd@C!x-xi4FV zd*k9C*-|1S?URAvXE|xVD-C!}>u_HFx__{|)IdTT~0S0&9m z{MaqZ7k|0Iysg@AhYX5^`ZtA&fTekYLS3Cu@J=1Z zpL{GuA6scr3f0smGIdUlqL6D6*6~oAlHVAG((eC+R>>+Ta^t5<5Xtx;!XezjF%$;3 zF9kKv?ekefB6S|zv5w3dF83RjVUBb{yK*Z|?+l8~cGlq#zr8kt%&p}|1)DpTH1z_m zj>RCCGEEmEMvyLY7ftK$sX#r}hwRHHrDyfSPM?W+aXGcPjc8xD2wtJ&9m2DWDDEL- zAYzp6@Fp;^v(N34j1W}RJG6%89|Nfh#f#58rV&!8JId(ByJ_OIz+tlL0Qc>E4T2%8erRZ1Wak#i3ntdxb9wpxv1oLJy z1n7A#1?}hZwhGX{Ue1L?lUP|au-fIZ3BH$5QAluo?*pS#dcq%<7e4-U*AICRnlNQ) zVPW(X3F07GGoO<$r_Pr&dN9*DI7F()&}-9 zm#qn&dl1m<^~g}iV=+w^W*wrdCo-prK!Pnhqj3Ga2Xk|$U#C=5^e4MlW&1bf{+g4C zvbJDZJmRtgGO0@M!7IN0MXOesDp_Gnu3>;jsKMRKXh6PHj`1~7%b>qs4A$o1?_B_Ol!Iz?@p zN-eGzg#{IV+_iq`M8T?98zxS(oYQJTx12uOdO-P%SNWU7EX*esoiUC?XGx_X9azBgA~40bcOEB?Mpaj?xo> zhkeM6VQKaEGwPLk+6CeIit`j$y$34R*%=v=pc^e5X0FrC-YX(=XA6B}X+Wa%a~}L4 z^Xbf}N zDGhoe5kCcuy!gzcZ6sePU70zdblf|cD}L^y^TPR9)frJ1Bnw?OYHZ!3rP}QZRbF)Q zJPb|66Vh+dI`)iz4x5EZ&DO26ERRM){aA@IU2M99MtbTPZSH4GVI+=j6-%Q0GmbD+ z@zzqK$8Dh*MFqr))fL_UpP~CMowGOTV*ccwq>8~_nmiYo*P+;d{WffWrnJLZt7ba3 zax5P!m%%qcifPi0qj!aNtMjx!V6`Xj!BnJ!PSeoch(Drk#C_Kf2JUlJQ;H@9jquY0 zCm3v~7AY=)dmXwKnJxzb@ymwE5%*`*n{&3wz02=D83?iX=?E(vj$8asL~P|FLShah zV$wQ9@Q5GT1ljUt^Ck?}d^)h_!i4wlAR(9{(%i2|(e0lGm|gvXw!UOH{i5RYKRHI2 zGh~%{b`Z~9I+*p5q^%QcF1;b|S4iHiZ-6)^#b45Rmd>MNXiBC~yS(cAP9R*|k&ukH 
zE&49zq(wnR|CDuAF25s~*6`d{;3Emh2;u62jNYT8c4{&lv`h?74Pu^Nu8ck{(p1x| zIQz=hYt_X{t?KL$8=;lbGbBBVa?9S4B`hNfX(i+X7*@+a9Pj!}k@0*92t|P=W%e7tqQ{^eK&16zR5SB&sg zh1F`fY$prav+uKjFg0o<{hn%;o|s#F$L`GQ5zfRh_AtM5Dak^K^$Zq&ds(UQqV8o< zDW)qpH0m+H1s-U#!egSnD=oRdDf>e-1}y&GL2|UxJ3LDL==Zo{Zean%;INO-=cB7M zvEL_#@knf zau(d;SG@5VQb6}`I~WL{L$G9>o-p3L5+oB&=O+ln{i{^0TEDA|1OyS#axN5ZN*6TT zl|E}w2$_I9wL_qE%uha<*$dq*NuCiVw;pkDK!8UdN%|ehy`n5-4z0cVId|h6{`S0F zR$nC}+pO!PMZaSf(>XF+7$#Klnkk0k=AT0O$fa|RZew$o3iMUXvl5EyTO(*x3(pxc zaf&EJC<+LR>9u!hdB_CAEsbUNHdDxjGRGcqXURg6MMVEO3ui?TtqG_kC$uAe9~@4@ zcd)a;xhqE!c*&~}<$$gNgPpkaF|Kl3{dgcuJPFToQ4)&yj-;hJpMFncqCJ0ny` zW6VDc$}AA@$?h6~e@!zF!>}5U?J;bgF~4OKGSwsiqq}zm3|vzIke6Ul{MlD+Pa)sG zZsXyG@H=&9WB#%)Y9gI+@?3PcEyMiAlku3r5xOhLRg*ioNNv3+x0V-+s;cRZY zHuf!D=4v?YE~BZ3_9&3(99u1g4s&+CgKvm@se)#lZ?UxUj!=tUqH?-C>jBB||EAx( z){t5HX>`8Q@dEgFMB&k%!49h>`M=o2b)3okUiW3{%gOet07NB(8pF$DI2vNFbSD?)ZL@wBj$k8G=9K26X0ad@$9~k+CmjWQSA-88fnWPXq@+tL zM(mnjQ{s=UxC4zci!O?H@N}%(=_t&in0#_*aY7fFwjQ**~^|6SLCp5P$d>(lbdR5 zuqVIAT_{()k>ja3EbxA*)ZtPkl4)${Vz2oEs{gVT4%$pAs-s9x-3SvM{G$G7?MKjk zc$`}ggav1b4mhjWbSys>*1<*G`q^1_?|pOvp3IRn+XNAi{-^`R)CFg7!v{S_u4z#R z($%IJxKv-B*twv;qN6?Mc~0_y=|Ne-bObFr7|oBhUwqd<25YT!aNRJ;iY#JYCv==A zg+oED^9WjPZk4Q7$g>L6{B_d-a-w{;3*LDdt_mVfk>98^%y!TG7qY*j0t z_}Tkjc;fBz;Oeh8_tZUDl$gzTf&W4;k%@B3(-RJg{AHrr8epxKZu4Z{URi_uc9q^a%|coUu3{iw*iRTL8*E6)d8X zh@qK@qDftX^~|F;oww)M_2U_v=M-PAvMN0yyFGyfZi>P_R@QZ77od5kOOA7BWs8C7 zuKaASAB*~r+RY+~F(wA4RNYPqjN*^YE?e17%zYu5gA9L*xQXrJ`-IoluyqID4j6oN zD`@tc!nw^0`^1jz>4+D+za;H-!1rK(ZrB4V{m5O z$f8~}l{6dKkUo%QJtu|n257V*I ze3k)IwV2Sj=_xIDq}%>vyEgaeX0{XaYW6D%x`fQ=2w1VSgX^jHDZ}X!rTQG(n?nOL zmUdj=X>csPv$&eA#+PYl>FwnzHpv+=uU{YiYf;6JKtk#P&;PO}MjKe`M!4P;tGKgo z68XAptuV&CY>g6=8?w za{rN1LYTFvGPyd7Ho_GZo+XGMp)EygyF?KEtaRV4r1BnCZ8Zt6k4TEoPC;l_Yz z0*>vil649mS3JtfeS#cW>T-9PZgYQMKJT}>cZwM)TdA8y-eP%+@)%!tu;&79+l~Z- zUj2kKDcPidB;R_Ugfd&TZA=3GK0ZS`AWN|B*^*BelkI}pKFs~PsK%AaW0Rkz4NJzn zng@uc&!(RbNJ^gZAuwx^OTy*P6+wfi){j}9ITjypjapTWlR%pXTeyVeqo}13+yX7? 
zOmd~4IX5hqe5%1DBGVMs`RW^WA?*1r(92@1b?$Gx171-AQfp;)xFud7T}b%`ttiQA z{vJN1gYNe}E*V}XwOYUGCR@;oLrX4dbgxBQ+vOaUS5XOL^WeMv9xpleraHQqE z-f9Ptj=}{L1LehzFjXnLQ7eG_H1QT~o2PNcqS4$o^~a4q!^Zk@tu=Cc%mXe>Q>%C z)eDEw$nAY_)&mxNSdkG*fDe6_F}h+c_FumVOb9xC;)sT|)KKmuUQ*=68m4w+06h9j7Ce7oYG1;FFeP zcBjyzF3$f4fa#}vLu`w63`q|?CNKacJlx6>PT_2&?>|XUZfEhPK+%$)D5erULV* z-?G2BdEz>Pp0EJN&uk%G0=(jaSh*xD^7u^rN#%iFzg*E#H6^gG8KpTi7g&%axB?no z8|VLg_cXl|e$#mva`MiIchDZ-nsU%H>=+P=d5n;Ip=&2 z>f1m6KCNLcH;-MBi+iKu*XgCKSy3wydH^9@0|ce(f1kB7oDu`(XU?GPkDxQQ-|=|; z=dosUzay)ts$x&59&^Mc}HkRiSn zv+}P~+@`{I4;kPW*v)i29eI!bHYKzksc|UvAiBvMAIEQ9Ldjv351LrBZ%;k?N=ryp zpr>9tvite(JZWrHx*k7|B6nua>BT2MQ4`n1H&EzFlCE&P9}u|EDqOZRkDTLkn^g>L z2i#_Z_;0K-dr`#G@bK`+&PkpTU?<|H0?DGLb&O?zzGYT}O=b051?@sXJrSbOCn}`G zqZ>xpw;G+wdikb5-#1Ms)X(2u6+I|}JqYr5diV?+(polk0-d%hm&lNCLvgrj&mw5f||Co7mPfF|kbVEV;!CP+I!xI{}gA z5jTi#XHN9tKewH8%-iy#9jKHkqb!mDsn&cbQm0}em590W4hKS;

Ak%subk8 zSzEb8e=nNR<>w|RSGHT7(?C12CP}A%FX0~#M^bRxz18VPDlC~$A4JMS-3Fq>7{{P~2Gm;m^M>W#jOcwarm%w6v zW+y8H%6}ss{7%1f=Xd=e)~2Exf~>Ev~4X!%c13xE|^MP!;)#2rD`$BFo>lJ#tS^yXsI+A zn*Q|LerGspB?75SS?0i}zlk}N4kNMVE-WIZGC;v7A1WAWFzzx=xsT>&xb110a#33W z;fZdV?hn~tK~mWwah}bRZt5gQL$nk~w8VfTA>HlBigRiE9Y^`|_t|_3m#BV$bGw$$ zqjCFJ?pbNFk<#5WCfA|w;=~SR718sM{CPLWcUWb%;ZC&lGe$E&t0X%gpiny4BreDL zigB-BNVgAlF9v%34gt~@s>UfGKy84&MnanG&kzgW-E!);-+3y9q$EkM$-XvegZ(4U z9gYLzn0aBN03X#rI2{FR|M{6n$)serr>EDj9zZBn4+Xh~_dn2|-|Y_kI~`6dIjwtg zTl#%Vu{K#?u~%lr1D426(A^lX+O~g9S}Bj>kfp*rNnk+)b+nzaV&InS1BiR^ea|eM z39ou!GO?t-WSlVNPg*d!Mb;G#HkE{se9(%Tk&`2iwlD!|>i^nH-NyMy%hL!Q=!42a zfr8lN@8@f#oxgmP=4JFj`8dO^r<$DcV{RU_RO;F|yal`JHiRx5Tr?&>VApJ8cll_8 z_F6I0Zh8gm#r{@z%$3_)(MdLV8@HZ1KYhw_dv5jj=YcU^RuA~@rJaNdW8?(Mxr=Gj zGzxn4#ryg545L*@4~k#Hw_m|T8wEE^Io~_Wl9H#BGVT|+L%Ca95gR7#OMyOY!5X2T zpP!cRK*pr+0owE8#8IH3PTX`p&*#bSq49P%mKn@xSIU#ETMQI;a<~A%&!> zgb*~)P8)ms*4z_%yl}C&1gvMHm zJ1;EGT$XtNGb%JG+@f(U>5p61y}J}y4^0Zw2`?tE!?*}1FSYafW2qa3{LU7Zf*f# zVQ>h-jmrN1j~x?6Vz%)qc%FZm{&Vg9bgy9cgO9|A7lq=}vzI0%`!hx!xbV;F>}|TT z!+aw`c>Nli`43q5*2X{kxt@OZtv3tBr$v^|-S_i4|1&1Gt_(DM+nDh+V{EPa+i_GmI_Ah8Rof)XS064A$K^K<}#g-M#-h9twpXzqK5P zbgJw&Nj%RTmr&1_A&vBrArjM78tIB88K`@F&UQeBHnb{+rQzNN9Y2)qf<`6O&}zL7GaZ z{M(+K4UW{6e_cy2xW0CCNCZ@|yA+Ixm^U!bjc5&Kw9@~55&&TCkvkCcTu+IqQ$yb5 zUEgn{gSztC{ol#{mmDb0MkA}dbqVD$_h9`Q84l{=&pJqL7 z2ud}8;`{QgT&{TGNJHXyRN5v&_};e{`_-kR0h zdc*lPjC6heTaX^M5YNJVK9sCwdb6(o95zJvvRqP6yHVMOcPISmvfHny%bRqvWW=Dr zi$iJM;7aepOL+rqFBe)r-@mxtqk1FQiPpyW{8Wt9A`6!xLHQgR$&}Vda-_xZi&@@@ zdU!oHZOlF>@2lOoMGR}=AS2c^`(+o>;Dp8khvs~Uf~JC-BC*Gc(Ll}K-gyI_Q`VWI zb@KU5+uExb@l~dT(toOP6ALXpSp4?#RnZ8hb-~oDrx_&>pk?-$iqts-R8&G8v4P|l zmRtpKi=keDZej6N1H=vA25oUGYSwq~Q22TG(->|ld4~Bgq-}JoRHpWh%$EO1*BkHC z#v@g#F(XEFKq8pNR)IqGL?5A)*ZBNYG}EGl726M^l}@Zz(9$!WZ=81&1R zlhA7C4^IlesxPcfr$A5)NzOakE2Qyz5NGY*UYbd!@jePy(G)3sFGk39jtK2C`~SY* z2w3}jI~6q;11f&`Ey4xvLU!h>FTr@ESfUMm$Y%#yNl!5YLM#9FyfBv*xDRcT2S_mQ 
z0MSI7vM|Crs18!r`VniTNppjK*;!l=Zi@#?ww|Ief{MN%`0M$=LcIb%-$EgfEnJG>43;aotr(N^^y*cVawUgPjG*;3b*W`k-A)*02lLeCzjvsqu z^3fF?(Av`kJFTV)G=pfh5U%7mlh$YJBmpVB&PO7XN_-s%cPt~u{Hg(^Bf$*v zSevJ@C$vH@0m)gtiilb9C{?xI-aNVCqvBR9GkynR*;Y)3ayy9d6Z5WKG4%@mv0)|U z?Z8T|xGoz1Isu{lWq@5(-D#Q2PHajS@EK;dKqI9cbY z+k|ZWjW_v|_aN~!9G5`K>iK0n4l+DXbe@9z#uygC7`gUl%JoQW#Y};j1e=h*2_6de zAr#@+Kh`a6#qT`EmS9wA5pDcC=#}wzYe;={9Wj+0{s}%C<{|311|OV8RDW{P?||57 zp?k&2y>%za4Rkp{2JtMEkgjgIwQK4!w^E-<91qpm-eKI_R^s-xU=~U}IT2FMCg6>< z-MgF^@DMh68Z9DX2rr7voeG)^y{En{$sX65jGugmJ-UTRUo`ZofvP`@ky5Gg*v=~v z&|1o`#i8IccAE;t*`?0VdJZEtDCPM4+85kt8c5q9Uw>`(_CXa=7obi(O?f~7 zd(XIS3OceIf_YG8S~0p7M;1C;a?ZyalI#F&-{TaVpf-iq?{s{F9-L z2gkJq5l};v;#4M~xj=b)@&V@e_xj0PyiBGigM#-1WATw?%Hgd7^@w-Y;w zW-QxJ|j8xHoaGQ-1EhZ$wvNb%%*C(j;F;va57AbP}378YiDe ztvJP2B+*WDCouN2X%#8mXm&dzK~eusJx~yQ^1XzRNuG9-J!Ui(ET8)2d{K&H(H){q zssytgham+KPx=afEU*gfCx=feuAk*V^45T=jw-|B;q3|Hx4O<4&zsr?Eu2#xj|2jh zkVM*}pr}niB8FEI-$e$br7s{|i#p$_*APv8aQv@yR1?yO2K?*QKp0j7k&JeNXNqJ* zvOwXA-?`o-`i_(NmVubsFTeVUjtm_9K8j3oodkYgd=GfTFK?0nsgRM}1L@fjx=$K< zzC~LdO}HS25f94Q-tAMFgxwDEhR87!k8?0)MIxeiJ@%Dk3tK=@uBVuug8Qy^Y+nYy zoH3k1j7#3t2AJzkb3*7K0EX(p8a@$>p-;cUN6IRp zs!0!l#>d7|chkBgOGFx4U|4B3khW{Lko&v()a>BNj7s@2VrG^lejDaarC9k12$zH3 z2{UwejI>r0;`?UXZO#xx` zowM(#N!uIPVEag^sc^AT6{!YgN%ln91b%Aoes1T2R;mX48U`UStZC5D(sKM)pi7VP zkH2PXfwsDzrg1{h2kkK1c%*DL8zX-IHC%)AQ#|c78gJx|>P0$&0WtC!-rg_{4S~xN z;(!F-m4S*((UjG#&9CM#+9vIYyM2cq8=u*`*8s>Ftk;33sKfgvkuJtd!bM7^u@;Q=M5?qHFPh37(zZy+Oupqmbta}P z{QH?Swd)|ZxbfZflke1;HEHGd`YxONC%=Q_rD3_UUrha^U0T@FS92%-ZlUoXEUX4@J{~O&Kcjh5u!5<=)y4O3;%?WF#Teff(ZYV|A-6s^Eaf8(Lgh&pdx&(@VQkm z<+PVTo6LXggL$@*U$AUS7@WG>p47D|A16I(W4D7-KQlx}JxvY3lsBW7kB$)JMRSP! 
z^QOHA1&jQOCBc`lo#aUX_8qDK8o=%*>XE@l{K2ykvA3H}2@P2SE?rx;gR!|L)_?o> zhenW8@Pc8Xprm8W9B)>N1Vza9od28?=ca1r;qlax)uGz*sl2sv^R+icB3u&5OGF%$ zH?brwuUIUYd*W)o%O zB;fha#meGhYg0A_4`ZK~-)@5sV~1^XQu#1-E`=R_XJt)7BuO(y%7`o9;(?I%%3DAK zE73f{nyWdvrg2$VD?hmEGF&zX%GBFfAow2Djl?S$_|bY8a9&Cy{{7ugmwkhRAMRlX zTG04hu$ralcIAgupE;SQU8kwhcM-~ZZvkXvp>{Ta=4o52IaAnHX2MKdozeB(13+ZU zkm@d^L(+jN^75-gA6`gPQ}Zx7wXBe>grNCHJyLipk5%mC6-yZDB_|)v-`gZYOhy#4 zIXd7FLr9eG2oM0|-YQubbtH*2d3`5}wEWwo{UcjH_?M;Vgm;Pt65+r(y$iM0zp*7!*ah={%c9;OcMIm&rs#4)V+wJXP}sT`FSR>S z&>_vYXBG+hsQ)G}3VkP-z_+!IlZR}$Zt`oyXJ?<6#AmUL*xI?j_?+0?bL)-Cx_CK* zKY#wB--_1$;yJRBOQ$q!%vG0NwT>d2*qYn>g(3R5e%Tsw!e(LvQ`Ff0knZq|gpS zEOMjl<&;y`I>W@n^8iG@PN;JSBdT}=@6pahJtg$=+hY7VP|5G{1Hh)MTWrC~w5I>s zuBXQVywrmxZ-pwz{e17VxOuck@qiEHR~hHqv5}k?v9Dc_?3dxZKAZc`*FKKNjSgdV z7FtbZIQJ(S@&~7$s&c{He;Jt=j?UL82#>2Q^MPIbq0&DUdOg0NTylVDjTiCgJALX; zu`ZNr>z|4oRvg7&pMx(vuJex}J(im;m2$;K6(@#-HVgN=C96sOGvIe#gh`>s*r={!>R zB?kNdv7HjTjkRU`z#UuZ3ERbPxCUY3MJVibe?^O=B8{UUQ=}0`4A200rQ5$4ll^cF zu@WkU=hnatS|eLtd9@!flnX)`b79-pe%!=)W!v3XSZs8g{uPO>hdwnN>y3Ui8m32W}7t=$z zVL9k0i>TTC*RQ`ADi?$5*N?_^i;m6#{!FO$vKEQI@nqOd5c>#Y$i@{pUU_ zIZ$Eo@Duf89C(|2$_micNbxebK9Ew>X_&?Up#4pV@-O0pE|_Lr3O?g=iNvWRsZ|!g z?ZaI+c?w6^jr%=VA%VQ48h8vU>8u zdPss?P=3#;(^TRybd7?#_R71Hao64Bu0lAAn%mJ5VFx! 
z7#b6si=?R(cApE5V_~A%9bY9J^;(MvkaNyPz%^EjJI7R?V^?6Cnw&d=na^ zRv3jA2)Ig#Br}J>-fP{TIOX!S-oT7(?tPYh5qRSOE-to`l_=>ekp@Tt{<{eT)f%Oh z5Db-AN(zepE&o`cLM9$zKkP(xB-b6F@CA}fCpGdo5F9&UBTl2!a^J@Pa+;#QvvSm8 z)(RgrDtnVX;&9zDc_$J^_Um9BZ$YpR`WOK5>>4f0)DUuv^i> zbl~}}6z~_`8efW>*;qu<5w$`|AOt@m{%e7^;JFf8oEG;H!iF6iS!m*in1DaA@g=cX za$kC;(qFxKWTkM~F+D_Op~_6XZVF1htUq4-^T;&ujWz)SS0u@G*X7$c_)q9h{?TIQ zvRa8nd9S9vPvZ6RRg-_Bi0mK!+xr>+qy+}*|Jca?m0<6GzM7)n`*y^gG&Hh}j>99j zhQ4kn>LnE~CVfTHb{XgMuOu?KB`lQ51m&X8Q;rhMTx!R12Zr2I=(G?V@e9wVc$5}W z%W9P`kPg3H{iQ7w&eg%t?Sv&Hf|Cn)@w29$q{IVz`3)Cn4*7FTs?&^1wnCLJ5K~!7 zQP%9rnsf^i8<=IZj?o@o15A(+X(RKG(wI5Ibq;mIQvAXKf~+ZiBcX-oAht15kt~vW zC|?r1QtF{YbbC8?@^r;1E5H>U!x5R}IJzHUc38uqBPfLLL?1&Zijg5iy(fHn7)T<` zI|+ocK~fP-@@UFg4@k+bIp@`k);J&v4zZKhc>lzujU`Jj!#^^jI^@+05UhJ^$u{X* z`_v#k)7?00q%G^-ek4SXP*Ir&RMitT_9XqiQ)hc`GQQz$h2Ux4^smUTd8qqjWCTem zi5;bRzCh5=Lv~IPb}}{k5s(=@lTb@=Ndz5xP|UqzHnpEQ&dg-h^FywgbAp4BGQbO{ z8{Gpk9@_(~t@vBP$@#|1spib~?8?|Zn7eJCBq!~Kge`|%-0wvw(R5=DPPK>l)gqEG zZoEfbn~%sCqbDH4rw>4O2+a*5G=+KH?#X*db2Qf0`i=75!Ixl$D#Y$E?}tu&p93WU zWbbofg4AF;Q@{pt!N{y~I5e~VAHW=PtDDJn$3bwU12$|?xuB)i5$^8}Po(wE4Z4lm z`)_IxmeuC^`9pg7?G8syr%gD|?rU|v7nosO7)N0(%R;%8@v;tb8w#^GmdyJbY39nB zY?B)~Zrp@`aePY$?OIm)>_z)nIaN2SzY4QyHp+(D?!)Z<9A5s{HFCed#S zPCI@4ArY)pR>_LF2a(s?{rtzIWY&#;Papi;-yj>Dwso1-%*k@1FlX_^x|QojeBJD% zpRBRgQZlFtRJ!z7`#Q7!1%ZrOi9h2o;l1)RWT|>h#~Zfcx)gz_$i2TgxrP?*Ogw4R zsHMhx94zSfrsm#02*&p{x=d}gw=-n4enYufKVcU_I9lXgkXBmgVLJH@5=emc{AeB~ zolQ~+`trWXA*vg`YL9oDP2^VLhi|8TOrPEwBW6+tWKHKt8wfX@qzY~33HYuQ5Ca== z6=;9~hfCDtXFg5H@_*bT%m$=3fF!xT4dGG8^q{Po`>g=14>^EB7hBqCv^m3@A)!1? 
zTZK%_TkHmk7$oT-;Mu>bP@lZ3X&2!Jn0a7jn3se&4mZ1AcwK<=Ol(WSoI0@4!jQ0( zql0)Z6?&M};p&sOmrWH=zY5^vm2)?)D{F#410M~on9ALiWho1py!vSpQvdQPcJdo8 z4nuLIQt$`L^mSQA(t8&De4}68UR^ysJ?Za{Y9)x;yaoyk4!$+YCD29x@I@|ZOvLv~ zv2$gOn6WnB*7K9TGi|>-_jqruKzGl`?nKFW8Oh*hwd=5}?dnoCQ5eK}&*|uNp1T`M zP`O%wlB?SjRLn5lZ`!oTuM@Rfknv8GE~0&;B|$tU_(@@^LQZclWCZ&6nQAdWdGO^O zFXtXge9KfC2E3<;miJFA{Wp>^Wek%3JHN?DF@?V^#`iE@{^=X1G8&!#79aV){nmfd zhyT~m%dcNOfGL-ckf6QQqRTT-VYvJ>9mIm7HAG+Fg;28#4n`Rj*B&Y$IOni;$2;#w z<&2av9-@ScjKi}mMTne@Q(Q-lZq~>Vcq}g<{Au*(8Qg=n`;{$WESsL4hd+x9iZQGM z$STWGSzbu<^7`^HDX|o|D5q$4zAj~m9Ea$n-k*1QOK3|3{!8MKF;1a1q{)I&4_S%0 zdRNOfQ$)_)bw%Evhf(lpHtI&diim2B{%7Bd?TTQQhXaR;dM$nbKw-(Yex`qorasLZRu z^f@XFOvMCWCF#OLHGV#Yxdi3xei{#Fd6*5Ld5E~#?NU_FI0&c0P{Q@Ve0OBZT|$y` zH$UMf9#;pB7ys)Y_>O6z$y>I{J*99jjSCCwkx{fC^X?wO;YvTu8*68J5!QjDa+<0_ zFmm%y{%loUD2lgBbnl%!$65!B%mmC}^l&J*l>*sP+8Fm5t0Y##y?Yo1ZjDF&I9Sqf zQblZSbgQ*H^(ja9TTu!DJRamX&dgF{=w=P3kd>;tly7yw+=}j!;g+oyjM-2_feJ zHt3v>^M7Iih;y+wvEWsCV@jc7Pd?##qkFIbved9iq}Y|<7cx>Q;5yu-1H!a!BYwfc z_2kmDCA=Fl{*ZX&1e}s@83wiIO zr^zhD@ycXy-9DHUcp^51~ zh*T*g6Y;>GLMXXLdVEcVmo~pRw&eJHuqKB16&VjpV==>=M4rVA8foGTuL0juV^&~R zet6$f|2eWO{QC|d*0e?>Cl<|vZp#>;0`eLKCvluYJ$Bt{+pf-MAhLn<1E1}*4%Lme zBmyj})EEwstx2>bE_+(zsh>tbjmEszx4cGx7J~immemVTcd>Dyqfw@m`^T5^B)qb5 zTFN~-$W-p@1ga81SuH6VQN(S1^w0U1&6bJE>2GBCQR~iUGiZslY?@9I69n;t$;LmU zo7H*{l7*oq>yDxqpg?TxVXO^$Jvu=56?GCyLpky-Q2P&vLtT)u5~gqPEMr?-GUB{C zu!okfTDeStXrf;R`_oIl=ds6q9d4nW7N zj-F&$93p=C6YRtz02qdM4r{$W6;yefx|ys{rV7My6`)~56lhpMg2dJ+c)pkw0Nm3A zGNN8H|7y{o#uH*Yw|pcsM`J(M@UQ;&Emgph^671rHQ+@yL&la}w?bV=LdUzhawgRu z4`W%-hNc;E&CwljhT%r`k$VV+bMj+pk7*iEC?y{QraFPKdz)!2L?^2Eq12y1F>RUC zj)(Jzb4rW_fEvfwAEq1j+j4GlFT~(zKF34rL@!A&W-KS^>(VTGCyT|cs+aIQ3i8kG z1m9FM39y0nm0BHUAzr}gxmcxg*c+oo&;!(AmbKYz1t=z%dbZ8se_DnLoLW7zI&)5} zkP45;dszDX*;QUtfw0i*Qwy!L29TG~RQH9WyHOU+gx zIFWKH!v6BlTz1wBaQrhOIRKh?NsgU}1jWUc({R_FZMn7Fx*4iPQ zDlJ-P2G@`Lg>Ettsh3by5&FRJ;c&PekmXiI@T*iTWS(~kY1>t%<VNyBP_T}qQD=J}3 zAs$e;%Mx~>G!{_5p|K(DqSVw)VXKECvW3#1V5-Zxq+||}2AQQPyp_cAoc_#+#f4cJ 
z;i1GyCT@}6{9Rb3LRP$F_W34QR7Y! zDuAi_7PPNdA_)+B9{%7Yp?pNBsQ0w$Vpzw>^oa>y@-2Rk>pxlg>$gj&2+d$blL!a& zj?@h8t_so!Uu>dk(;9#kT8z5sT&kC0fH|l2t8d`~DpvUMz~i$OF6-M@RL%knEyBr@ zr&I=};R2uq=0?t-6^r#hwyy9nDcEapWC2BHevi-1q_QJ&m~}~?P)UD;OhqS?tI-xl z&IR@MasUb3q}{ou-vO%^O7RV%w^(|rWAC>4idde|sQnvWB^1zrTpH-1lnYw{IOpw^ zBL_n*hZoUNDpVnHxZor>$YqF-&5p5B56Md8u!K#QVZfqPwKXL>!e&1gn>kZ|+Zx0z zogiw3BIjRCs;t|eRVCdAdOepYk@=1<@=p9IbwENc+HXVQNt6nd7)Agw;o5x&j)=)b zW-+%;ZQ&y}bzU~UwVu4`xqAvu9j<{DH!n1^KfZlC^IjSzNi)6bgE|NarI--Up94XW z@7-?~g{ah_)xxN}TClT9AI9yZ-hM!sCAs$}II;yCBQN5Ufo*pWWo7NB8X!Pr(~UEt zhnm$3PYeUh=>)F5$J@8EawZ~A7eYcLA%t3r)4@aIi1U%co3~OB-$_{~VWB62*rGEz zN_He}CK_qRGn!NxrNM^2wg+4_PXSni%r0wi;kCcU{*-%8WYYZtf5MBx+Zo5nD%@aJ zUdIOQt|5>nNv|q)=1i(tF)I*8-C5M9artlLge#r+scb(ChjVdL@ce=YN3+`YgE>@= zwj?f6&sD@ZD5z3jX0nAK*X|w~^GTvQ?E81xC7)D#G4zRZzH2+h?=-8?`sESBjEW&! z&LDI+XDQZT>(-$_t^im4kWcW`a>jLT`)T$zfcbeEYJxZPMadLbI_xm>Fvz$>R91GY zifbh>tj2ppgX%K0zV9(O^-|r?vf}-W7vC)*p)HaeXu7O*$0hu1<_GCNIB}?|cHU@5 zLNE0Z7P}|}bi5zVWYBU!y^fBV-`9F$RWB>Q#AWB+08;$ZPpZi6+UfRzn2)IP%ETD& z&7iV@H8L~t(Z`+CKGo}EPM2G@>}O<@bUu6+>d;mw+5{nE%3}IMYF@Y)!@^Pg*h%B5 z0p(?CeL4N|n5DbOhGVd0Mfw&Mo*XakSs_Xcg>lK(H`vV0UJ?B3oLO=@eH~TypCbdmxU2Yx&`U|wh*Uvlq+nzK z+iTf9fM0Jx{FGnj-IiyjhE9~qgD<44O~Ysd$9*b0a)|O|9ZpwQ-(fjsa>=Cbnfm=c zxI{d3p3Z|=C-g)D>^0q^i%n#0hLhIiv2XPMi?~f|5bjicyZ-$lsHdFNX2+}v5H&mh zdc{mu?r9a*RD1|0pduM6Zy(q$to6!KN^q1PpxA{V8j%ejP%ln(Re?dTQuxX$-Bm~& zY%oKlH9UeKP`Zm;!6R^#xX7T=T(n&5Ak{mbeI0-nLx8#5%_1$qsG#`|S?E4|e&#P* zlv8y#6dt4KgUsKGt9uHLAEuHBB5EOjk|(8Riuz*$CCr_=@BSvG;S8JouwStn)47cq z1&QucSqEydNp+npVJ35ihNb7az-~qBJouEo$e+I%!J)~+>8LjPGH)ZDgIpxJQH5-s z9bkd1ie5w!nC@>WIQ|2$DrJZ)7ZUhnibN>&wyNR2ip47Der!!7&(*D>}Izc9!y2>-+B#Ing z>_>^uC}P=(*u9U2&z<<6AnrF8)frfTXuJIk;uGhVJ>!UDU?bW zX$iB9&CVqz9wF}-=v&hUx6QOmPS{*Ik#&1N4gu{SD^!zs1?Gk~%>&ot4%x9P1#UU6 zBPqV_uHJ=d6>_Y%ju>vQ2x2BBD|?rS)&!*D8cip7RScag=Fj=cEN!EC3UsaP5PPZy zqr7}1N_oS+VpkSw&-Jzm4nir)JEXPRlaoMaZoNe+VX`;wBE(@p&6s#^?A=G^AG6h$ zP@I@|*i^(2*Ex;j)K8}sh|qc)qi6<;DEARtOxGoeZ2kcL{n_+C%6>*9NB|N3N~hXx 
z^8cM+Q%YSb2P2NNeP#ZW>>Hyd3HLD{l|@k}{=ANjJ=GV~jc(-KbzxrUh&GQ@J~czv z3PbJ}ORNu&!It-j8O+a5c?$u0r5cKsByyRlI&gpKl2(~68{g@F%(MV_0vsk0~8z6*JDi+#GjZY?GnOb3Aa(X@a7{ zlm#QC-V@b+y|RQq;%JzoP=uI-2K#1f&ze_X0?W^j4Yg|}8YdXO81}%p-)VR7 zDMYoBqM zlha>ij*<*P@Og?Rx;8?m5E=Ulja`GKDjy~rN0!k_n`~@qHZC*ZZ^VTw#P|&nJGDrnK}Y-}+9VDjz0~|<5pSOs z2>7Ici4WYkwc3{cFLx)LW^uJ?I+gWIhgr*JQvi!h5z>#;gE$_h+gohA22uG5X&8Cj zsf3)_3VE++d7{{wyt|JGCDCrBn6508v0-%{<^5TT;i*Q#ufZcQ6CNYX;rHp3$1^jT zqTZ4EUAfR@7}9=<=A$&&yYiB+p>~DDeV`DA-%RQ-R8p6%0|kN*)I$$vwYCBkrj$9)_G68}+pn!4{+*Bd(q9x3kG79GwQ#u>)q=hEN-Ty%*IsVrokP zj_}bWJnrV3&k=5Q;s4>I(a3+KD0_2+>mI0_@mZ^ge zQToys;XIC$wMiH&iACP#+TVbm0&(>#)&Y;Y ziKqjR9sI*u>Pb83+>6sHTJz$JfD)lq#4dob&=Hx7>|KtS!DY~Z)4$pW(tf_y@Gcm1 z7nG-?U3N#G$chg)V4UeZ^TObYzc*C+5S_OBK%Q&KTN|z=qIx#nCs_`4-6N1B;riZ9 z>w4Bmn}!RVLN|rWB}A*&i+oLO!Wr{%nOBkf6Vf`S~m4^gUCxEnIq$oG@ z8|k)q$|ioSr_uGw29=pCFQzY1`HP5ieZAbK6*K>O(UQDGRN7|KN#w22dn*I!aeY}P z3+9|GkDev6!Jmbzt47{(0@-3Iyx=ZlQJV@2a=%Us-*;j?L^)@vfNEN|8H7%FlK_l~8#P+(y0!xtu55OjA(S$_UFK#jA8bB|p&`kuwiYvT#z>2H`p| z5DpMZ#5mr{M-cljb|w=yOU2ATM_yjq63MPc4uJuj&`3l?!oWpa1rIktuy zu)DUth*_fY8}UvvQo@_&lnEc_}M@_~TY1PY;Sd6{XM>nD|?pI*iL5`>9I9M`2gEN zyiag&O&)tGU?7v0!;XN@>rlyc)gn_tphF!d-Lx3akA6=Tr2G8MmxL!~QL{|kTan1u z4CLl_;+&PCuXINYwZur`R-$8O5tv}#6V5RbZ8#%3e52upiccH0^XE7tLkCEL&0D+% zheNaJq&rg9sG!+KeF7@12zQ)TqU|Hqbz|6T&~Oe>_>c=iY^cizBdBC@+eippS%*4T zn!Z;>%-k0(OIkN{Np>B{<0*lOT150A=3O?e!KQmmyC>@3qTw|oA>8^0BA^bSKr3Gd zvRH*&&3Y5wqadoui|{)2?js7D5xt)V&UuMx1-1RUhLJ*}>ywcR>#ENbW5}>DJp1J( zMt?4DU0`GD$HHN=@VH|G_E;MvEE%}>Nq@({S~z#I*eTG;DCn=5&th~osh&NVr0qlC zYQ(8GwmxskvbSz`jJ#jNGp^tm+F|OlDPuZ;L$NVzG@pY=6RDXGpA+-N_Jh$23j_jJ ze3Y&TpGV<`P<*zsv)PLWcS`x?qAn2|Gp|$hIFt82BBJI^INpuu5x;S^Pod0Uk=OKj zD9WBE4)5MwHJ`&@pGM}t)E_-tE&B;{6v~efF(9|^=P&Tnp)74(@B!d;YRW?zpUWUXmBWaTp}XldVhoYK1=I)De9y~~To&;le z0BaH3;Acu?iE@S|liYLXB#VqGkiyj8Rl@}SSnwi7;EF7yj>J-xRLrcO%%zy2dDfZ1(vIynu3MN?sU1ZO^)r1tm9LR(w)5^s8vjW8Y*}PC3_@ z%TFYOEmV;$p=o+vld7Rw9LCj9*P|%M7e>qlXw~)tjMMdeuZm}E8SIO=B44)(*ql6y 
zR-H5^UZQw1Ox&WC=p3{ip?Hx!20RWk`=gR_gtYNO4TQm;4q`EJDlo|Soe-nwXDr6Rb&$^?JgKDb+EHG2kYTGJ1#6(4w`|dc2qqSAL%< z+G~O=?*IWpCPX^~WsuRKq_cpNiSXQd;}k1LOaCZKe~nMCA;`8e^0U!) z0Z|^bJ_`<@N)uZ-=fy^WE9@b`h$?RAt5kg2WN1g}n#RfFRIFfHn3t+*T^YmijL;mlg--J*!L|QuJM&_4j)MB;Tt;S`OnC=4XjktU4We+b$P15l5+3QKR*E*4|GzQqk8`3SBoFT@YB- zN|LMWs$6zi3q1)xdJG#|nz$J|_hzJ`z6A$UP_+6+gh=aSkG!W;)A)8O zOj18di)#wmqb)WX{&qQkH_;`mkWvN_{3GWl zk!Hv)I7+z~Jh`Z40BQhHg^Yd<8mmCZ8}J0T+RpXm;jO%_p5;xbo$wOx+Fm8XK!Cww zQE!_ljp|dR8Qhr`HaY?|KOnKQz2#t6Ph_+($+Ogt3)bwJPHCYZ^W=rqAtf*nk*3TX zj_!~u3N~spmUq=~ixV^StPSZiSp%>Z6wv9dvCZvAk6h8eVz)52cXl=Bc(&Q z`gN%V8?7I^*vw7RJ(+PI4fOz!b1|Wv_f$)|m)uF0gAYbRyEwlGF3?gK3Hk16;cq*;C<71j8`c z)yp}$Ao#PFQSL%RGTXDNzc&_2S0p_^0}VG$+pdwj^)Xh5yXEqH9wNz~N9>~)AF+AN z5lLlOXk5nt_A*phctJ8X%eWtYx>9~$%a@7qRgsDl-1{nMGLc_dDr3N$Oy2p$B%0nq z7zR0BsyD8IUF@W?$;d4-P~unn;Hq67GB1ltD&c39WpV!d{?XKpDxhy`IGkKjhb_1B zAs;nB%ePafCI?WEM@vNH4};Q~wRctOQrr`)|B*JaC76Cp=mxDhLudwtTdp z#9*eXMWDfqBdlX=h=vO2RrweG!*>Qe$sCw<|8aQ zlo2Js`0>zx6MVO#HE8OkCd`Oz=zsr*8;nKxrG1TpTYCMsq%X#Prg!`=)3fMH{)dN+ zMfjh!rmgTlrxAUnN+HmV$obw~HIdhm7ZvjM5UnNh&Wd+#wv|)`OE+zx5~4x6yp7K?@wA?JPZR{F zmi!gHq;mza&33;%?0&b$Lm!u4n&(MMV${bcx;_!OsPEhMr;%{- z(8(h4+uk!+d9Sw@e>hD&sp#djM%n5#5Jd@y&Z+V55v;6n1=TX=2Y1|w9Xrinv)ya|0@#=ZQRD&#ulMyeX$8=1^nj$!B!u-5mxT?r%39lH?DuYpoqC-st| zd&ACZ4(KMrTa2}_ToT%($IAet;w*t;Qiwi7dYVo;OReoEkt;Kypm2~wvGy8@D+W)O z5jQZVhbk6m(^bqpg~&5BJy49TvAYGox&TWw%G5M)8-i5o#Qx zCS5{YP$Ds~zm59lX3<0n>s8@x#i+WMS*&$p&nSiS!fsaDX(p-Z?9x7y44_lS!uy=3Au8HqE z7-fl6^?M1O{H>CPHlLQZ9w)iY)D)E?=H4t+gXe`77p3Bxd`MIALob9D`8p3$He$5O zQ{C5`VrR$tIU>P3Ig8FoJ7TP{Qn{O@{#YPx;ZR}?o7R~Y>!~Ni1hstla5>Y|zW2MJ>+7Y4;@|h3?8k0Ol ztgAZBFmf&h#S=l(Sivgmsvljs<2WM3sE%=1(w^um=$iPV#bVf60n;e0D03Zs-2aZU zNSYVGRbyM&3tUf=(aT7g9^emd0yoZvU+q=Y&-om5cvtu6`r8g`izhIWK`+BN+;@i? 
zzjrlwkFN&hyV_%KcNK~>tBqeE+44#+>UrerY(H0L@MR0|#-~hI&WHP&Bmg3FJxuub z-S*h>2&Y4Xambua$HjQe3+)w<(o`12Cpww;c>BCWx#6OzsD}|!JVvu=U&{N>)LpA{ ziaLIH8c8KZTD4nJ4#9V}*$|O&dcT8J;b!E8TVB-TLK^AacSjWEb)<06Rv}3WOu?N9g7$b4rtRj zHRLoCJLck#^Ilk%cwAYgXFCc9KHfQ|qSgv!GGKPZ8-V3;LXsFSAr}Wnsjj8Bi)MRN z9iq{-@sv~K_mUi|?Ohc8ee&Bw73X*>miV@faMRpQk9}@hiQe6ZyFb4T>5KP9xx4CC zMo22CE3&ZJ89(lV?P2vkKk&WBP4}aJ|i> zL0QnsHGZ(7rh`tsZ#z6?L#WU(h3 zPr7^dv!dW1$Tvm2{vg@i?wsDD;4vWK=PeR-+9Hrbjg}KjzRn~K%{T(qlZjE}{z`Y-))~xQZkDw~ubQ}VL5rDS3sI^s}!}?Kd zSw%D5T^D$meT0G2Ra2_*YCC#3+;T~{kSM4Q#StA}sv z$8--lW1(RL&ME=H68@5L|*v-1t)sxd8$6gM(-Z%-;;2ssQR;;5@x1%!2c{v)-8XV zb?xEIZuuK9x*?|mFI>iEj(f@CoBr;TSIx7p)XbcDZ=(tbAr7KUgv80RS`M$RT>$E) zLjT-iAi#T@M{o(tHlOrAjD=ftv1s*`IZt2+3VP2&nEzsyGzNaOtS$Wf@$ZPC3tpe! zylU|D&v6nGpYXb1y-u|(VXfEHX@_22(+Z|Hhyif(w`kOq( z(>`5m6UujkTd}JD>Du^(*Eg2HBN!bj|N2%x(<#KXF2z-S94or3LC1f>^z+AWcxpdwb2^K`erem;glZSsdlgJ{r9##O_7ye>4*eUgGwv% z^3+mO%1Iu&=g6svwMuC-S&bfF%riIC0a!ChzTnVl+oG7J-X?3>N8dbn-skb>eFOe6 zUJssmoI3Dv)oAjLk4cF-C^;2}4jLN@Q0s4HO}24C5JrPQhJa`!P`g^Oz8I3Pf-9@; zefB=(-gfd8G{qE#&UyW@=g*T-US7>Y{k!?w-Hreti|qj){hA_=Mq6DX#=oxHXzLdQ zOg34uCpYwP`RC`37>dy?-{0oElHYgCg*sfQ^-5RX!d;|Ec<|%sn4>5)Bqgjzh|DZ? z4?tde^_|zGQA#7|s%j7b_s*!2L+y{0UT*ak4|+EmToAb0E`$)UsN54iyJ>_}jm_^h z7ztg{&9*kZdNj*Za?E>?ky#V}d(q7zr0sJ z)23@rEvl|;F=im(7c_0oTe8V)a5U1_daykJuG!;!nDej?^-eG-p3Uezta74w^rU>ZA0El4G|*`DhrEoZc>YBb~3EC zM?r*0_a6HkhJzMcOO563s7B(Xp&rM_#8#KgiL`xw3TS#wbPs4QODWi>TSrVKFfL>- zS7B%GSoof6fqaB19`Iw~bE+zp^Z(-3=ek#*y2;;tFs^s+>g^GILQ-qOBQvC3 zuUzVJM7{jW4FOhP-WTsv2YqVh3tzm`me;MVU4QyrKZqrByWrunoVX>^iZ{IYwp(kI zQ8qhW2r=^P3p!G!28o^=+~!qs0wW`~Z$E$t!Q~LXN`?L6=xE}EXV;L0qyPGEl_M~i z@2`%m$wGg{{dvom9dTWr+17OR>pnos0pl=8MAQ$`{!Qyp2N|1_PY(P z;kTB)&HU;6w+wzG?VCHwWT-_=rE+b|4KTS}&AMJq$XlAafJ+_qxbggQGiWZWdH189 zr1vpw9I&}s+*q{;m;B@NUO_!w&OX&fDtH9mW<9Ehd{`#DyGw_Oc=K_8O~3j&*N5&I zsWBo=)1_oc8!=>Pxa>CV3KsP% zChq3{i@i4w$8vAmhSh4022-Yzl$j8U5)Dc!AwncW#!zTdh9=5P8B3998Ol_WQj~d! 
z(u|eND=JeZQN8eSK*@Qu7* zNK;xPrYVQEMzK2X!ofo!9i+5Z(Y4pykxQ^&D2v8BlKMz_xMlFz7n{RTwD zfS6TnsD`hgYl5QW^tK~z?E8UDEYj@7(J02)$ZU@8>y@(Vo-5Nl7~-*~)?7U@zt(}p zz3kxY#~07OdL{A(Vcs93(wP%f`AoEnNo?5ONp;}oPmo99cVyLUTg;(1X|epHGZwzA z9t?SdN;K-IMmt}!KDVZOdRgZY;sT!g@{%Hhy{o)iM)a$j;8607**t%&6bWx`umeW0 z{L}5%zBAc^x>u4N?&0^slFABgP4Ue|qOI&#c?>Dp*$@0mq4OHRvQD$OL-kY2a(vp3L3N@UyxNRVS)-+g;HPu~5)!HUkg&!GCP7vtzs ze%$!-#hUi(m3P{IXj-2}t(ez1jz;1mrw$V%%(Um{tRQenJNW5Tn03JMEa|G&jI_ib zU~{)#M6LVT^@E$`4Y*oDYHE{Qr_M)cdxM#fmhpRXA6FsJcEibwf0)ah> zviVn6hoQB(dkZv<6?$GlYs-5=pO>tZc+qfjWLU?(RZeJ7cPGbfCmpZat|!h5B-gez zT_ENCh;;chd3V)dr^X_~r#x{z_sh%7USU3KG~_3Y>uSjLtfiRnK+cE7kpQ!9R>EOAH4>=61CuWcfsD( zK73GLLYV}pNq5zE^m}{@>$cdG)U^Jfs{YEf{4~`bn>|xrtrkub-gQ1Q{Du0YxuxrG zX7e;;ujq_msp_S5n~Zsrj_?W5h%Q!2#%D*o*Iw*~=~iZqwXutkq<$hms{C< z(ujVy7;rKEb-xTDQCT-QHQfi@BKPfim;F9s>OdpKuO*fJ`I6s}hJySS^_16Eq~&+h z;wHh?+W=NB%;)e#Iz8fKebPhWh8CCa)#}i*UJ+Vsm%BGNL80D1f*#=oZ$FKaPOTmZ z>u#q0@d8JnaeV>143Dw4tWAj@SvPuKjZpP1XizZ`+ZEyenD^dzjVD)H2Mf%}-Nbcn z&^w%}092^gNAiJ+1s!O)qvzKQS*m!`ALO;Iv4*^_x;?&;rmN}x$P7n-IG0U-GrpFP z?$3or)R-DC??a{$-^g*}vg+^3ysBC6P{_6q!JWq+ePpo9AGam=HvvLvSC%{$sNKFK zy>`%hwZRrmLJ%tp_=i+03OLxv{j~j#Dy*=dO*-4fp;YO;gkVedd}Gvwtsvg!S>MI3 z%xn)^EMVvI>vBcA^L%$)gzPh#5vPX##S!4wKNxoXsLPqNTius>wa@s*uzr9Y*Rd&* z-9q@6Enxla^3v|qEnV4zF8#^%!$rkszBNKm@v?t_P)d=ZCe&23MIN|~|ABBOaqv-R zEgRFj3bc2gIa^5(#rkNTTD>k_hQck|Jxr?i_kRoy4cUgy&j`|bX}!z1OI37=TmQ$! 
z-a{LmQa-cp+qLne=}On(V_)kb)U_mKF~HrN8yXcA-@^+!=e}}R4YXe%mlo7Eyu>jJ zGKR=OR>9llSsjW67$gkZd(UOY24f`^X7zsO=^q`7w~=$CKulDk|*76hsMVV$&l#+9&j> z!6(^u$M1;2YUDgqKiKvux^qE@klODnn#2q4;BneJHBvuy=E!X24HvRo*7?dv9DHfZ zGQaHLn`4tBZ?bY!Vb$EW`mi>|oA4y30ZC4bZ)|?wFJK$2`oo##qA%wc=eoVJTBkv!On8P)c7?cP#W#kPg}UZ;#egC8}uDK@lby>_;(_=wi@ zlYWbSV;d@;S}(9s+uSD2C;$Ai9`+j7FUEI;POz}a`7O`RU%s+uZOzojvCCA+84!>h z4+*L4+?h9^BC2Bx!GX$)hkie31ZX7otRJsdMMdFAwd--!xU#_y zBeABGrz_nFXW#g8K}Jwx2QI}tf>qxyJhH>}_!qGB2We2N4;4q`VB3*_>y@=qW#`Ja z8=gO&|3po#Q%W~3Znt)L=Sll*L5F|kH=Yd$7 z)Jk$K`*XX5LO5+HTG0ds;nQ}t+LGvb5&jYp>T5jfHwQ`;L>V_Pl$2}hvn}zZ2U7Sd z$coNzw{yqNk2FMds$^UiSmS%2`q#P-i`ows$h@j=0nR6(79#VG+WQ!0=D2mPLwQ9R zT9Kp?FB;3%>k8s>D4+T)e_%g!EQ>i(jq6s3#t#IyUOlAq_K%fWZEc6O6FM(^eHCd% zZenKxm5;@6_#6AHidN7**uzm91r6O-y6UN#u&`3rsfBvI< ztu??F2XDb0cl`Wg-_V>^Rhx%)s_my^S9NG$Cu-W{qNayx>HW@KBtzOzAN!`(VJcZe zy|UF73%>UBh}R@zx%r}vXsw^gyT=?n-f0^dw`0<{{@^dH+WKu1T(s-vp>MKwRVfm1 zpL5W6L;u&R-Exw`B!X=9G$sG{sex^I4=es$k>0q+>jkOL>Lqxszwfb`8N1Ic6>S}GtK9H5_GN~^7VR$EfYYqsM|9mcuY`tWkt`DQ z_lvDn-7=DuZMI4>2S09^;(kpHiZrHd*Ba;kepikUtKhC>LTSNmIok}EpUmf8@-DPf zwV;Z(tMcf^ZLGXzUNZ^dsdHVhW)1JViqv^|aVt8Fx!M8E46nI#-9x0!-b&^__$TPS zKd$4;3(AGZd#GxsNF~oWSZDlnW02*o{5+oKjixGAqc==?&$5a2)jDBGpIvFE0eXI! zxo5FS@ve4rk(XYq*V?BWeacHN1;y6p@Uv)gQsL{#=eH0YL)yKEdr z?~UO%iZ2g(p$6C2a2`Zw24o}o*|{HDvSf9BxX&K+&;FTOyV zYan7usD+=Z)Rnzt&T7CV!o_>9oSPTuyInr;d5p|WI!Ssh8fUSBUQgl}x&A#JdcEPT z(Kwd)bBZZ=#k@Shd;$DF9O zyCzEYhjP{_{E>dlob(kdu3Std^%H)gaM_bz+Z&QqBoqpF+10p528nELkH9z6G8h={ z>3dt2vFV)!eX$Xt@oY~`F!fGC_dwe3L6>wHxTqEwuI`T7$w`TMt4#|FmJiTd z>{5?EcKAP8_3h^#y&ZNsc#kU7`P|^C{4g#nygk=>F3`GdG|G);eiuXh(#PR5ed61+ z@a$!hH`2-qcAI2!Jx)N~*+)xCo@R~yPT8Rtso&U0o=a|BNV<$kDp~Wv$(RGP^vuR< zFVI^q^{??Pa?pgC)U7wilS&>ydB=qY;UF=ZG}%&GOpEzSag5ALUjPWz(-0E;B!|(a zW#9}giO1};0|!s>MH-}oTIFp}r(ROowBSXsaao9grVOD|&|ze3V6`fu{MIyEg~1d`pE{L#=Vi8`un1jy$EMfU_F+<$jk5ligs3axE>wS z9GJzM^IBb{P|2N&XMeaBlS? 
zIch5$@%@N1-px-f{3>5smq)9)Z-KVkXf$7Pog+36&M8#@Gym??ufVGB8Z4fG$pnS1;ZgBZv26LZBQ zB*F>&=KaqxosO`V7Y|`|RB%$!Fg^V9tyltoukhM7gvU=BE)!yz=m~yu#0@omL*yx> z?{tfp;J0^;V=PFiPb=VQ?7PmADh<92^Ws}zyYear;54!g>ZtBldSi)S#J57_Q7_nP zX7vCtv;${7xYKxWRB5uA!MlN51kfak{yHPfBGNdm zoRC4iQ$9S;$Vc?zDl!hDS2T}EgL)BAMt0OE#`)kq7cC^akG0or9`bpp8CwX^KDtjc zmXJW6>BYc|?yTRoYx-#d8uOJv1k(1v7R2+wsf*TdLKGB4e({GtbtzW%6Ckr1zeH-&y1z3hcA6+$W=q5Ie8*TNVax~H8Nx{>aUbEJjqXcYt5 z&ZL~tq1nBcz#FR$E8kbc@hc_8gM^p|($3>lgET#3+PWjqeG4>YE(GE;wW>~Gnpgqk z*V0NARGcOxWG8#0>~DCpeSr<{K##^k_mor6dE^PcmChspI{5sV-n^gg}WtpElo71EN-$A;>BlJio>Z<-;YR#Ur8|Te0 zVl{3y$BFiX=IpkB>&qq1Trjh}!dKNdjtvC9!&VMSpkW3rdSM$8;&1hE< zu9{y;-r(eA(1@;!RC4lHsP~+(P-WkrIvG}@&*9Ojuov9CIcV!ft1g%dxT6Zm<@o8> zdi&i_ZU2DjfV3bAP&Y5M2h%zW^SVG4qen zLV0Nr+4BG>JUDc>fLx>TgqH$*tAAr6!{Ngm#M4DSaC4_+?LZpG60(l za^`-&MJhxZ`X{ac(|W%xx={!1WSw3j#6sGP8y4R8nV_9{zZ>XH4cY{O%y=3j-(_Ua z58zGOX*EOS(C3R@xLrH2223gk4aC6e-P;d^?&2EU;d*O4aHMfr3|VMn^qPc=|CMX^ zieCwF5?_DKcL;eFQ^dIiJtq@P07&H?aK~aLny5cba)wo(qwb&nPu;@0KWa|)k9Gs| zsAnKNT3@%k>?Jzxh2-@^HcFLp4{=TyucF#ZGXINiU>&?CptI}4%(*grGUt&UHzguW zfG>HV^6Ei6QVJWl-7S3ljp{9U-(a#k9dr1xKTFewW#GE!P|tta$#!)`tM>W?vN8YjErC2-E1lD3dg-i zS_R1@pGu_5LRg6vkt%^{uZv(`YA$*kPJ&xE!Xe1?F5;!U3UDP#sG;DaSQBb zoQxomD#iB2(%X08$&-EK-b(=Zl|%gJ1;_wP->at25G_W-v0cj;fbp8ty7W?6r1#lo zIBPu)zwFf)&atD-6%=f&c4TC2pF=wiSJ*B(fztWP2A$_>f6ZS zl2On)$?$ETBq1LyiwhO?nO(SnR38Hb+CS(fbdZcasAKfRGws~#GIoHicZaJNVFq(H zj{dw>au`nfqnOy!`(*>v72G^aqm6t{%ky3%pYf5v1=W`f9AxAk^pi0?O}CL=%=qFp zLB4u?M6-+Q4W%zLU4Yz!Zdz_c6s~n3m$5w=y#?a5W!x_t-N{rKeoc2g8~&9S?TE9M z_uls%GixPK|M5|No*-CDJ;L(zlq=^_b#u>NTPk1v2(p)#4r$j~pjAn~)=!e|5;S+b zGzQv;2nU1f>vnczE$#jRN|y^&k{N^DFZP^#F^G1B++?lDnT}cw7i>w1*d~C{O2@mO z&%bAEI{M4Cxbi^HF|X+4W4^SBkwQQ1tzq1yV9J(t^X_U$_*x+l55Ss$%o|CxI8fwI zqwLB;bOHvci>Th%) zk@ui`=vCX7Y-!ElKKQN$((%20;QjWDZx<0PlfJH734QgzsqLPlA5?beyzKRk{f(dE z45nr;xKDke>p^er4%@2Z9=q<43cU|(WPNAR^{fzceuf$^YTX-4RvxRX@qToo=e4ZrB;5Hr$xC%`Y~LijZa(y&sDv;rpJk903Xngu=d(x09OfW0g| zsb?S32WQl0=zS0SlGg^k%q?TC{>BD)VI$L@lI$_Q5D+dqvyi4Cf|)RI^7*rZ10 
zjbk(dS+yIM8xqpX2zQU3C9fWKO_jzNLF2vJ)1ME;(ckLowbNlD_2bJ*g!pI~ z+9MMHRz!%5*r+9IAqWRZZSW`0SCcHmG`Rin?K@{W+3q7(D7f%R(jF695mQE0#~nwo zWl|A>**PU-_KNq@R=6QB{L48XCa(T;8mX2bYY)p9>qz@v&(WKt?y0!xu=zqFS`Ot} z*@=8k%kS*`7Cf(hc-}MgwY~^9q*UQ{aSM9%uCgEQg|3518OFBI zJOls_E2WhX1P_sOv{j=G5Qr939_imyo6a+qOfQ1L%Tj7^@K*b`V*csB74xN@qoJ>G zA+H_Tf?HYg!(8fpZG`&j9b|$pH)%GX!*cPoExUY9p;av-imd)q_9Bf*v*dZ?mB`g* z6$u=-r!UFhc5Qpfzg6rPsbW7VjEY_M`M0q*JNY#6%9SfN$3Ki)R|k0ar@#G`acuvx z)y%151ZX&C*I?7B5>dgsre0pnl|@!vDWwJb7vNA87N7g!RR6|g>>*4pjpVTE2X9ym z%+d&fSAe_&o!(iDRfk(Gr-j2qz}Qd zOiS__CtYqrM`_Ie3+_k@rdtB=zolu-F4e^UCL9 z#|ZJ%z?ewcZL{q{&SLdw!o{OuE3E%wi=kNACvhU0OY_}{Alh&_^zfP!^} zyA?`9N8jbBxay<5fSMTddV+))gJ?@)Xex!hYhF+SeO|GDtd|!z8NkIMNQ7`aa-4L* zlvZR}FOEY3woNgo8RJm@xGmGz?D0XxpYc$Nqt@P%7P5tmxFbgd;rFQG1Kug+8@n;G zNrraG<-KJ2T^BvJY>!1`RnRuZ7@j_OeCl{Ms12;iTCm;y5a2)y#v1_zY?&{=GY^WEtx$WY;;*qSEUagM9V3w)7|*%E+$*zpAz{` zmJtDD9`ZSd#v=6{vCV333!b_F3hZQPO^w1||B!A!FuxF_KvYqv{AQz}4Z-uXnD7#^ zL`R(}PAUDLH;s(Z$hPEt59K6^1axXiBk!qO20k5uiV0sGBzOvi?c;wwRpbOn zLrugA&_G!UiM3LfkU4Scj!v3j!CNig1_L5|nbu1fddeagjn=K;8}BiN&sNP?^T4ro zz-|KBQ#py$pg$q!7AvGfKg45?ll!T6{PStbRvOPk94mcHRye@B{Gdo(Nn1V-L4ZNS zX;fXpxh8F3tR_yJW2XuASTG(h=NnbXlRf?MDo85UNt??_WJ!~Q(jPfBhE4@ zsbqub1x_-PTISRPS|JFLL6vGEl8vY6hlV)xt}j*k5B${k9~M0;;}etXlvuoXEe!CV zI$M`hL!(+*NJM`Emm7CUrljS8hta?7Hk(}%cw55K%&k~}MR;A|(b;QVV}CRb$6fqQ zrEr!F&l`8_=2bO#_AK|d?W0t$oI5;#wuR|&e8*Co~ObVy?=4Go1{tMjrJtLcej?2AyQ3Bvw2 zggP5+HJL}wJ!Y@bQM9~YA1uJj#-|W%AxQefK9$iokB;N6P+x;JJ}pR;!N9>g)Ysg{ z8t=>47uJUgw`0Hf7acz8j>#O0?4{dHFvn}vQtYQYyZZf08I&~w+cj&warQ4`U9P+P zXoXV~!Dj{)lIyauq+qmZJR6Zn_r4jVuH8$qlE5Xv8$npMsR2C2_D%tX@q0nnynlRG zHp;@usoi%T=x3+-ebkzB4N~=EdaO2mawT%m zr7>ONajfNoIa4*8PZB!<*7aNNC!fp`JY#10j${b}mmCk@_tVre5XY_MwST@73-``| z9r29OnDfTRs8w7&-+w#@cN_1g*jz*oH59{ajnuqGzL3=;nQGN`Z*4f0EsLb$0kEt) z>ejtX59pFhs%YZ!5HRh2p6Mh4wtJoLj&(VXH#1^Vl?hU5(#y00?l#aoRyY{xOCLDq zi?FEFM5Q*aM3AfZH3tz6A-okZQY_%vuMc+&2O1vID$A%#;WGiIr4rZ{oBxI1ck^W`^TVsZW(Jn zJI$R()u5fpt1F~DK%>3s+M&*GUA9L)Px?H)UCv6)fQQx{x+a|@VCi@^aI7*irj3v= 
z@F(?XNE&T2M#-T;&d3<-EI)QCTi&KR+G8i4cJ>!22dr>NdN^-xX(P(1RreNgeflzbGK`8P6%u<3kz&uAOsBx*g4xXybZ+E|w|>*b70OL@ ztv95ukr*F|``&3nD%1{oQKRV^;HSSM9?{l?=Gint$FrtP)VL45*LE2vS->o3d&+|N z4O_mVHt!e)DIGa9Rw2hXj%OnhZd0_ZkRiY3`-OVeCn@>@znp_7sv`H}GU<)o%sycd zV*PUn75nZnx9aq4Vb{Iph?+!ymEjTK;#%d9#Vn%*TFOqr-B~URC}{~olSxO$AHWDa zFs#+Y9AAVgnSrd)FRq?9k&ov4Xp-kLq`ygC+E|Hhg`na`7V-Cjtg55s7{_J+CwNxi z{Fj_pYg%!DeZgnEW689VKA!|FZ56Ejy#9K$`# z!=@@E&c6>7PqnOeW3Bj&<Vl)cdx;m*? z9a08<(zT-CJF=)1u1xK-c|`ouc5M9&GtZlF^O;YkcLei=M-bpmVluMF6r`QF&UbY~ z>-Qu|u%~P;1@c}tK_Smp+4gC)95FwTx{h}l z4kW?cH5SK-45QE%vu(X*^9kw^w6QV*?V}oTwK1J93P7LvubGJDTmb-lzxfbb z2*L)*V2fv^#~~6uUT^Gd41P~@m_$}l2TlR)`&HN@@=<$5Tr>^m8{ZqN@1^z~^Cj$_ zM%Wm%1aXQMv=mcsk1!RC8RB-Q(jfjh)F99$kwaK*^uL6_F_bsp&{iH+5@KuM)Qo0i zvBB;@eboh?kAE0GS-i2KS{o0sTh8fy?;!PxeS08fk;b(7S8vp5Su5Rybp`o z@EnV>g%5s5*2g+ZKdlIfhX%u^Kdu;kKl8qABFB6o^-!Vh?Zoqtx1ZEAH@z+H#@CwNb)j6*-~k_Z1gu4uD;@GJdsbYgw`VEN*x&Mm!(=N z6IT~e=SAo*6$g(8f>pYb8NH?DSmrEjnP*4wsTI|4Y-{IS2ym?B1jatB&{h%YG06!_ zhtIEyRj2u{jkf`|@t5!|lWsplq9cui$i{OO98v%EeM9gZCk6WtJ`-C)6zpq z-?GU_V_hK6Y-SY?YHPAAjE0!oofS$;=4FI1PLJ3P=RZ^?^Mgd^f~?1OhD@T#VGs26 zrRbnyb9ogV^x=ibPhBpvKNEZ3q(X$oDzE^P_+}bICJ>MwDY9pynysX}u9a-(sa_+Y z)xb?i1nkMwQuprRPXw-3S)CsJ9EB}lTu_l(fjq2r615RAu+wbd9M6ioA3)NUmFNTe zc@~WRbmpyze5Bz?*1cTnn7gqd(P_FKi{*opQ>|Bw6`tO|raFs6UlGi`hOAreqb z?uG_#D%7Zi`seSX8cIN$!Ly+)7-{&5bSp`dfjn}SYso|2-58hF1H26N*WqQeM$i4x zX^9T*`Q?Nbkn80P8B$H;O)COo6#fF*l7qA}t(~Qa2qf56Xq&A>L6(gD6AX>vJ7x4M zNjV?$rLd!3ME;qySPwm~knc=EvwSaD4>N1_keB%|wo+R~gG%JKU1@db*Y)GU1Fb_A z!Zo=TMZR2<`DmjQT_bIKO!GPgwM$)z>&IC3`xLopwuU%}*Pq zL73N3S2hL8H_Qeb+Nyy1{Jh1EB&`U>N1XIf=v;Z@#;P-^j!M8S4o{Qmyj#)I7z(MR z#z7o;xXPEp41qPIexlq04eK?!4(_aD;t-?Q-`L2e@2C1>(KZWag(B1hr^&RHSsDw- zKyO~r*vXSfu59mHkn*sY;5ou#{G=Drpc0*}0VPssxc`FWj2eGqLy-_f86Oq9(i5k; z+czHK7*twmR{Ic}3Sm%0gD(}HdG%vieVUMmVbvq!&{A9XM%yX@#dEx zX;YRkNads}^EA~!lH`(P;sHKu2eapeg=J&&k0UHNaU0gI(q}p9bG^No`cd>6H9L>T z0iYlSV9A%h)`9m(7z_%5PX1K1%6^B}k^Y^~QTajHq~@uLLealEtyw-q$BI`rI*lQ4-~xaJEpA+&!xtRlx6~xHss=&DlPWoupQ3VXA}0} 
zt{Xr-_R3URm?|)$%kuP!!0?;S_@&XDp4?vi-~;T7g21xW%(BtcvpxZ7p`6+W6o8w~ zDcIi}?gGI?_P^H~MRJ;;ubpm*QvUiMO@RC^HkdKk29OI4`$U)5;7NyE_`HRxLy1(L zyil*i?`e(YR|i%Us^=`!sDt=hPd|aLLl-?#zh|m{FC=rdt_>AuMagz#7Aa9_Z$YtB zY4BWrwDh~lX@<%!vFYOaVGpJf=Z7Tobx$`THy$*wH)pX4V60ZEa4haTsHJr}F@d8y zZI6~1#@vX7q3bJCwOqXg#SJM;Lk)CS2Nt zo!aopy68MTfP{QBIbiC33A2?+Uaye@=908AeO(sbvEv&6W;xh-udKQF*qxSjc4<@$&tcULY2;M z>|Bo#-lG~Us)O{OHIv!fO6_0DeX+r5Yyr0^zvV`2KAVQVk{j;xEvD~9J+tDr>u|;b zG);0ak<0j>bf7i4FC5VTD>&6FH0haNcC{`TlSWpX#(1QSKpJU~{H=P6@VdbARVRrl zLz_E~+7A5soae+yw_u*w4^q0NxzS{eMiStl?C1aMK9h7m%Yblns)Y99c`OF)9XYr) zED+b1Y6L1YFC%W!b`kWLbpk!2IVnsX1eH8x)wNK)wBq1DjF?U6%NG!fm!-Jstvs+= zc2W9bsV%=0l1976vw@M_Vv3~k_?AoSOF*%$bVtd>KeCn=T@P{RSx9jVD@D7wQEX{w z$hou>GY1i6l=8mN((TC9dDE8c1JXjCZHzP-6-1@<{JN|we{V!Ntrz)-b-lzcZhztynqtYJ7^*qLp9oiMK~QGgW6~JzGEe4DKWt$dmu1LmnIQN|p@-wc-$ijl?2ux-uB|5M$>1VQTjsr=@Y2p)NGGSNiQ~qrLnNE}0g&H9Z zByEs%xr10Hkr5XozV%8mJUk*yR<1xodmO*&M1j-UU^O2;0!r4*yFOik)|M)d=!RZA z?+9|dq>4=ZM_y|$_SsBRZv@gOz#~RESq^B~A><#z$tx~#jy-j4V{YHxs` zu@!}65Q@4*x!W|IW+H`!OUhBXAdwm-yq;T4W&*#KE6VIz`A4D0`BQEe;qW9N1TFbP zCMQvI+{vehtqynr#}ZimCGBlglGFE8i5D zf02^4HYjiRc`9-<=NJ6AwxVfptl*Um`6WI(I_+3u!Ic-<|MZCJODQvo+q6mh17KvXx zP%331AggKbl&QLZwK_f{6DQ-zt&Zf^?hIHuV2u5HoL0=9_`_10?>r3>~7J zeO+R~wKzPX1RTmaH{PPH9#wy_wiCDItT1aMJf93S8R`Hhm<-`7tFYOWhc<{{@{gkr z6ak~cCv(idUn@xkz;Dc$%`z|%tt2j?xtqm55Di-?6XCRKfUKbHe7Y%mNq_&WhIX0` z&#&^*AOlqv%%*nQ*NIx0iIT{TmOfnD#4;|89$OYJQ{aQsh+KOK+N^Tedh{9}_0i0` zL2Iv>SMpviKGJ#X?J>bS5&&JfZwyROFWH8w81;$TWG;Vye=XCv=Zoy`{?(=9FOhEU z7Plh-24H4|*$P6NO6UU86yI0<(RVSIGsF*Llh?k;Zn&B?lEB1uw?9tG?Z0lEzInvu zPvEasuTWxb zyaj+NW{C}PY`9hEO~%d{cV;R&C&E_V-UjMZOl-J@h6dBJAslR<_ltDr9#k*Fg|@k! 
zj%=S*rzJ`2+L`6vWWv|}mu|sjrUHtUYb(sgdMBf``eu*DJ`l?n$zKy?=6!a`eSpSm zo(!Pi?jBMa1C-30w*tKZPOXG?NZ~~-4r7Oj?aCCJ8^on0iFr|zHm_Sn~#8Gj1H%k(N-kPPTZ>M%t@vbuK!u)!oVoatF0r-BHO zMKzQke`=InE?lrK{+M1KcwRUDFgp2ItHebpJ-|gJNO@((e8~$R%(de|Uqt(o7+C{g zhs%WiHzY28Sc}l#FTkDD3kMilT6(UCbt0vC2=0FP(GU>O!4A(+V9LCCx&qW_a~UlK zU<>1XDIicsn={mEL)44t!NwsnB|YNMXOvr<1*MJ5@-4`P{Q-T;5$1y`#p4~|DHo)K zGGNwor&s0+bfQIMw!z|es1XPYlroSez6^I5VF#}5%g-PP%FCAD<(A&wHVmE5^sxpk zvv(pNB`v0?L874Z*)G9`Pj2(^-K!-uk$nixJ+9AuDJhe&%25Bgw;P85bnm#;6~uLJ|bOLv+L zATNZFi@xDDBkY3jIIKps5N@_INy>RkBggZ>B5nf}x-{@btNv0xzW`7}E6;869^!Dd z9-l6A8rEr%6)xH+k!U<|WSKz>_3VAOiNb^mkO8HfZRI@wrvxS;;duPlLLKh!Eu%kM zb7$k6nJC&ws>{fm{1bbVz9Vm3C|h) z9EYM^?7KTGnrjDBp56agr_@>&65_vbBCCyok2xHuDhHF`YBzq?zw@4P!JY)~;#R0; zEKa*Q(OiWwa;7#?}AW$S}mv1D9sQToc1EFpE{51~O8O{m_#o4GWvCHKC zl?F{l4;$|DkB0E8RaafoMubf1F&fl)cHLhjXo_kH!fwB`L8|8PDBg8+BA*>03Ex|O z;F+yZ%e8+ZLO24fT?ry&MgSqS8Ci4?KXiG)hHD~cI9C&fW@9vnFhL^;gSnr1lU{ZH zoewo!Nwv^x!^0UQzYsViR>eJk15T}$=}rJT1#`DfJCOJvx&`TD&`<&WLldbw|NlNW}O3vcJNz=Q`>YS9g z^T#E`pPq3}*{pHNU`!$el0~>BozVaG1_3(I zU&KIH^Zp4cqi}HNkhTp)*3#UEnihdn5A4Z>0dh^>y{$)ua^l5knhD_aMlS4 zTB`yHo+Lv7Q~NjA@8buSRE7}omQ)V$URhB+YhV35>S`qt|YTk46DLL}c9Gh7INaV=FJ5e&YAnLEfsufF`>G=L_m?YKbC502UxP2yK-J?$w{viR7={g^_!i9n4q@u{Z^jQzov3db>XxYz%4Ys1ldB?(+LA4Ts9daDM#ru`sp^a>5IHGko|bS@(Y( zy=s~2zp*_@!xa?(Y%sKOL_bf+)8#edE3F=93TT>ZKQ)zF;-UV-De`OUZJQ%kb5?w; zs#AJGodWB;70V1aM87899jlBTX-A+-;ke0dnfEa}z)iYlQlq*c#Y8%60$b+?M|_D~ zk3*1hBtTc^-OvFG!_(k##3wHzazdmmhTs6|R@x$YNdZS#+@2%IxSiXCKc+`@)79~uBmfu1(=Z|Bco9wI@AT-?hy`%fN4CTlcM4BjL|&@}(s5qK^;ek1GmU1J zWWRJ{UdYaPHhtHxU{MCrshM{U=GF}BRM?e6`AL8BXD# zV;HNPiicGNgW{HnD|LHVdo?D$D%H(%4!CO_X-!JTJw7SF!{j6 z2ZS;%Z5;S)Ra))Vsl+N?o{5674Bt7bL2vX;_Q1QB`BFKm zBsZgIndh^{p!FdGtJqD96vdF(~NoaRCVH6T9r!`6a!oTMsR4#lJTEBVhy{o0$NAX>j|0_NJe-9z}|9_e; zKV9!*di+q3lM@}&=ONs)1H3qKaJ0cXDzBU4c%Ukwg9=&_sn8G;N&N_``H&;M};(aE@kf z-X~Pl>k!_l%|DG=l=L8seGZ;jRkY`p7Rzh;TFc6aiN!eOVG_m(z8EE!*7!Ae!+7Q~ z*d^XbF0XlDmpoMXM$e^Dv59u#wkfoY)#KF*6cz#iSN9MW^czSo~ 
z(~DfppuAN^M}ohuT?dPd{(eI9K~ls@DiR~nzHdzHkjXQVPh0KV$D!;vg7K*WBjV0x z%94dJ1hAo?;sPa4dn6}-OIj+9@IDX4&)}w9UhsZ1xGB!-A{-8R#ZtRK{IA+Qr~(3t zSU2Q4|G}(AC9|5`Y|;$bB$pBw%=`HhQY2@_C=76P$Uclsp?a~R!&9+m2e5YRsnm-t zP^e8_K&s|sAPi)I=-puR?qwG3N$XJJCg&|;^Ez@Nn!Nuv(WJN|O%@!&zq6g9a236u za--bc2esnw#-h&vf|nzNjZ)r2Y9JFEx3^?Si;^bM^in)(`0I^ZNS_KZKnO)H;{QoQ zMQarfeM6s2_M%l5zBg9pc#2KOghBNnA=6YzZB4c~44Ko`^mn19c_6vltY@lz)KS>; zdPM<4({XKpQeacGf;)r9B&$1Y%OLgP_D4nLGo~n1uX7jV@y@YB`rk z_iTAB=ea+?3}Mt+%Nmv*fyrC|iK>WVV5kbr9zI-k%#TBK-SFr4t$@O7?w{s7BewaD zUmm%wcr*d?Wq2;^9QJL82(99pB6Sx8Y|sGR;m_}QnWWr(D*Rd|OYcGld zF2OWr!MwV=amrg?-IQ#1bf=liB+e;G-{bFpeSF>|w+- z;w?_1dR0E0j+#|lyM54G-F&*o;2GMC`t;aocU_(zxI0>=YDncCJMqK} zeZ39kba`&#f>TNqp*6NlTcV$S^&5b9so*Qpe~y#gvi6BBvqhe0RwzeJ1tvwCQHA>r z0d9NOQcs|tB%N%NEkdh*-eeOkDkT$?b;i3P`6r1?&qmXoM)%O?ME#O(n%hj^v$@KqRIQ;DJvcPLMzB#ss?-R8oKU`SZ&~gLMT$Anr0Eu z)h#FVF_B2DZk(RjuO9e5cl>-Owib)Y_mSzLM4T5 ztnd~$dyYuk63LDt+uP>Q07*%{z_ay&AmY{Q>z>!?&;JWD3A-&N2_K_@VS@N|xl;`@ z=KT^sc2>@sNR%3iT?XhR@pnxA%F#>_zV4aPT2(ZWWJ2)|%_2#AD0F=3M3Z0g=Kcw{ zq?5-vS1!W}ofZOqa1mr^p)5J#w-EA8vl+NZ+ys!#5UNJV-`aw^S}83^EH0@Gnroq$zQ8U9Sz;$LW;hg1KM6AHW^#3{P>>D`4}4CsJsP;GJ9wF^ufV`h|b}8V~3(6>@O((Z-O~4 z?IfJ#go&zCxEB*2NzV@M!0q@`*_s=?E1O2X9Pu)}_LW`Ne$`BnQAXx&&TJnNa@s^9j6&9{9EsjSmCH7P8-dpaZwYA0q2$*yATz} z=TQ}n9}OF4-nlv}5qujKo6XQIi5X%FD3BU^nu79ZrjNvJ__x2NCAKr<-{FxZG`R$@ zFBeV^)2d{?bO~lvjjkNZq{tA9MWpP?<6Qa!{z2kZ_a4wG&~Ks(^`UUsZk=caB_HSbY1WV>54Ig$*gG3A~O>p(QlvNP@0Fe3-Gd- zKgYd0>Kf-RkFaV465k4n9zP6qEhk|Si3y1;bb$+933NZ?@j$8(@-(u9(Oj=Meo>|- z^W~5H*xAF&5-$KKvV9mJLA!Yo&1umXDb=>QF${|aP$U?VEG6=6BWrQ0!#=*km&q*u z`CDM;s2;O3mc%k{ybtMG#X+beNPsOKjn25O8otL#4M8#tl3N1E(g5-3xf;Ji>LZRY zOSLlimiTI1UU8aOFtu;S;8zfdsupjt@Bsi_3VGle?{s8anH=LO%Lub3-i^vB>K9fD z(Vag`cfKPQ&#cZBni3!ig$BhL6+sR_L2Z?{i8T4`_LAmYDx}5G+(+78^+5V}Hx^=$ zvw-n*gWRs@>0YYgVo~Rf0Y*a=V;_~jq!)nFB!GAsfl8X+6J?p6vNij=K}NT^mhB zGv*Sa;sSD^4CCKtqka!iKzY8E1~!OcA_ij)JyGVmiUqY{Ta5?<$wH=U?_#B-vixl|-%XsnORPKv@@r z9JT`ABcDB2`kMZ_B^#C)--Q4BOK7Al;dN3y=R}>xc@i)7)FbCC@&|lv 
z3(oX1CbS-~0*>5_aAkgvl*#ur`_Xl_nE+w4M9~`*q*N~!b?Yj0zN`&#WU&jNyBI2# zbBpT}<rU`{x zU8!mPEA=MQ^zagLkfDg)w-Pf5oMe&VY^#eZYv@oL3(G(hj)d&aB>5N`IlZTTM&?nX zUR25++uM7uBd2%p3KM4X6d`HyECOB0AJVPqQQl(=S1&_X)(WoApCFS%?%S!1*FZ(u zLKrT=g)0;2z=&NGp7UTD&5q2FN{oL%#Whhuw27RYf)GTj8)ncjhRUX9zpdKJNvy#F zEau;5+2O9I%@$r)jJdj2lA%JXCQ9xnG?8tIlA1XD6>Ap$LE|CRyMLH-j+}3@U|AT^ zrFcdA0v{%I{LnMciE4ib+FHT!rL3Wyn7)-}#)bMVB-AED^-{urO)Dtta!0te85^7? zzn2vV1f+Hv`ATv-niFQcp3sY5C_7t|F^)w-f)1o9?bgn_YBOyfjZq9GXx-j&eLdzn z&MZy0H9BUC~~~ zheyx>G2Q}@KpiABGfoXrwLJ@7E8%Zp-%?F1bNr@=rvuj?EmnA_=Z?qA?bAt2XvgKl|?YYDNlDlOlUlbN4A#NkkE=E-gr$t_J znImj9_lBmP#_5lIkcFCfsXFnFJL+?cLPq)==l&l@5g=FW-eu+quS4dXlK6!3lgy6W zMEUZ;fp2@ClKp{=LO56mJ(bF?%^rhP?sgUdqpNx^saNHH;|F!4h8CLRPTiQnVs0Uf zWTc-QBktBs$|qVTp66%_Q6TwKWFeHT!-49k`i|QZzW`UNz%%th=<4Y-t9%2pU1#(- zj24y1dvF$|kWg_Gv$|R7)mpAKc(fC~%d=z_R=@Q*cU-}3(Zki$?Du7JnrUA0C7>E4 z=*8dIu=Hh9`WsnMC#f^~EdwZ1B|eeqthw1k`!@xdo+>lb@v|#uYFqdc0)xqYF5*$0 z)tD(^^d$U0K+Cs4;!)fAG)YmkVLVItMBFIzOFB~&60>9MZ8n@Us6!&T1tio>@X^AD zE0l5itO}2ggKhHqV&Y(+H|%#DR0XEMqwKnigd_zu>-e2G=}|!*iyCslB`D12L6-?* z1GUL>ke2fn8d^}_oM3$2<}pMvjn06QP=;-xGiqv8T(W6Gx^x{V(%%%CY+TQ$qmqd0vjfvPd`wH2e zfoCUrcMiG(#xVhf@f<=8cm+{VvQ+1-zQas~HT|wrgsqfJ4kkk`RQ$YTF0j%YC~)q; z&cA{Y2mvt#}fVaOgF`%SMG>2Zbulk!DJzd z{HAlW@QIZdhSeJ-a?hd2PlFqvE z4x7eN(y=J5(4hj$`L~bm*QGk%#>DCBA#0na|H!Zn@5{HzpwFlx-*ez*h zkmeCFAKCuxiiEGYSlD)TT8P?*VxcXQxbXx^^hET&Cu?&(gQGA^D&%iY|&e%jGMN~&4v@ZQV zm-Y!IzX83s^WLo`(6PxYo8gN@$8Z{H((vht2{h#D5-nultl3$Wa?R4=x3HQWTg&H9 z9N2@NTW_`g9BgLuLev+8Qp$A&on0{=(2WHvbRsr~2?K~pN7}Ww*Os6A>4|e_GBz3oTKe{vNQQQ=vaMI(P2p;nERly_F{k|<42r4Dl84JSSorS35y<0OJB>eF2?oXIIc2^k#)zfpt+~CW z`7}D>GUVX6BPTwfKqRKVU2FE+2Q?p;SkbA^55(;ufvJEcxfL+73apcpo6K8xZEyNH z2?g?4cH^`WTd@@4k_pF&fx^mfgK_WkOI&8BJ7LXRPj zVYp}qAWEw={XWe%Wx<+D^+GJ_#I<<6>~+;=_PL1oEU4QQ{?ue2DzNJ7x@0S{q9JjiOe3yf zX8dMUt!AjO8x#6ajt5XsBa{K&O>P$IQR(z%a@U>@dbtFJ9m!bX%84ZEP1tnOY9%myLruoSopa>&Dw=a9gAH3L&> zom^#GXn__@Sdt$bJ^SgOLuH;P%3mZXpLZ_afTjA*({luO)ZRY<5^o_*k^lj#>a4mx 
zfRM?R8)ZZiU(z?y&}`l!tA6xW9ThC(E<-3RbZ<)#$90^kh@Y0Eqb%ezO)vTl@U1GR zd1NSo6?EHW++RUHY}(;!X6uvqnG?grhhV?Eh#U^M7H{Da#Bcu57NGL>Q(ZU4dsIoj zp{zokvDg*}q;`-5Kn{%~W8?fSU;kPr3_oJpo&ji+wN-aL~S$@O-%jzs4xK!qL! zuq##FUTw5h%zXi<=2kS8UK+MOVeEP?$8<9Si;Sw6Syoejxo=%Ut(?@|Nk z!T23RZd5ghmfP(r#P-TqtotpIN3GtpL)t?>71mqs(FaAHZ!+_a7I)+P1O_I)V}w@ z$JTQZ`o7nJ8ssT-B5QF2s|%v10%w7XJRI)Henh@pi(-k|y1X5wAh#lOH%c$*Xg9+5 zG1sP!fmY0SR8CJhY%WwC_o4e)T2#+gBejY1^vZ>NctPQ$jWj~8v{KFcK&4FIidti3?Y0fgK_rk! z4@OnV(y>lO?LCr6AZBJx4a@}0;^7s$noT@Ev?k|&u=nQSRPO!1_}cBQ+A2+k5*4M) z6b&Rq%20@f29iWkk)c7Es!f9iL>bD|GB&8BB2fw@Qj11P5%y}5p=4aY*ZXGe=XuWW zcdp;}I_F&HI)9w?$8+sxZVHZFPd~~){ z#j%b}tPO$8iadtCP0ntvae%h9+(FT`q^Dq_AjHO!@j}9W#}X0AiX)nhGb@$5@$5BV ze?Ey?b6(X_ldsH}I{&s=mn_JiODATB(OFUNSCBlL_G|O|TZ0-gD5BzyQFWlzYT4+| zb^m^Tj#yyT6e^m5{`RL7!%nHIar1tnCQVQpo!0S6^C~lW5Ql!=gRj=%R2g;mQJ;G5 z{7dhI(X5IK+*c)6!a^~Y%j(pb7pfYrT6ZUa*HQb9pT|t@FZ5PToNi;NWy8@o5lRr zU$d!tdQ;6VM$*Zi!(=*&CLNcimN-UD4)yVh4yHMf6!R1z&7$(q9; zO^y7|iKH=Ee_C}MgobWczvX{!{t?2B$49?1J=Le)P(*7cD%w*A$GFX1lBN2K#!HYD z3}xYy+Gt;M`w7^)_18SbkdW2yt&|S4_kSR|nJ9uo!$`-bvd?^L$W3axjFLYS$RHV zZ_L5;1RqT*Gv(D3!u7y5*k4j#InubLk%lLyVA4~hn!&b>V#4>_v z)MqU}$Kph`7nw{eE6>3&KLg~UTd%?E$69}CaD+!GjEhJ}R9wPQzfUL(G^!7?(bZol zC?Yo&*f24yuz}Ie@v&qM?~=HKR?$2237KkIi%8z~NMoIq*df~MPSg`ODX*4!&FS-t zON~@AA?iooZWUn@K1!wN$fDe*H3cM`ee%}KXF!CdTt{cKP|7${W1GoV7Y>1?`#4HY zVN1BFLt5G9vtG+CZ~IOM-Pch8i&~Z{9*-S1K9lX5^+yC}*En+hOKcXtq{JxePD_Nm z7?*PmM1L8el}V&hOt=(TJP8M6{`XTH{bg~_uYpx-IXV`_Gk>E8+39rg(DuD5Az4AW zsr&sHMHK=Ow8F1ooO$z&lc}rLw>;t%EL_Ppe);Eh-#>zl5}9k;fhx9)e9}Y(k<+}} zz$AZ9q(4smHi!{F?zm6=FA+9W*J~YCUtpyaMuJvaYlQWX0`B1*gHzJ15ukMb6jWeoYqcZEa^BUHv$JtLd~L%xj|Zs37%;;lj06>a+UBgJjT5Qx%^>kN z8j;g=y9l2kJ;Z7Zeh+&&7WNYwB2Vo(^x{63Rqt{pPvJF`lLCw;#*2&~q?lr8O372z z)r&&BjLsR#7P?$x88>RaT*xeLA$)>I?7Ndu#dtx_QHCN{k{*9D*ZN#9VlSbcv+B7b z_bXfsr(<+(7FcPL$?Ed!pkXvC*P4jRTCVZ&B3QqkD&a`B0ITNp8IuvH1*V-5b>qO~FC# zEp!yect(wj_9>zu3E{v*T_ch8Gl@*0vxzib6k)m%TzTF`BS}(JMcr)IwlIG=hMTu9 zPSgvAF(ZJv8_{c2sVG)|PCpt-=)$|r$+Gm2iA+E-RdPOg`x(8{KQ$Qh_=o>eg% 
z&*5E(XfH-7%-j0n7|G~jXOjsh@c8ur;T4?77$3#xr+onI+C$=sOX9KK7N1a2i*`QSY-Q89hn&0n7Z6zSw7=c{LFZBv+EwA9H74BJJRYK>6&BkG@kW zk@utGxJ=2V+S3A|0PuFXU##V=U1ZT)h<)`utdF`ISgZf4=qX>!x~pUA_#j<>$&#K( z-5)iSdFAtbKMqv|$4<1xhl&Hh3^qb@`^aU6*#*nY>6O1IYJjpfx?4og68SHUP?4i( zGH2ZMHomifM$9&pi{zYgJ&nVk^Z?!ur#l3@K^cGf4;n9FFSfBKK+HhW$2(${$@aOl zymda$tlR;)qfY~Az)8qT`Y)Z@nLk8UO1M#5vJeg|*M+RiS;aN*KCM@UDV~-wNM=xV-cqU6^Vy5ps85~Qx z72tA+Yusxt6Nb+qOJt0_oJ{UDa{MsV}hk6Ws^X3d-VV-&80@AXZ~5~0R}LT|epOx)F0y_)5qqhPP!4Y;pkTO=hf=mnXl zPQM3-ZM#tvdyNyh9S8|0<`l%nfK;oR^zKi;zdqqGpzixTOkueME)M?|; zSu`^`ens{^h^m`~$I)!Acdj1yN9D|gXMy(Lju^Opy3ugIt$I`uE?P{yWg{XwPGV3% zbMW%yqAg4E<)hYmaa=O>w#ZiCJLnr}WTTXj>c(Mw49Dd>JM})GCL5`tRGDl&@361x z*#jb(TqiD=N1Y?6Dm!WKaM4{fRcl4fR*lLGQsTGw5aLypX1xC| zEfu5Kwd#}y;i&9J$p)x%^6HvkTRc^3&Y3Ke*|S8_Y)uFY;N)aVS51(KG+qDXNaxf7 zP|V%0APnO!MCA0*m#JB!Gu5{2A4lT9Q3vxDTad8ZM~Kq+<})$dz!gaho+tbf!#C#C z&sGEsKzy5Jv#`y08ZJbcu8l|?exxyvOi%>{IQR2H46;mNdaz6$=Wo1S#cIz7)D=nz z(GjjY$a&}Y7H@yrj7>j(70;E?8WX33q75`7Ran@U#KRfw9j(sZ`T)LUQQ@=SJwq*Zh%+4j()MIW33 z-W2HVw&qTqZc+_>mB(Z7qt7Ql@H?%q^04EQ^U5D{{Q|SNzi|3QzrOWb@X;kGYGSfc z7K=(6_p7F#DIN_4CR=J}&8m|9hX^bt?7FDH!tqV|nn^x;SIu19N+|iISrB&ABLxEL+Y7!>kwKt|=rSW#aA0aFhK^^c6mBu+bXY z#7SL@ldQL(H3?L-A*tZ_>z!2(QZ3mpeb zWNcI^8;ddZ)Gb^Y$|N}c2pOe%>+bB|SYZo^*eZ-$=(X{(a=4-#M_iaEyEJZb#M&iv zs*y{f$l)U5Kl9wusTU~$Mk-C^*|BMQx)fG&L7mJ2dFcS%5+W6i=%eL{m%+LF zI5~tSFOuUDz2lO1%}{Jt?LS&`?E?wCi7v1^`U=H`_k|-zWl?=QFCrZrkWvIleHyzL zc=|FlbRGL*il+-0tZXe?EX6sb~V$D70riJy7Lk$10BbAPI<_TiXzDHuIBV)<8HXRx0GRR7fJa{l}D3 z=VNo|sMXu|V5T2Ag|byUkUMn(ss{q!d}@ztHnZnyRIYf9U7b-H(AKqNED@0952%;L zCK4nCUX|G6=JL@bUWa*H4$_RX>n!?m1LRm&em&hFjORyTX}Ieuf|H0-G=iF)MtxMM zNB~^nn##?>bM$64rp)_{lLc`56UL#Ij}xL4Mp};4;GCAoG%$338D2a(#Ebn0Da<{h zDbYjfyYWq^;e`A3toO_anSv-};@nN6C9IQ}dKYG-rA2}L4Or{P!BNk&IA#nl=tl%J zA#~!mU?F`X66;duM$pM~IU#(lQq;1z*ob0b%CRhb81Y8^*wn?^c3Xa7kM}X2UfI0x zQ^t93`S&ozoFpX>HKDJt2b@naanZEC#rWD%HDDH-LRMd8Ojl|HIM)205;~&{r$DIw zP(?vFnQVlsUx_{OLF?N?9HvidXmMUpUh8ahH_AcS<>L2@x4=D3Akzvj)8q@$w@BD;<9Y!!y{LeSqaG}79H05Bu{C~^ja 
z9Bh8+>kIGZxR1`1E5o)VK@ueqEpiJWUTJZ}32WRBw3A|#1$()WcBYRvB8cK-jO~;o z`GcVOOEZ*AdyQEe?$L5pyv|#v!r3*ER0T9(^y+ilbxp!!Lv||Eg##Z{XDm_+IJLz! zSuze6P4*1&Wdm(O>OYiBz2qzkUFrr!Pz-3wrnIEDeU7$4DK~ z#Fq3I6Vab_e*69IMpJ6&<%x>P8)leg`xHjoR}hM|w~M+P=gbrC3Ok^w(M04>z*0S& zSriRG`DSEE6k3ei$covVdHf?P&<2{k3E^%j8L)}8z5FVCttA0*6qH|z*6{t?;lU+! z{N+**!Q3JQ>e?xaiA8~?O9$+k^l|r#*&)fAB!VNzhXxFg-J=AcwhEPZcV%UAV_w1S z!Q{a>g>rDjk;3I;uwh)qQ#>topJFV!@@<55)(cOzST9UfLLW`C=JpV%VH6Ks~AEjcSCP&&| z9nV_rPY;inZE}0D<3VADGJ-~cLVTLbg$_x18-KeRtLlqS&JweZJBtCS5)UZMm&im9 z1%?7G|I0*pK19X2dwPw$8iujdH!~z!D;_y=dVceK49xr+Gh7!%&!c;c_Jleb{nC+m z+bECrV!HN(08*kkjz#Z>$&e_Ne6|jE>e=u+wyZ&Vvv@QOq+NvR8q!#*d%wj3+96Es zDvDTAa&+7v?G7oQk*~1(0S-oKK-Z2>@6!r*bq1bHal1zTHUeB*>xyN4~qEG3%WeH2YjbMSp(rHkVTVjy216Kg@? z0_31MFqQUD^|82qw{PKqAdG@$%I#JAEtuwqXY61mVi8b4;~4NPK|x5;TaU#I%l2Xd z0i#5|6Wz+hmjM_r^5iI<+cO+{Q-|;*Ysl!a3Px|!Y(iBQ;6VafY zyl-G28MiCJTQ^DQ51megnWZqaAFR8@YBt+MU0JNO^n02t%MTV;zBdu47_hL_%ZMI*pf=B%Yw^GPjaB3#DTV@3f)mK6C8ELE ziR*lvLUn0>fmKGDu{0n!VkF&PeAB_dHyv5Tvz zL%+e?B&%>I=xe=jV@pYZiQtKAs@A-a>Kek_$Yfu=DwR!KWs|RNT&qN$eyVABEH`N}gwO>=0&k1VJso9t?h{InxPh z3NyHHn4o;pvoMAppw-M)N72*N@6p+y=12#Fk5%mvV3%dcZb)3W3R{O7iJ(ewf19nD zj~8sj$h9w%LY7)IQYTOYQ}6|F2Q#-3ON!YB82cblGw8;qj=!ndohOkaZ`s!y7y~qCY zQG01usCSn&v6)QU9<3hs1zQ!!(L-VVklJQUI%6tqJxnMluSMPPjq3JOf~U2C*-uua zsvE%0*nmX1qYU?f4^!X|nG~a(perqf!19Fc``#8#_d7dy|ls8Gr}0 zVIxkGWAv=sZOj1lO#yerB$VPjKbP3?a=s|@`*#~m@TAH65MD^hV_wh{mQcR(nZ-yM zpz#Ngm5<#>InIp!qzO7Iy!5SJV_8OpKvDj&m)7OYGsa?*tRM&5ZY=ODdKg7yEj;VX zHMvT|4j?`Vn9lreFlqz+*Cdh`nM-!oweZNH0Q99YcaC5CM~cHIr;q0X_#?>`?u-KK zIOOSS*f`s9k@4ElmWoee=}HDoDa$=*8cCK(Nz%08Q^1^GKN%UoO_zKKF$kBj@iMlV zGWh397%(TzYN3pcD~;Wu1{m@kvj<=rq#+*gZktbnZOC?dW)A+wM#io1Yio1}sV%7! 
zr5={a)&xLK1L$aih%U3svEF+!P3xa?s7}sndO$|oJ;}iQ{JNo*8Beg}l}Ldx)vPa;;ZmJ|bYyNuBK z(ttvNn_h!X?B*~CSC?U5Y9P!G&6YByM-V8m5v?&*6CYXkDP4iAo|JLYtwp)LRc6VD ziaQ2B%LE28AmuIDRYkNuhEot3*#rE#alb@Kk|(l=^t|Mx0h)h#_f+P3#bok5y~nWB zr%~B#x0cLh-Z4(_lFfngkx>LrIZAZ~jX1#aY7ZRTb#i9XmtO#k=%`m?DY?};s;J>E zk5+1VG2s!_)Az@a9BCjgBje%iL0?joF&^I-Ppj2Vv_{CeK~jW<=``h~y`MZlbVTA% z_?V~!lgZn`Q^l0|Q@I8qA;kgs1QZ7h@PHa)8kt1d57Y=9nnD%CFGLH%@pj^2ZThak zHN%q2WOH6Yqlx#5axw7(G@QE)LM zBV$j1usRt5T((;mufbtWY14I=U4u1Oz^Hf=Dx!;1>$VIIa6lgpl^z#p+JBVJf67;Lq8rzKVAhOnu5QmPy;er zwOx^XtkFuJJqByCigQ^$-8!t`jReWi*m+X0NVzh7#HCL`gC%2dUb+F^%Yu57g*gA5 zAN-}%4|w~M3roHZg8uNQNtrR_zD(IZ->=#=ylhTR^#AH>;yjN z;OpNakzrPbFpd z?!AcGIdXIxfc9q?@{$1v4mPJfOUB|j75Br24GF*(Y}0B3bG^+AZ>?~AHL!DyU%jwn zf_R}$!drKEMf&PGKD%y+UadaOA z@8^})8#hoC0TpZeHpgBROsek+sf<$2vC8vqqFQ7lMUo)6I{MQu9DI3pJEtZf9Qy7s zOH|OSQO%p#FF?mno8~~V3+jOG+8z{eAA4~epT6Y*RK@Rwo5|-<*$A^Gy&?hD_yZ+O z!p!0yY$`}o19dx815OjD(B{11ncc`gXb&8(Z|_P_hyfiAna}9Dj3|_fjC9U?v@tLI}0|5C6&dz~6V1YlEe{iHz(t0@JkM zD^42(<%f*oUgGQ|kyW5ae&Xwa>OiyeH0BVLX3I-=m=#s!R4Xh8XaEaA%N;6v_fA^b|`B=%wXSY`|CsGPj{Y=q|{ios#VOJuBaKGCHS zFt)Sv7adwO28meE3@9Z%@`x{OQoCpBjfUo9%ag3S9!E!K+o~XnCc&RRi5;J;!RK)n z2E~`eADF^3yV{v*|EaAI^7ayHu4|pYLdHqmAOUVI&KCfSspcgdkbxpXp-qb%*Hazy z)zH$Dx|Ief+6&E4v1m+uJ4c-J-o$?Z*PJzcWk?u&dEPlX?4Lc~m|E^^ecj;oK=jHiU0 zCy7BlXSuJ?Z-}G9+e9a`M!-98SJP&n^R=S-nm@ql+Jk_hhpk>~2T+KV7EtTmadjS2 zh+1#sk$nd5#V;JA{2SYQdRLUimd;fIdB0gib|t@;Es0AS7-;UkJDgL0iNr)fr+Kfc z=hTBinSG8?P z%s>7WmGML5}vIS3MLesuj#X175$p1-Lvos`YC(*;HBCOJ}q1W=)Vm0F`rM~{EFS{4@ z_fujg6TuT?SG|?f<&Q_ABGIs?D|M!uV@Un+&ej?jJ>vPU;PDnc4)S!heKs3iT|kQa z^EJE6R`Ol#Aq44eqc9~BVON9|7d6tDM%}a;ZP`1m@OD*gON@V)O>*y|L#dgcuq-Q5 zW%0k|&mko_Qz%n@^K~~$n6R67(0y&b-49GK?xq8DO)Y!uDI1U0={e2Q#kaVVu-@1D zq63gnTLcIwthuDKtpCA83sHiv^e&^bSw*eROMKt#Eofdfr0XxfJH&IGu{hy%$G#t* zY}nZ4Xl8@ha|qvmY4LWIPTu_SE2#wgeBOM^t|cx*q3uWYzMoY=13sbKKcgmM6LygJ z5*bP<&|Ow^`siY`z)>0t(eQFTXoik}54&A#%VKDRjuCA@qn(K=ox1!(3lzf=(J!F) zk4?Z$qxa7ir-60o+` zstIPws|qet&Nd^$t?Ea0f7G#;BrCkFoch}9D|-T_BEvYd%=Xvl>Y^FIpVzjv*#3H} 
zd~7@ZSuqZw+bypFhm0M)*IcE7$*Ycsj@ zOoB#ESNiulr0!qHfMuYhH)fzqCO{l(z~dR7Z`&FuR0=La_UpFiZ@@uLa1zSZ3MCz$I}J) zUD?1tLA{3B=eM_aufKt@>{0H0_{XYRFzuzOZ%}MP*+(acx`kEVH}>9MqG0vV#g-5? zRe5vw4?5YJE8|<%e1TP>GG~7x!d!GdYgfwh%k&1uyU0~OB*R!EWFil3m|ENZs7__G zN~gKSP2UQ);Xf%Da=x3Ze_Z#uv)<(jb8xB_eeJ~-5QdXLwd!w5-0BE~*HoGyN{z?W z$65j5Er*G*WAyt-8c%*^xkc;lERXQ@dUsxYDZeq>&DAf?WB_iI=E>jPZuF20JK=giL;Q}{irb&RI&V@xb?#9lE?_di#TVIXZUXR>1 zeO-|!wn9Tnz2(O$k6q6vxHggKJKBuYL4A3tMG+L2Y3F8~edd&25*-46wqXbhV52A4t4H6qkHiBH!wzFc%GWO z=JbC}D>~0MlDQNg%U&!ay*X>?snWQ2E$lhdZN3`orPsgi?2SCJNZRv5eKvM~UAra5 zL7sBCD~)SY=Ww#7LHp?bD7-Y{#EiGVJ0dPDVVEbJr{a2Qi_L@fRr7np16AwNvj2!w z+{6i5$E^?At47^)U%ngP?HB<+12+}!RApK{uHYz&@#;S?!zk3GKcnP1lekWj=b8ER zos3PI!X-kU>eGXi_`pa?>g3rr#?dlXxzU?I1w9el#jW>uQ%tAsmWgAHaWj%;u{KPt zG#ZIsGT2YOU%8XFx$3m9ldx?l;a$^6}{K%(Owok_<@%6(dX)`>CR^-f>-tr z{09B!n#*F6H&CV&e1k^wEU#nK^Z5g7pQWhJ;3EQ7KF%?XZ(sf8Cs2-V&su-UZa!BM?~K#vB#<`Fg7HUK4t=1YE|G?eFZ9zM`NxBS3ip~95STlgW0(=+E5 zJFevKt_9kek{3NtNFR6te;0r4+hmTS2uZWBBTWs0K?KRD00x!T>w&SD{K5g!&Jw;j zf2YF|2@A#$dp8{T#wO7lV3;!5s|aKD{qqJZN29b z%FnB!%OMP_P+(^5h$m^D+GBfDbL*`XGY#uggke>QC)RUUe2#QIoc_|R|J^Cg_!}3` zB3C15-RZ1xgyiU%YwQEe(Q|VpM z0Xg%NaIGt&e|Jr@+%8*LXpy)bc;}dQ!r&fn@`rjy!E1gW(2m&6@v~8)@kf6Kg?V!e z9-xFDj=ywFvz~+p2EByeA#Z9eN|AL9?eLb&_^6l_h3X=v#}|deSD@3N8Y5T z*Mwin;V&auqmQqDbx|p7z*Bxe72p+50!q8FCeN2!$^8|{=~A#c6}v98ec3e#5UbjJ z)UXBhtw~j!jH0<-YB3gMzvlpcNV)s-=G7T@PyN)oAS`VH_8rkzzR zZSL2L9(caDhw%N0kM-vR6m9-~ug_MV*Kfxg)y%%J*re&pV}qV+?l^K(yxszCjBm9z z3k8)|%EzjW-82kDAQvY z%8n__UAjb=$8lE8e7kgsyIRD7-+7N~M28zYu1*sd;@OT|PsZg{>xC0-+62In)W|&-373S`XTPqV)vrG1P5xjQY zZHVt!{IU_vSF~xLtey!Z-&G5`w}zc4ZZ&aV963yk2&#c~YJDQ2Ym+!#-?M}4gcOt` z*7{dGK^-K{1U5knd2@$+!Lt0+U6XTq9pcx7{na@jUYKMNbUvr&rcCU>LaPgBitG2e zJI&3hzggp*{Ap|n!b^CpRW`1#?GYWgbk3URvj{vw zM`Mlork?sX?(lpomQujGYBQ%?c)jP`l=fqC_Y+|Q|HDAI47+B9qfJZ40E&Ut6&&~9 zGmkF$^fF=Z?Q}JCWUWC}H~XvH#e;fMG-6MwU2E>zT2=W=Si`USOc`(fB8}c*1=j;U z@f8C;te;d34oIrQ`jcJ}`O@pdG<)>|ob1D0b<)+Wt0hv=?Dg0BOvB(%%HJinOvAtX 
zT-FATnsoJww$3I`w|6<+so&S6)_?9a+UK@{@0Y4pT7-T<=l1fj2kgc*k-fqETJvj-qj1mgZa*_ zx_2*TJmK#zy4{&Fec1RVUofqRePzRvt7RTv?rhx{HtVP4){W}BjtlKuetyfBJ3>>I zEj8l268eK0wzu7q}5=a4BTgHJeE|Nj&(nd#2qDZrERLx6#BszSd=S zXu&Hiv!L9s3Y#K|$MOueW+{j3#;c~J-R4+_zX$ZdK*jFvj;ZQ*3%*>?L}ZM&OQOZS zV9iZNS;Kb#acNrgrn=;d=8C(73{wb-|Cz_xqe15S_WitBR9WQ-f&+g@X+F`8O(H+6 zqOHEm3ate}D5OiK20}T!@1}H4lE0)J&bhHZyeJH9+70E39XQIHEthM5%+oFRFO7Nmj<(O%KDhE@xjiu;h6HwN)e*5LZ#fA!((AOBXt%z8+|f zj_UP&7(4I9wbb2uRlF0geUl5)IKo1stP~5caoWEr9JaEly@K$d7MT%NHV1vXeA@}U zMpJ6{!Qok|M0T}9J8|kX%%dnjDM3}!uB**5Kb!lueU$a&ji*-5Kf)A5lM_^5>7n9d zc=u#2(j@y`9#Frnx%6ILb85ZgdJ{g%SZzIB)WCUPt3#N%4n@0RdRBS*v*PitO6{N* zi~6j0hWhHHg?*&NPZAXrW$n0fe4jITi)7bHdAW#Zo!Lr@NjR2sGn6DD6&k=D{zrY=Jb6&D1b6+zX46-aEe80Z=A(5zXN8SEexY($@qbmxxKZ0On-3OyDY@745 zIG?7b76mNf&o@)L!e%?jN{_PX?$XnvaVLEXk$K>kb=mS+{choTGZmhOzp#m*ab#Ca z-49X({j@N#m8l;{`_-3{RYJ49*Zw3D-7)DafYD~sIbpe;`sQ7THmN;)m`Q%GR?tS5g!#}*# z8=~zex!1}r?v6{pea{-{g%xY}r`+0e&-@w5XuB82y@ldUl9=hhf!p={my1W$r$$dg z(nfyY&3Gwn@~bA(G%D|U3yGuE^&5JokXAKl=V*?*Sd+~~V~Hd_lYkL^;8eL z6a;IF((PW|uY4FO%a*yla@hq|wOnFsbxAegq3mtO2SooiHk9Rb=Zw`;&!!oR{`6yiLMDrw0ORZ+hYAH1&BzuMoy?I_ePqtuEWf8xp%lizeEqy zw$>iPyY6Tf03(h|&Ty#>$@YhmBk0h)$m@F5Z3&^)#?MW-Pp73Cnnkq`ng?vO zsnHX~Il=bAzmMyfB5Az)Mj}`=mol6DSAYm4S{k*rAT!tqW>U3xh4xhIb)5R}pVscJ zGtp#@Dpy!-wKiab6T|$?v8gkbC^9=V!Q{8#>jAumJ(8!B(+CjoTNvqkY&$gnB&&P? 
z4eI0O^sg&w69(SC0<;(OqnrClQemkkx)AT88PfZBw=A~b!IEfp*6go5W4b+3(Vr7Q zQ+Jozm;_a$$X@ezmX?B)FUZx(tDhdA?OiASPF{Mdn>qLDnzhNPnj`1j=5AmAw8#EmutIAK6cE^GBZh#x|F@$`z{T-u9?A`pFh9@*VkCT;#AZ zZl)4js&8;c#kggV@v}wdPb;lH_8l3a-xM2VzkT7pZOhzils`RA^=txe8)<6n3l}?V zTCrO#UEP>h!WUYV&o^r=Fqg2b&E0DE?HaF`PXU05>F$3jL`AAzP7gS(dL_L~=*}WN z2m4l_H6CR&#`lm4_iDt8JRAZ`0ggOYy>@1{!*r~FzBJXC1pRhv@N(Q7Qv*)lnAiWJ zJygMXcTakM0X16vajH@2tR-2Jlrv$3I=vA<Z7d^q;<=B;85` z=+`rEu63U1Z|8h{4P9N3SMt6W<>N=W6Chp2ezTc(%d|*MdR=C@QP1w_(xXr=@Unka zJHlz-*9Q>r(e8pX18JfL#?R+~t-TTQ?#7yfEQ%8zIB`87Q-%98wbfMSY+PiZr}jIm zJ+u0oMXwm&-@*B5wmoVp?pJSL>oVcKip!dJ+bJ`}dAf9>n|v5I+%B@%$ZY^XOw7xO z6VEyqIOY%3%p&Uha@{J)Um1$P!z}{)kol8q`B@8lT1WjZBhy1CjPI}H6n*ep)pIk# zU_#DVmxwG=j*N2T(F1Ei_Y{-Ko0U{K`8y*ooBxkAr6d4$L{g{AQtT=I<*zr~q1a?=;RkZ3k=$=|a&E9Jv1)6`LQ#G4f^OjONE z|MYvNK?(62N>UYj7w`{^z8aybn-+GRH!LXYG$9fnTPcIAWp%QQN$+%<(Z&*W`vAhI ziKZ`Vs@65@RTcV;E8V8BaFwFNbaM$eidysc(hkw$ewR^L5qYA>S2?YGB%)r+hVd_x zYIMXhtJ<`%XT061tt*4JQkDNWYg5Iei~B>lGV+S|;dc?I6vs7k;iB)1gU*nxq;Nyx zu>B$1HjkY@CfHewqbY_*JMR%Bp8M;Of zh;T!C*tyd~LanaMw)tUPf9j2njS*T=GjYWlO*L}d`73jRKI8Drzx&ywcbhqX{adgk z-E%_RxJ6exWc#Ai8d-|nK(>@Iyq`Y3u#RNVN!Bt;iF zve!?_{KAJyI??QV`jycYVU&wL;yw53sVrAzVeyjf!(o+)fCLS%oi%_5qb_WmIZvx% z*v@sre{hmlTU|-ZI&>>zw3~bqJ3`JTR|}a?TlYelkYvj|XRM5a-0#CkTPre8Ij3T- z->cs;{&bskoUO2lB+dk)^_d$RuH$^rvoea*^hF_Hx$~3VhkQ~B$g2|~Q4M|2I8s}+ z5^+G%#khBxpRQ2qXWRnMsnCOm8O---uAo5^q?w>$MlU*>QHEWb4T%)q@DRz9XcPjn zI!=(Gf@UZHZC>oYHz%W!1a+vsJVf&w8XEc*pv+~TOo_zHy+<%vCfLtP$1{vR61~{d zAoUu2CB6R_5iRtu0jTl|8IqTNM*m)>WAzmMw>dRdW=vQxAmw>?1e08p{{q9hluS9) zMwF)+{Qg`uWHNB>w5FlIChi5^^6|^y3kQV9uy1qAMVhhh@ob8afBFXemP4(rt&L>DFPbn}&F<$qq*ClE4 z@54tw^&Wa_c5IK$O?UT$s9|-;RB-L(y6w3(Oft{=9cVkB|4qg!dRgwve0o=^QquL| zh!tL56jFq2b4!&$F&Rrgdi)moaYP;vy>u1iuhAjo=w@WdTli=|(8y5L7+`$_Q@5F+ zheSD%waez(IStneW9zK8c5oa8s?zIT#s6B1`WUqQBZeKxu)F zxJU{=@xONq{#=2)qJn}#Y`R>gt=q4|qFXYybdk=JqRn8n3J3QRrQWZVmvTifl>>1ydU<@ z2#+|s=$AoZN&&~&%siAslZg{|44E8BRDd2Co?Tj{13I5+O`nsVSpZcV)KemhK4y&v 
zoVzAtNZAJTWR#(~atgVoro99`=G}6pFfmJ0`561pjcBmQxy`usFv4t2#*d+g<8HyY zEvuIvhe1zo7a|+$x>H-nPSf>IE^NVFI1cnqi^OMS{7i^MJr=!ga!LVLHfIWgzc2r! zaU>yc$SXlF_ZOJmodUC-%{7nxp++?@n6*38ayeIjaZOI&LS%b{^dw+ytGYfpJDFm= z)ZRHl=-`Z)!BM#$gKxshIIRD>&vo_W5LmUJT=g~t8Jep*_}}izUR*C`v)TCQg>9AW zOrnI!@SUO;F{z_oDByBt5539ik0UjeQ!vmhW*Xr(%#{cY9r};KM#uH<%Vq(V=x#$R zsI)tf*Nbj*SFVS|&`+?3)8{881!c_=whHyx_Y2)jlTn93emzBEuc+foi*CgKUkPh8#NE zpYKS62tpDDg|F69M|7JrT`o-V_6bU@|68*PC z|NruFLbJqlNI{e_`mjP+uN;R`9|9 zmw&bY;j8w)?}2z=PdTF$>8e(G6Iv7UQe(+xbV{$PO}$}e&4t4)?%km0iNz8=i|Iva z7Ku|S;22Oy2_8u)3`qTDbABg$$)`i%o8=#+3x}dLL84dFy^Ge!f&llTr z_;As{k8E0;AFmYS02QU~VH@^Z%F$a53wq%5Eu^@nWK?2ys0=1w7)hp?SOi#AGCR+) zYyT;EYDCWcA(>^H_24@dxJ*$6fTX3jgC+{o;m!1nw;aVL*9E ztE{Xh&{ag-)wOOi>3sq0^$bRQg6uDB4H~+Z`sX%0JmO7>=vPs~EQ2Ab(KD!b-Q}m@ z24iwepEDC^PwKQA@BFGX_;pzs8tg6dWnjvsN=iy-$U!OUbvuvy6gE}W3u9Q;-f925 z`FR6Ft#{af|ArJT#3|H@>2pEG*yMT+y$*B~4A)oC95bLu`hRhVKU~P+} zWGTu9r^Br6y}W|Lqx&mKjkI%M`p^~^Tt4fmB@>-TA!(#!Qrvcn+$zkxYYZE(f4gYghvuX7D9COt2_e zN$)3lZ&ZbAdYWPA=tn2z)MURWy#@*X06?S)9~_;EY|&L> zy@Gz^lk)yhZ;I5XLdmrV86lTS_tn?JLNj2KT#x$Y6kYL=P z6oFQ^Y#nIA25_!nq-NB4@Xc*_F8B>Q=OSj?`~ILSWDs+gCyy>a?0YfAb>j+B1$^cXiCSqQ zEe$h^E-)K_Lvs-KPI>4viyH{ayz|#4d#6Zc9?(Zi+VW-(Gt*)ym?EQ&i5cM*>az)^R0jB-XwBgHjddr1fYWvWv2Di*BjK4KNZKrdzO{V6*1D>X9MhnOJnQx>`k z`})CFmY_G>h@?Ayi8prblaRFA_OJiv8!f{(0_c2_oM>a!+ORYhP@_(+Wn^^nBp!oH zl_!O*L$6>pe>ygJG9usXB+0qVwVI;(@;RKD%mf|8wv?Zq3^Nr?Oz{Hz^8Ed2`fHX6 zzPg*sYuqFOUC3A8aysZgV^Nq0ZUqH-jMZSWF^~Z=@pAKw{1$(1SMp%mD7^*3n zR|#wQ>NN;|#Tga_s>k}g$@$Q=o5ssNwcJm7WEkVEo$iyY%Wvo#vJ?{;`j(aZX71yi z$5R>_n1?87cvQvN$&MoXme;DK44!it&#hvTZ{1qI(i2CDj}|5K&7agvj`#^xu-P=hhyCZn$)hLk@HBh&WVd~LgZhKK}Qe1R*N*@4~XhV?-hkqKJ zQ$xqN`f^3hr4PV>O&}k^RADDdZvfFdgANL6IakEbSFzyv?NzI+Md{v%SODxKuVjM; zo$xGHhCRIj#7vBT5F*QkyWqg`p5l&!_aw~&vOwNI5Hpwu$V4dwzYSRLzR=3J2CRqoJ!aCx}$%o2rqA@Y_3f9KZX4zRQV#Z+|B(#>ACC0*$XY)mXDP&at zAt}pPkxA2tqIh1_m<6C{`(mMmxvJPD*kWr;hK(6d6DS|R1-}tM-f=?ssIG82nCeGa z!tK{^bRGvXd|kUs@a{E#{@A+rcj0?~&OXa{-Dn`%RU>6hp(KjzL1Y6#Z$(T>Oip 
zME}wSBm;U>RMZ2IB&YQtnPOgS)%~IDw3J&5r(zc;LvjOlgJTr$&jjsf>cIZ7ml9#} zv<)$oRjzfy&@bvIec*F86a(`}NT-R76ebNkZw@l~bc(Pr2z#UWYLZZ4+|uAH!vLFu z@qVFXL5-WCRWk3_+oAu;0(HN7B()%vm?r`lDt^?v(?z!jEd zvXfJERMf&_LqD4oIT;zygD?4op=S@%bQGuklC5~4GQac>-96Sw&Auuep1?h~><>L@ zoG3Vn6cGk9o_aazX=wW(vvq~_{nYRGK?F@g!4dfHS3pK~K1TJ@uvVHpc@$pE?FlE3 zqQ+f#K=65lMeRyVB}()Svu-_wVL$GjV4u8;rILeAW;v`aG=05erzMk61V62IxZt<( zZDbkLn8fD(3*I4F!1i^o;M1yq{O2+pK8goo=(kv|gU~zuU*H>|-c&Rvqyf=gj?6D- z^4g=A%k!dGgYThUhV0U z9s0~QbMX;qcu^zD)Z>)Mh+gOlMpXE{F6it-c-C=>G9`elU>iZKp7n)9M%;qL;DQf- z=(g%#nu*T>iNgb^%3}|m%P`J*8h%<~jQYhZ#lnp51hvNn50cc-OJ7{@$2|m_PsmCfy=Y`B_lJ^snUYPJC@bVcsl=nR?4`k1jWntd zM|N`;EL7)!SXut~nrBpTWOl+N)G9jy-=WK-(1w+sf}KHVMN;xGg$%M>HMwC+aM>RU zBESeF@&%u1&xL7v=l=&(qhtKra2Y`>m7zOqj88;p{2nk59nQQR6Y;co1qz<_tWdxQ zWVywoY)^`P6yx1tegF_CtD+2EiivRVXQns^R>aOTxGQ-6C1@}$$?Peb1q~@BC#jB@ zjBh87f9tCQ7MB5WQLc#*ur#oEh~;4A#^4=P5zHy&`U7E+Jed1fc_Mj`^9)U|d%V>qqkcgJ+ynMAY6aa`ZY`YrvYEXPNm zFNl$9{MTd00T|+OKzb#(+Mx}nTz3+5&AWDd=*KPNGf@j+i7X}u8Kb!bLO?@sQOxVn zfK9ZQ6_1C08j_fhP%VnlK~J1690yT7^NN|sD6r|D|FhgNG++7vCG3^EV^-tOqhHUg z2pC!^tepu%E&r2gf-7rjOb2muMeMvg(+1y}6}(2Ysz>lEN#p)O)Cf?Pk;PeUV@V0V z^cunyFd>hER3#RMqhZ#207~HWOTk@H*4ZC=n4XD7h~y8G7m=o_LT?@9kJ7?t+bseB zzJYn56jLVQ0UE@)WOFioCAcs80V~IOArZ6GWcE2xOyjY|kgSPq*sNMy*8pl@LPe0C zAd4yDS7{6C!+&Z+$Iz^AXk~AI)^H5PU!saN@frGB`f}s8{L%x}WlLrardNb8GVa92SsJs@Y8l7JjwMPpXTYP=rEIUWzlVWR}kI(adFZkpcw1$77 zpK%5RcMNW>%ByYMKEO7%?W%&GK?C;;y)*K9735XjlL9^ zntfHA;*c5vOkG}%l)!qxD`Y_EcAjpWgLncQ8he+Hz`=Rfju@f-ta_qPb z@TeSfKA|-W=VCl%RJ5ZY=Lg!uj4=o0Ba)(KG(h4`!_+^$+dTB!mY-=yAx4@ga*7V4 zt!)nSN@KKqTq%x;`abG8FYLAu+*@hcr|QgzT##Wodw&W}mheA8P?esLj!)mjh}#5b zA&5w9@JRlSvBNpy1w6B=qkX3h#-%g@THqTs2@2*6V}`~F@%fwqNuzna`+p%YNvGrbI%@t7d?42CS0*<9aKpyuvcIvCRO7UKKu0v_b$7=vNR(VD zIS+?^*3N(9z-;k&;`v&acYGdzRVKrV$|J0>#bgkpT{E>c>P?1#CbO)LAKI4m_tQhV zWQ4S4*|J5b94K0+%FvnwlK+loD_-Vd8>Nvelmt#hgZn=)Nz#k)sK#jzDZp+OeNzC1 zA`{O^hPVk-;)UuLhVBuoT;}N&R_U?H6DbB1(EbRTHKqJeap=DXYh~!A$L@SiD_r$L39*TT3N2gVCE5vRU?G8K($cFm=}27sW9D`Q#n6Oub*c@Xyt!)VOYSZ 
zmIj7GiO`fQ4DbaH0b#&`ecE!zXsz+%Db?O!-bUnWDQ*oCeDq;s1}pJZtDX&;HV*s2 z4t2p(BxjW@BTJA}h%mVpnOC$KjtPo7HubO_`d1sAMAd521q(GR_&@&YoDtN8QBO&H zD6u@yw>YX;v+e<9GvOri4&);+ z=rH8)n#fL`L@vg)6;k8(~5ju?vOF+#KQcsq`c&t)XPqmWN38UEYN5qJ-^ z5-!<@e~p?}c5<5I&-H*rw;|D+5bYP;2L;EoVmy`|IbF#T{&@XO40U95!bPi9RY;Fw zS}#!RSibxdP!!=!K*Y}6c?Xmy1&*x#g|dek`vNmc3YbA^3m6v*N(367V%w!V^uz?OwZMHIG0}A=@h&uWr>MKqbKs?M zNeRs+ra>Q+{(Klmy?UGapk%eGhPRr2!OcK8U`H|&FtLGsgToxRfNG%c-=9uw3 zzi4eUpsw93;xrYgGKEX!II+pE<_lKv_A~>-rQ{l-EN)urgeH1nR7DB7tUL~;mXAi1 zz(m)fKNhSw^Xa(pq!yq({25MXCbx(3%#}W4&s4J*UDh~B@czF z$XhOy#6LtVE4~&K4R`soaYyM}H{jP8d+?!2k-+^IADToNzO*sAy3G~biL5^Xec^aG zH>GhwQTIx^Uw+1vk&sMl;5o(x{#H$Lw@dQPAr=@&>>NvW4qt=-r7dfo(dbtOVMnU`fMgm`zUlmzhNM)T6BLI5aVUR_7+BKf^(h*G=yII>bn?fpBM z*af>!KUXH|E~Zc_TKWu{bN+;OmeotaGR)EVxb(MMNb322*n9J^9M`vPI8zoXmP{qH zq&ZEbRAg!*4JauzM=T|YCagkHY0^AsE{alAl(D()G$0`~DU{~9zWun>dY<32J-_dL z|9H3U*}iXk@4r|pch`Mi=XIXPu^;c}+4@{9>>6gz{8qk@n=^YVkQ{Pi!MT z*pm2q^r~Q2x2NEVd9PmeN-jaIv=!CQ)jthK_!OKch7nRvlr#3k3p^e>qz^}mVoGC> zDZPgr`Z0l0Zw&dD4^l%XdZ7dChw>@cWYletXpLB&Jv5#6AMu!Y!am<(lezx~`2;xW z!+fJS%y0-<7#g)y_RFo!A6a|abR%SHk3BIr)9HFc%VvbZ(29R$eK(}3#VUkx_3P># zD$FuhS6sq|TNx_dw(O#4+HKg(7vjI!s>`Fspd*VX!QCRP?CiZ}MSzm}~*I%VlWaO$=Hn zEbPjCqWrY9b}zhfQ&TT13T`-aG92j>FAPG7zC_ycn3KFs=lOm5`cigt@R?k#noGGh zd>tQRy&Iq}9xQ2juqs6Rdv)^dHnwYcuG&y}g-?88k=Y9m3~x-U053Xx1)qAbcKgUR z&hcAfdSLfFke>J_w!kLS16^5f(fKS5|JyGJChyrlqR*;Ha^ zQc6Hg_Tht?0 zl%kcUe$`~(z#=BQkUL=!3ePevaV!R)lHmap0AA#_|9xU2=@9i}kHSZ`@-Z$!(SLXR z^Bl{!^#%zXn$pU{vB)-LrG@pO>mI!w2ZE z|3e46;*Ag)n`TjiTez-wnYa!`^~znWECb_h$JtmSAxbq8 z)zG^4qF}Ac0l+JAe--}8`Uvvs%~0LyS}FDd*x2%=C+=;%llk6~qGOy<+Uw6-qfPb_-g9aPd%tdQb-4E`VnZIJcPfdgL4W zkO_gfc^Y>$dtHHcP|iCM|0(ASR*&+I=baREj48$1i0JspT@gEQilop#2{g1^=4i~~ zBBPbV#ged}|A80)e`d{zL_;#X83Em~xBDcJ=(f5`48oxbI z0syOvvsHm1G-?Rp%=-=H-t%xdK?JqCR zqG&}iaoTl7!4!i^B!*283X+exN%s}t>QH$env5VyBv5akM&;#v~F!o`n6aG3rFF+BS<47MP78`Qb(}lN&Ky?iO|PM+v<14p5z@lnxpz5|XR1H&(_A zv>rkykHL#>;FWc&F$0_!=U(Q536Qt$!qRM+JvZ^|3k0YA3A5p%?`k#6sjK1%Ao{27 
z!Ko~fzzQMRZO26AIw@BPrjYy8g4&d7?xIxRAPKW(PO(@%E3y$khmzOkGake#gD*=T zYaiHM5+kckF$px5!#lQUhSK`tu`XO?Viz9cD@`~VEo)>zSzf?^+(qy0E|Ff`*nG^{ z{I4V)q^j%F-j2h-HA&RzAn_De!b6qX!cj{W7XWTp@kuD$*OKg!nS+D^Uk~Rw?PpEm36hlHf)efJh5{Z=EIp^+`W+Ghfi~P?PDKg@^pJ&zPf+W2^`vlb z6-y4ljO4*EI9puX7#TjN(RRFqk`QsH#cUP_-H8bs^_Gr9*Y~^jD5)WO>bB6kduS*8 zKmOtE_lXUChG^r@b5N!IP-BUV9PJo4fSlp<#hYBVa;JAwtxg2+13J6;ESt}^D7S9bx>iov92lFWJH!_eu^AMt(vp~?>xdjS z02E~jcI;EMFHqD_ZNcwvt?N$Z&;e}h6jn&gLxS#JQN$u_1)X%^_df|Iu20(|AwA zrGW54WxG4{Bn`>}R4vjEw?St^IaT@KLvq0y)a?m20mZxRYYsGGU%kql`@#T2uJjcS zh-Fy+j+CO&#PRVD7L7tw6fP?F$(j;6*^q*IecKwHlKf^h#&}P>!Ye@e`qHLmvM*O6O;`SkLQ$XP4Qb`N+T7rHM>S$8AOq3vT+SAa(RhKGB@azVIV36W zFYD5a$CU{8whX4CBX)O_cn7wyM-U%TcnoDuLR-r)*9+xjc3NLNTQ&-TT)xG~aM-nRPVJc@IZapUdro#L{Mc{T>05jO$L z_cTDh0vrn5@J3SChahW^!yEi~hl_7yhR;P2en={LnFMdQK;~4Fh%KaJByu&zNh`=b zlX*CSpfL}?U^UNM=SpS1rk^J(W0ZejQy)AH$*>pZgnnT>!8#U2j2T=7fX!U8qXmWv z)BDw768_>H6oNLdu4sS!0+{+bsmKGcVCjv8YNCUnLBWLtG& zCrW`9Uuovd4ZXAdhjRF7v>ouD*Bq;Dwpm#MCl(!#97|$)H-birf{v58UL31pa@2eX z^dLgXCKMOev2HTkGtahX{IGhITL_zPFWc8jCA4x<;Hpe5#Ky zgmi9v?t$c z)6Loi<3px3Z&%=ASudG)CkkcCHO*ONWL8_h%E!$Gc)-SEsE%)t({4({793Dr2c!9S zd%OAXHgbsL6ep`v(SSPCuHE8^+%E)@@A$l6=l^5FLoubzQxKb!tluuduG$HBUgB2c z2&$%tykV@!()v0WK@3zyj4dEGpn-_h1@3WeX7S$EwuX@;Ir0nA=>m#yi+xtT%Mp8g z*SrN}pONwgGZTy5uPB-p<&oH1vCB^4EE-pdu$#1lyS_UQc48d!s42dW!NIMMI^sEJ zG)>_KR%VwJBes=W^D7<@q=6B?hrDw^evLaZxZo^3uiZsK1i=EUmolsi*fx*0x7_F^ zcx`y1@69JKT%cHGO|l(Gs*tPRk@UfP&wL~;+|hiGiHb|Mh0{>sLh}Fn3Rvla!LBrj zM;mjGN60%`7b@tiQTsBU2c?kxH%g-`?auC{=QnB(E?=1Z5)q-W!x0yjb?G@d_a*bGg>E{X`<#u6uPLqzjCV5_nDtTC zm5u9_p1}4q5|aO(B#SKBga4=goYk#+khx0b>q$x?5(bc)XdP8WOU*cXba04A1| zDwlecm7dTFdO~X~Px9>}c9vZ1mvx#JrMi0)ViAGqPS=f>+CcL=hXcwmS?sFdO#HhUHRUogNR$JO zmb#9BkFMg!d;hjiy!{J&xTenS&`I_3_%UblbDam2IQFfjwYuE&7}cP|7#hjf=S7{# z!B-b49=nPY|Fd%dPfT~ke6q1i(LT6&1X!CFlN60a$Hs5irCR&vL>gkZWI2Y5#iI+agaBo-1G+Q*xpyv z)kf=PMA4Te0LVJ>NbJ0)n13%L6APy341j5~W?*8{Ht5(FeOs$Bd%2(IzRh4{Ji-jA z>*M-XkpPMYKDr(o{URD;l8d8gh+9Hn-C<#ioXVl9!(zAy8z;(UiiTjRY5$4!!i%$4 
zUZh3W7Os6*%ypa`bcE&4=@>A_D<1_{W zNwM9F7}=whVvAb0SCVFsLc|E^CxA*$ZD;o7bvma5Z}~*vI=%f^zktcuMp~P%IHf(Y zWBI%GYDLKCoZ9XGM?Hp>#1Ly|1e6=j8!BQ^jH14F5m(}L)9m53>`I;3qiML?ZG>{Z zc_0fsI?)44WJgaODic!}z|^XM+^_a_#qbN;fvfzAnA$WrNSRK3A;WewG5ZCviv{KB zOtTNIB-thO4U`1PsNV?L7`abUDr~(}6>JY4>u1}TzFCEJ#Rb#6 ziv1T?9u`cr+_t&acR6Ys_&DvE9{W^jwoHuJ3+j6na;Vy7FiOJ2nH3L+EB1lcw09ia zzgYjTtt8>SvY1lB1~7lL*11Bym_+_lF(+WaGNf|Mv`sc{Hk-wxYYTch&`j&&QDj<- zH)-Zc&sfncC6tP#Af&>)#=e((X0I>(`M3J)`6K|BnSL{A{STCj#IxbC-2KH>Tj~o& zpJfZV3C#ueaxk}>_u~{BpfUkrD}xu$mHVYgj^CqW<_8+l;IavE(&ORgEBxe~o( zH)1tBy-m3{2{o0o!YJcq?u?sLXSQyyAQl1$zgAf#@hg>(FlIgP3nq(Ya*n$Mx^=nl z?5^RWh)n%Y-J}=P+9ShD)h*Lg4h7oa-wSXhr5?0-+W+waMel~WM+1i*VYqRk-7GGO z;}Aqe)Cf{N-Tfhwc^NOcYysz~+QrArQxYh!$4IC-@;Pk-$?J(yLZM7)?sJuss=Z5v zOTCje$H*G_APl!=b>(J?w~;#x-*DpYv~+Ai<4RRl`BAsZ4g&n#w{KCprVEcR)-%FGTVOQ?JKp1Qu?V>!uA1?1LQN*Tp>%3`h~tpVx$j1F>(Q?D2!KA8h@c%YW(#A$WD$%-J#9;ubzj-5b!8gwp@jd zAXJQ>u99$vxUg~xuAO@^iz5=TQ`jZ;SS7wU8BS>c^{UyKpMN9d5RdRM8bJ|9fjVUw zTR`{?>M-EnVj+o*ek4mfgV{&?_xt?r*lmj&LC^y2mr=6@pa0;Q*d#Wcz-k#A? zqLwq$%V6fTF!@fJ3LORH8c-#F1b&V=u5N1b+{ktjWbmgN&{QheBMDA@!Nb0)hMj#>w zizH|RjHN)>`tJGrCx;BI-NixE`QT^&!-q)Ntj97kNqG{zJh{6E~-xboswaNo6#3U zV;aj0=Iy%ODoc}Y+J5Dy_+^5I;IiWocP8XekM8UxZSqbOM$~)Cyt{|lQUT7FTP=Gr zh!(AbW^427-&dr{{Dx@%CKEt8&u;_?V6mSfnoYzVWj+(lSrxPr>g~y6lZ5wQfx&p< z3T23+dk}?3OL7=oDqk|G=gWkssTuRsj{Sm@&mAs^1)zH`pquX22AQSeMM51{Lyr+Tck^ zP1NjnQ2mx1mJf?mwkwVi4jYm%tW@EZsZx%X zJZrwWNDAINDuXv@iK*$hq1KnZtOtbY|Io?>;j0q!-NTddIs-(N+UleF4w7ARTPJT_gaNGSq+~-c8RqX=dO{j z7l^WinE1ca!oBF z(%?FDg~f;1p!*No)_gdi<~8{>g^=l0cNFm&A6=oxnp2tze5MWyIHi1jSS8p9kM&z0 z#diyaeN*PUOWdL~U-{PQ`?+l-XDrcdE!Tc-L&mtHXJ*pW!oyRN?FA0erPND0Z2{3| z3Gi}4W0`~lLPV);Q<98vI^wfj%Q+o$0F#aySLQ|(iE-Rh2bGJL&unXkYx;G92sfw^ zmyoDq%=!ZZP2uWnGw1Mab6N)}4Wh$3i}E{(6i$i3UkJN7cqZm37desh7{hhvTzg$? 
z0eWu9aG0%=Bi2`p2|S5Th7Iqgg745J8if$kvQ1M#cLmM^0sbewbKxih2CyGVy_qSp z#8aBP@aFQCjVHvg*a#M6Dwoh}YzJ0&j1ED*zW{UJ-}LiA)iG?MgSMa?eWe1Em%IxO zo}t5t+!kofQ#Q?MEVh=vl#(8M54qy)PclPKK)Eeg7jj}gh!93(!5}le6Kd=2hgW>I%A&epGj9GDHT%=O#xg>hT zy^q+p$FmKGk@g%vFLytrUI*lyoF(Yncu1yPc_dgv{u_j!e`wd+O<*tOW>-v&E zVcJtPX@9P z?Nm7#L>B?t#K?j|{$>VB{FdHWUw!Pw#{O1BpRHK~f3dw0;SupppD;CIvfI$fZ0MhB z2VS54-Bqq!mG?L;0^N2z1Y@829NN0IZ&avRih~mfq?C05DzW5=oXrY1-6q zK0=TE6^H8$iao!%eB(l#hay9$R%{ZYAfO^*Bu$1=ax2hDQYbXpA>_e~a4LsIQgYAD zDK46nY*Ot4a&zP9tyR~OB+s^i^1m4ROsx`??WCy8n=M)WYGn>?kj9^+iLrbN27YM44s|{&nn=-s(LIm| zFq{#)d%QbhoT(%wJdO}LtYsXH^5nt6nr5qyeHW%}_P| zr2B8#2f;u-zFbbSexp$WJ?1Y-5=HQlQDR&apJ|>Fc668b$fV+83Z{scPc#mWhY=uG zQtHu8C`E3I(4e`QdW*>5*ql?9=MtSqzZ){?iB6*I63jCO_;PT)s$C0na`fo5(JamXMIC%^IOLGzhl+q?~ccQGx zpSDE^5z~AIOD!W0>(w%G!7&u5KO0d0mPGYqTdoNbyG$!zw2e`viCD7-3k+!K3qeni zwe1Bw4Wn@cF}n^dAQ4aFh5uV7o*H&sriiIju*}94j9rpBbdQn zrG|@WYMTH7ML!yFj=mWCWxZy`?_nyfID6*C$38YtpuptwFdqd7Zi$3K4gz6<(##?f z5AY+{SRTWOE<*vAolwL4*S&dlEa>~hEv{Zum#$rl@dF}BnNJX*TO6ApW?4w1M<%xb zX9I(&$%>RF_CoU&I-_;W!JDRriaCY_O413ITT20?T%`6OUqR7+^h+CQJjOJb;`#SNUb^k{`X8d|{Ql*N z&;BpH7xoeV*<o|dmc;Y00Zjo&e(VA3Cm(By>e!2T+gw_5&cJM zi%*z2r)I}jdc?;r_G&r1wHK5@Pv|b2nt<3Xz-T4IXhx*__)_1v9sEDUp}r$is;+Jz z)bJrQyZIVt6VZ#l5{3q){BO_WXGzfe;1_A!B9e?#cSyaT>?t7LlwQ4FM8p&Pml4Y` zAf$mS4rN&Cv_A$)zk?=5AXk6vZX+c*C2tavLhMC3WHH{yv0dKXqI=H5Fa@D}CX9>k zQzhqlCvgi2EL0`DmBg1YS6wyUf64yVb3uMNs)=RDzdymSK2Z;2mauVE932TeiHk48|w3iwaJp?9yD`XrStKA^s>R9*3zcH^s9_4C1Y>GCOIUS zzJ95O3MhMv{imMcKj)t`JN64_pfku81rJH{VGRA%0Vp9Pacgo;FPO?dcD4GJ78&Gl+$6JUC6v1@4S{e}n0Q5fIwQm{deP-i}PP{=Cv%ys$RYOU_ab1`LkH4+4) zho&#qrXR<_;4X7EqUph%v|<7Ky2_5jK(D?+t2OfKv`kJ6Zy4Ew; zb87og`I(0!ck)_cfai`jRl<4iknAYUP$BaVGCCCZW98(GNuh)3m|C?VXiU~;?1u8fTbc{qz+8=f zKHePbyuugdt~?AQJhC#Up+uCSsZD7*>d}pdS3zNe(05D}>Dx;wS%EBId#uL8XLI`t zvE>9Pge-4aXQkSDHr6T>qg^YO_l;8+A7t1M;;Z4y+f!1OTZ*2Ub%9wQ(8-VC@F9x1cc|-b}<*PaZN9|(kHHSWh zW+uy}x4#AMn`(^vD=6!PYF$%075zFdo~(9JdE%G&IrgLX)cax=(;zME>)C?kUDloH 
z-~OVfqYzdN`!nlRyvi!A>hP7T#DV=#;Cx7Byt%S|^iF~Kj`7^m?75hz9PX2?+FUsn zoDrrv{zUMj9j2j;qM4$Oaku;UM}I(@yRS0cjFDzrdR{KZ@ih#9e1O;c-^pobrf$U~ zmFHi~b2)g=W$OOz8uts_khFD3XrIvy-u(GeZG4t@?oWS}GW|xr`H6QIHd?|WW?|0N z1yYL?w{F#5Fvhr$d#W$*v&g-|w37CYe!2dj+AKTM_RtGvpToPu`wz#;Ns}ks8HX-A zE5xQtdPkQyTjGu`z9mg+>5g+6zHov6z{ly(m z*C5(5a~Qqrme(DJ!_#Y=ZQ>rxrFB)?OoW-cb!Y)?c$M(fC`trK>>)UVTHE4!G7u!T zebvuiI)T1HvJ&)49#E?I(g7@eq4X4+MT0c2@8vaXq=)J>MjE$-{-mvy%oJ!;nv~s< z)o51E^^U-{+I436Eu|fxI*#(g6{I)%tvn*WzQPVbR3aP9jOSh1hK{{D$T!F=6=HDJ z6Iy!$vT&0{wpQsk@VD~HPeC77)+^K>x2();DeVeX#&pH&7S}JCL!)2?3a0uF)9K*f z(}{W?#&52n;dVf59pIgS;7!nVY9y?FwSK z2i^pwEQ2bM;`<*l$|i3Wp<(})uTZ0W^>Q9D3Q~^<;SdZo<@j6L41*m4R_x^R6EK1rGcP#pm8ja5yK{Le>?5KqLrTZI>vQB-vrd9j*NM$L;B0Bf zNhdFHk8LT*>ZW@O89_nj)`sa}lnLyUv1Yv7Mw)SJYqQ{#`Z&}R|DHzU^E0-$p1#Jq9fB*Z&UK5y`e(P4l{y^)+WZN_vTF7d91ym{lb zi_q%Z+1ERaekedu7f>H}qNTK2Go*C0cixMjFDoN%Mf75~TF(gM9z0xJpX(Q75x3S$ zH*=S%TVa1$gI~ujt}~ir9A^1G$|kaA`Jr-UbSH(shQ$JitaQ-Kg=@p0PON3z&+j$=HwW6}hZtdS^G09-la(hzqok zIufv@| zC3|}=)J6ulNqnmWx!>wYX;zhL`Qy~mvnfTmrw87&m@ya!7n2{SeLE0D2_*^1 z6BxLge8h10(D!b^TIqFV=KF$fj>A(*JKqgq0lT;47?baFpuYjgS<4iaJ)m_mI9;zRMXf z^Zl*5>aSvoqy8YJ>yv-bxKvhKQgV`$;Vw@e-3Dp<<&7Om;k$gpjr`fz2W9&}0aXv& z?|-&XXpOJdlQObEU2&IjVg+iuzi#9e5GEz_c3uB6d5~R`(^ijQKf8O2jXkR|?~qhc z-`x`ut;277b0}5L;yg47k|yHRpr}QMD$8Z@nt4f|C6CW6QsNY!=xh}>(|aqaBv&tb zJTgACOwW+4YF2Cs(EURiYD{Z|ZTlL@h9&Ww=q^(TOCbQiwLO=ZX>m}y* zmu3G7+PbRgM|*JNi9QRfZi4(jjI|xe6DaBNkr=9?5yU=I7k3z&MxIH>P(ZMYOqrc)52rqSm}KA%1ksaDl`(a*m)6z?mGfYl}%$3 zl%1TrHm)1LK@|J%J%dSkyG&=0J;jYWMD08=o?E0gB}gi6UG1JAbyGc)<{Qp{zD};7OTVPPE!c@sMaP-J8i@oVGwoZECM+1VcAbBoD=NGZ z<-CRd(sxqp0qO+?%>q=&61Y<)=&W@K6l2_ubtxcCHIbFE@Uxj!hj?%F#aO$@RAt7i zeLTDZOwn9|LM`c>waAc!qcsQgu<0!Bc)RQAqj$jZ40hMsgy7x}!L-Y?d$sC&HQ&R84##-FP$E7Bo zAz&3)-z&5XfcuDZx^@mgSw#uoT2|{f$KM=hylBte+ ztk{YfX;*X$WwFy7Bw%ybLKzk~CfaysX=*uHzCUq3ktbO$)h5!>Qd_}5Mr;hL#Gk8u zaI}zj=40^9n|!QRUS?gSYkbqe(i%;~w zC-Q;L);auM${dylrv5E{@Q6x1Bcf|qLsy}-L#fF@RVySaK4pryy@+i$z2 
zudqRDofPB}jx*MCIwjch&ab?vR8P}!xYbI<$`-7{e6l+P1<`WwWV26;5$mxLn3sx7 z5%HZfk`P{$P?>^LYupE-L6%6a62EV-k22CZKMb?R>Vuaqj;HzvD9qG}_lsR~3+VcT z{N|Fa?<{Ukd(oTdG_lu#h5Yb^Up1KI>-mGOIqi1nd*_j6EYP9w9kW%F0atXb1Q^vEZ zGqADM-eOSn?jCB>w+Gu|xSn!n z?+FBcp(1NmTVc1z%%Scr)-oVVtQ%h58+)GX1~91ON#zgQwkp{=dJzdF>C+ks88t?I zWZN@7+gW!4gVYo6f;yqEvgsqxj828|ZUtTsWmZj-fWVQTh1dB84{3(91@Q~~gk)z# zbPC2TEM@u}70r=9AVAa}lSheJ?4qXB&hRr$Y*w^aDq@>a#C;V@I5#% z`T0zfSz?E6E}(#Tr{V_>9Ll;@{MokLhaBaNFw_};?Q)(&lJTuO%KDCV=E&Y6w);4Q zUy@#kCf8i|@@O1yrZB9_Uq<|qy|#T2D-tmBPJbeqA8#J`C(SqqzpF&U0wr(p_JPvUL$j&d8rrf%`=G z@(u}MftEv#WAfU?=c-#SsNEeBS*ktC6QY?ITs)*K!CzYFyjMJZclfQrQ|1A=g=dTE zE7;B!MtZR!g`90iEIT&*L%qagn)tf>wnpF?6oJa+RwcLda9#aZ3^z}D2NZ4XRB8Lw z)-)$6neBu0-zvhr7uFB_*Gg+Q=iWPP{&ht5*^kie6f6xOq9w{|6fhO}HAz`P1&sMBYt9cIhK;E`dt;+(^H+5xD#z!+&Pr z8=E7Qj;cgRyYXFO&9g?+=WpXn+xHFrs2cis$M{I1aL)GdIlY6UsUDbJrF`H()`8z( z!d!nFp=r}C=g1YVjMk4xd-j$@*|B16?^6fULHi_m51g-st?`imoAK?zPyZ?14)tL1jPCkwvwwtapIBurW!5NMo^AbR928N> zS}Pmrla6dXCR_ScO#jx8d#JL=t+dp8nybGWyOfDpLaD$WbloR!Y+X*OlUad-{4Fnl zqZ&3u8prjePG77hpt{XC|5^{qG<$c6`^gMR#{AhqvA3mbBs3aJe^{WT9f=Oz<$E4D z&sVU>`1=V+(3_bn7_?*zs@q!~Cx-w}%&U4q7)V0{F;)gSn(aOlS=FojBF}@n{6;&< zBO!wAcJ6Z0`P2f?_HKqc`khhp^`zg(D%<)4t+9Pt-5bB!Bk%`G*IZ(6dqtUpga zosTHcP4zcM!nLQ)j(hmKK=3d1(e$N|lj(UT_jdK#qz!cxOq(mGVH6O+=2Y}!wZaZ7 z-NkKPhxW}%D*OTWho>*6Q-!C&J2T!|+R4OijO2enPCm1-TT=Rfa%Tk5;Kh@3G&jnd zxwVw`lzytvV8*|E`J1zN6m?KCTNlN}aPgY{N^1-vtVk)7u? 
z)_kbh#sea0wtqg@z&XeyATgLLtUzJftga6U({t1;DW!axt<1S>;7dCGYYQ*t3SIO5 z&L^xOmOfK&PWsVU*2Ei7?xCf`8-MTwvo-I4~x4MU?$WBJ5MaR^%@M42pY89#Xsj{xI>2fH| zEM#~W@Yl&U*=?NZD{(Kz$MJLgJ8W$wR;C{cBRz^&+Oo1Vjx7VTD^^HJiA+^gWPIcb zq>&#C=ZHB(snGduqhM3tZD$1VVDvTm5-IVk-#{@*>D z=G3UD#+7;^GKwL?KGOnqEm7NY#ID^M_kwZpA=~u8<(D7%7IT#ZJy|_-yUpjGglJEJ zj0TA(=Q+fiS03SL*I^xx-_bO1z|^3Hr7h;N>q41d&Ja@t3kZ0Z%J~akY}!Rn|3>$p6JC&QT`Cb6owh>CWT7s=>aVzW zsRFNWZrxU!GWY~8uuoKEP1fCx5KvWaJarP+Q13qCSX6&S=z{O}u2FGS_2oIrqu3ew z3;6Sn4}L@qyKWtGIKq9I+xpjrnGTf%+4`tB?wldj=wewu@DWIH@yfmB=gOws4m};p zHCG^`Zm9}KO3PEd@@mGjdv`8`Pt%cc37X~}2cUv>wV zWZBmHJTVO2DzX*e`Xh9|jA9`F2SH20jr(p+`#`uXS14m^Z>c;EEZmnzK3^h-PK;Ub z-#<-?g=YmkOG;17lFkvG>g>!*%Sdfy063zo@|_0nCQ!#2H;tCMRJ z-twi)?h6n^`#)=CYcA|s#ii!cVi@ib#09hSvw_WwUJt{O+taTb*EJtGw@>R9$o9zm)*qg@ui<{95xOy+wTW8 z$0*+nDfJumqwRK0`i1q`FovlqxzLzp2kLu(xX|FCTNl}aHmp6>-N80ebmU@N-hmnb zWa$qqoV>apu9jHcm=)Tj+?A!!>p;Dq>bhXIn4cTu!gV)jDh239+Lx?!6ME%yQL6Xw z*V~Cj(j_N2Q8}g_p_!tq3z1S>g46;QZvux@RpSMN`>OwLHx#;-ja*Z=0CLf zRo@wNg$glzYK8F+g3B~Z8t4~Eiem)CUjnNF^6e4!>IHS%6mGR{pQM8C)5rqpk~!@g%7kcz~;?7WjP9ory(4yM$}_EL|AAw3tsC^N;3dn8!Es z&o?}opPK$0`T9=~e}2CtH>XSXa8$@$=aEShvH)MmivU*C_a7>90NK&&>0U9Qb%EHV z8laWC)~fEtYI+qr>UJD{B^0TIjEg&ymPLU@hYiAz2Phd&r~C%ulW%ftZsRvop3*?E~hoTc;dHiD{_ zAG0&|6C;a$Ce1JO_n%l;xR-HpM_wyz+feVYF(t|lXQjEZX+(xCh{3m+e}!acx}+lK z&1Y=K+wIOObzqHF6v~Y<));x#np%B>u>h8?1TScM%ojeX8>{)HML*NPE#@}>TQWF# zRzpF-%*%#YV)P>e%JhDI&)5L{STg?;ci}l~V;N5z$(7QjB^$>chS z+^@1nObKk<6rn^+J@uCTep8nsW;4zF%m3Eu{5N;@4+5{4FYaZ`7L)$07`=uL8RqL> z-n?9h{mC@Rx&P@y)+9yQ;VYw;%TBd_StCO?|0AF0-#yCzN8bGZQ(sn?F8BGF z+&P#9P^=hjrgSeT*Bf?*nNfC=PK&{kQ`mh6kmuDvvP%e{ZQe{{w?qNEyL%FmT?H(B~Vq+sIED5a0HJt>eWOBL4;M|u$Y?sH-d-2zD(_nml z<6jWuze20FjBMPosq+zndn35o2%_+TXL*<6|u5!0Br;tN&p33mkve$USBMVzAGbF-8MdO5L3klA_QR^ zw2`8?m!5odv=nL{DQP|VT=GLfr#YYYIZ#tyVPc_WFtJau47B!Q2HorQc{EV%)bMwUSuK2)&k(qzvUdGF}&IpalY4XP*mg!XruP_AUA;9VS@-3T*TT8jdI~M82SdIc* zTR=8M1vbdvHq^^ebZB~lG80mYMcLQ)gZ#h1(Pt>Z*+({)T*vX)nZt2ct 
zg1`R(i@CJ1dBQV{%E{>NIt7=JjE8$^3g+x4AJI-3)EE3Q@w36$vn_vN?DGnI65Zos zO0O=>5m`bCMaN~tn4*_?=5)#G&m_l!1?QgoowN#uHm&>pKUpe%%))P5?IAQD4QIab zUQZm|e%722?O65kOSj=)J=-8`CUNa~QJ{s+j}M64o` zIu+!nz8yw6K(EN4z}P6e$PJre%}F++f|mz@{8^Kjps5p79ltyPA${cUrlRW(UnHU6 zBw7N{Y;uw{1KSGu$5D0@Yr(~IyPF#~k#a~{MzNToSjG3j>%PS{10-!#a;Kx7nS4t~ z%f0~s#V##dN`rzY2W6wT&2UHfmj-T+HH()UA zg_XXF;wr%bGbHl=>0gvYtT1=tuzl)R2ybL$@3>pk+}`0q`;pO3SNX<-)fz3uwG`Ma6VU zNltX)!Vlr$mUP;&sS`b|_7-V;7)`g;j(}b}v4t|FN!Y^7phaAy<-mZmKyH2kFu^6n zAp7#-%HAroDARhUjUw4+0@FW4IH#<*ZQ?PDe4B$4J2~@rm5k@e4>-KM8ff+A4f_*S z{ANYYUbqi!M~&9jlraMrfhbdvhtN(Zo_1vAj{!)Z)OH`$)|#`(FHg#Y&)AzZl01~H zPM#=KS=e_ezF5%6;%vI0ux=KoWq4;Us?8l*4}d6F4+X{VWZkYNVuR1NEN(Dk@c#XJ zr^T1Ac=7iaFscOfGVesJUynzrbE(4El_f*YZ)bLAoLXU1Dt%munI2ZLJtHap2b;2@ zlCS}|cE|zPMI1vdo@&Md?i(NJhUsj+z0N3q(`%hi!lthDcv4Fa+4 zNu1h%e%})f)+-8LffCk<(jhv@t$;=nC?!3DwvuoCPfWuOHVhRvoY;P!mj%mFdHYsv z-8~aeJ_~oSiyJqhE0X;JX*&PaE^qko2wFt|?-2M}>5ijsdPTYzoKvPWoZ28kNgu^l ztVY!_T>}vM9Te?Qn5>X%j=!JWluVt3m*QxR#;@N*ai#N1HGD}!Mb8h#9EAS$eAWhu zLN!j>Vrg546um3Dmv)FOB7uBw8XX4F6FZ%-s_9cyRNJ3CrrMKU>X&iR4p6+`$B$%0&3s9+tL&*hg5AIayWiQ{c~xMS9K*Icw(pmc5_wcMP(1a}FrE;> zhcub0F$&s3dZs%=!`NQE0IshOht7Du_B{Y30gN8_h3teDhN6p8AC8u>cTqYz^2^cyY{~R8n++QEO0RM28@0FDRFVeO1X}@(SD(1G^VQf0t6&3I(B}LwE%!t z9jk{xKbaPHT=tJm_5N+)tf<7!1bd+X=R1EQ`i8Us*HV&<;S3Z#MsjTM5E%&#l2zZ) zW+A&Ny{!YMr1ZD3iEF8NgyZpYX*C^wZ|h&1Kd1acoz-hp&lgU1s81VF8$82#VMI2e z0C%>3e{=WviAO>q6lhPd?`^1^e65UE!+QKcfcBw~iAO`x_2vLK(;E+giOg~@K|Q6! z3O<&3HQAHfotPD!-c%C;*_L#|u;b19zp_%Gy`p5JJw{cNZBoWQ)EV>)14zR(5&n!+ zE?KzO{kef29_?aVG`A{~H%mhhg@YJKj5N$wQ$E=ZBv$RT>}sMuGoJ|7>)_FD`;Q-= z!Y)Op5JAuzc(cmD)>3%&^D1pJY}}FwLU=+vAmcv1XySXbX`+*(NZ`U) z(D?*vH|y`!NZ@Fz*N1*i&vK`K9V?Ig83U;5s|V2*>d`WhoAWvOuXY$apBC zgT=ZSM+{z!&68!D6;ygsmC}*i5wYTW2dC*t)_v=poj9as`}T&=T`iQReuC^23c3-v~_^- zCQhzlpzlB1Ugrpo`Ua{Cl`MtRDqIT-b>KYx4b@!<9NSN zTH)&d@=HDgOuYd{rZ9QSl1)=IEV_ARNsXwJQAR8sZ2Q*@N4_rhwJ?~tx+8j};_3{` zuK~Chnx@2j^FTqdtH1qYL*W{)Q1cKn_M^EbiAa~8P%vBf|FS;`Q*}?nnY-){u?}y! 
zCi*tXqB+v-wZm2}H33VoWN2Dn;<~-dYxxE^3#Xm={ze#%C8_@bbr44K-@rS@<`dTp zc;|l&L;Sn8oe((5bri9K`dCkZ{>*0hr_)%Ao|pAf}#F-emMaXdG18yGChaR3}c4e5EQI<%FCh?V+ zQO{&bqPO(ccRlH z4Dom!mqX#fe3=JKM`Q^Fr!h}C4C8K)KAXJa^p*;}#orH`)RAi%Dz7bTye8m67Rfox z+ssP(WD3dySJ3Vyxd;<4l^OL3&KW(zwawH_Qm!=BnauoP1O-rxVb|mleq?BVg|`K2 z*6Z}8(tP|IY9Vs5Cr2#**mYo-ogiacURfnHi*eomH1N|UM`v(e2&kNCsy1-`y|`Jc@HN30}Fe+$+X;!lmGh$-gP=n$;o$D zp@~!h^m5@|FVo4%gY@)HoM#2tNXNAdCQ-4#zg?i5M;E8i%Ny)Utb1Mpj`cDuYbtUk zz!$|eagEa@wofiCE~GlpsaY~{CetCi`WTpVUKX7=H0*-I-}0a?UxQXP)!G?lj6d;q zm{(FXAGiMU%(Z~ny3sF|rCF%o6;Vrt~WmYxDhD(F+eWHqAk zPh7D1bv@}GP)B~h)Kvay^(wxhZrX}&D$X*9W(zlWEV>5oQ2~nZ;BxMn_y>{K=+;v0 zL**&X^8~&3V50=Ic?o0e+zF{#K^}}9DTMGCJ!72>1&o2{KPLXQ`gPRtwB;}}-JiM< zgw=^wFyN`He%Bc6ak;5Vtu=)aV<({~h*6tI?y~x*Y$e7fGXNDi*Wh#^uWV+NDa1A} zsbPUQ`an(g<2<}Q)A^M^u-daXq~2V`{$ImXp_7!UO-GU0Yd5)uBZKDQlhb3yY~!U3PWsmEH)eSW#+Zbo*W?x^sYK8W z4Tr(-pHIa3yX+KR8(?K!=T7{4jPMUA?4t1%y@LNlA@>gTrs`kMi6#VKs>kb%0@^OL z$%kG-KC_%E6#Elc_?=j$udqKK9^YlPP7*~VX`YyN6*vk06Snhb$Zu>1dj^Hi2wt2+ zXLHf>d*5z*1lTg8F}sTRcwLUtS3`_+xv{q7RI-bw2rJ9aPfmqW*$K<%J3~1bSFvZ4 zmY!acC-LDbptqS{L9w^5P^9t@xi##e|%yN0ZrH988yvp=a{IBIE;B=J^f;uxRk@c)gFpVTNfL zK`12QCIEcb;Zcxn@Zy>HrIJVSb^Qf<2EQY2ARph)7hnL zeG0Pj#_4|yt%E0pVB#dbOGk|^F+iC|_N}}T6VSltn*7&B;1`wFG zX#o>hcE`yZ>SC$4e|&OFjodJ=V)7f3+)O_UImeB*$$?H}7QH2Uw;KTC&IV7Lk1Z;F zQ(HoG@pnmHI{ToG%BJ?zk8$?djKz{YlL4p|YJZqty-H+tB03Ri>I4ck>e=$C?5=eq ztf2vLhSy!V^)EVn<~5z=)QP{=W?L#PBwcIl8;HgD6;5`)JzY`4(9XB?{M(7^HD@)o z__oZPjZy`f)O&SLe`f7~vE@RWX?v z9cjT?A$E;z53R#ualk%w95+CFuC|=yTu^9gA{Z`#DzRpJ)_9 zzDcF^>@=BdFRJ4MdWYJo2?>`V&#y<|cYY-R5Ur7wJEOHG`M@l_!WPp$R~m(f`eKZMGW6bNd&MOsr;)ezvsqNvlFn5@FAqZjzdDq$Ic@hTHJ)~XYnjO- za1T{|J3N^!S@F22qJr%))?U+DOu6n7I1DWA08?}4e}@By%MHSsF?qeQ`rc

I-;qH!g=i3k3~3%!%9e06X&_3a`FGuKoX_X`{XL&& zt!F*owVt(}_1piP;~eeY!|Qdw?)$p0>$)qqO3h^R*1z0M&d6j)M=Ej}n>mpL3Y2P) z;JP5<(a#4Nq2hbxiZ~4sPPIq6?b+NA+J4;f(=l)#jJ+J;@+t+>PMYYn8+UPG#1xZR zdJ-XK{vt%96>9dn?5Q;6Gl17p4G} z_YiwhqS8ye{c)00J%WIE8acSoXXA7S2Da%NHr%aAEoKSP)9&y&nfr)D*EmO0;ze!Nx|1JBIj{D$0e z*%DcbUg-R(@sq`_=4k$jjl-VGUs1 zl*V(KVgVCa7e#~_**nsY5Lot;Z-Sxjg2^O9U?TsHT{FOQ0@(Ts< zzM7dNvu?)zHKu^hX;b)R13#)utnEQrya+RB9@1=|Pq%da|DhP=|9swyxmYqwaGqz= zcwu6^Gt<0(_9@BpEdiKn7KXB{=nnYRYl;!Lc_`4kYXPQF_V(GYcNcc|X78^T$!dan zPI)=d`DycR#)th=~pG9IqaXbfu*EMMLUl50bhyzh*KeE{}k+a5Ra*`g*BL|6#hFvT6f={{3BM3 z4B;36;`_A!!4RK(lf@Y4czAU-zPYJq173@THfFJ%$IvTlc$6?7o0o~5#*6#^JG!m( zYoh_(eje7tRw>59z2U zwM9hvDCr+0{VFzMI9MWwkru+Pqv8xBAk_*1Ci4NUt+PNeG{~j4(dFL@gpKa_nssQg z-Wa%yS*CfP&=EK8Fv$;CP&4)M_qBx46%Ne#Sw6Tm>f^empsxH5Fz3dA8JabDp9uGR z;gnj4C~&QJ)vr>l9!>T!G<~f?MY3VsjYISD{&oLUilVrF zCQ*h>1Owtd#_3vP0zN_w&X2Ry*rV!>C_R+p0IypT=_S;&Ci*B+(8LSVm&GUllWro! zKKBx-L=IU@2o>>8zrWM65weNXK^Bc4D4WL1V={a+6we`{d{mKDH%N^-h7T#|C(S|d zFILF5Lc`v;dYPn!#IsrORFw2RMB5}^R2S3Y-%4mT%G<2}dC%rOgBi4|JW>Wsi?p~JbD3p;`ov$ybh#w3|GxE%#o0(Z4?cI z6nKj>f_uY&qhi@9&E7EBo)fe?g**Movg^Z!jM1wg;}~iSoqFq66(%MfaU$17 zm>4~UHaXwsyc2i9wvRQq1itk%Ob&ngeyYTgWkFg)B_z&b8#4tl%}vg0L*iuo@@Qx% zpw%qwYAGU(!1YOqDLhcF5Tl09DxSzE>Xm5L`2Rcm{K?^4>P8bwO4jpO&wxg8uQgDG z!+5w66>;=}w|Bsc#9B=&a=!e;7{a$nrzccg)f7=RobEahyR{3|-_8nJ zO7{#6Vwyn}jdR(L1HrGC#Jx#v(I?@PQ*v!6Ch8vbkdkGW#u{&2nq z7tRQ~(ET8$FBRz^NYU)s)JHflZb+%Y&WI9O-|fRWKQEwU+HY1t+EfYs7l@^}r2n7j zzAE>upd6rjzCCI@8`B-m$KaEs(ddHd8C!I=M!^SKmEfEEP}lB)2;~CKt6ho3{~HGL zeZ%Wza-E+J)m+s*t&-p0-OwYwEhQ-8EU`%?G^tc1gFvlyqvle_zEb;7rVKC!z<~J>T(9YxeG@!s8rwwEXXH+YxLg+ zv~T~a4ervawuji806Xik9zmw{U~sBh^*35!Y)t0+zjx>Le{D}b7qFmNeiZsER()As zgVG*^3?0KjgBeMMum9^+b@+!#-@SQnYm6D_$EL;>vupqz4mz0@BAL919mRY&Xw!3I z{|g?Hr1a~57TEcP25H}-Ns^ua25BsjaA&IFF=)txgR<55$jL?-UH6BB@5a{+JmEH+ znIZvRPVgo#jSLU+jROUcFUYXOO;4WStn&kTWe&%0dTd%#K-d_zGW($%; 
zZ4MC490vLll#=#gkHo8L6ShTF^G(1^Um@HXLRp`7_9d;yr=V`3!2cT?=+fho;e3&0Xwe5Kx)#p;k5>_xOcANLr(+l`E$_KmomnPxI^aXg)Zn+^Aqnd}XV*ZrZM@5wHL*_l`-yIO;6N^XJ zEd%ZRnHZmKgqNCN*G|1mqIiMDxSzhnicJ@*2mU`4OgX#@GD@vY+&~F^lgjF4tQd&&xENU^*3?3kB3z$m#;YOmWx4i;#VRmw+ zp}HjvH3VAtHUY07LVMr8Zj#|A!zXV7P2U6Scs{X#+q!VvOvT}PP!Y)PMEgNtlbNKD zrY;g7Tva+Gz97ZJ>}**@@WWMUE}0m0|QT|7-iFCPXSLHGT0z|>NJCob;^AIUP;ns8N=rMDG1 z%YICmSV0`Vt&UMRs^$$KugIdji%jon(9!<#P3tnL6eE7$%LB)C7H0rf`hZLKgq(Jc zIz$bPZCw05bAGDU0Dj8{IuY^;yoXExPT;7}@f&M#QZPF6SnJyyDzZVOo<;<*ro4zDl0NLp=2`BzE+`os$7Qmc`p+FON z@6j;>T!EEdWe!buAQ##MB+S|a+b9=B47T3pNoWEx5Z z$A!qqlvR(IM(JRzVHhj>ISU%Tr#a1*;eRMeatMqOt zE}?leaZfx#pZZXZ?w|%Xr(YG3@~MqddX}z1|Eol5OqT?jV7WbCXdo>OJ$nVJ-!5zb zFB)F)6>l)$EX#2Wj?17+^sCF(TF9-}_X-?5*!M0CVEc{oZx0C&pr#~b~sRoqp+SWF;8jJF-5170?j^NI; ztkG^(rB*XrdR9J%f?*llpIPPO0Che&_$eY*y@u!DrGc1 zOONA zKproN_TPRQYM1~;$#WnwgT6baxsv2CxNFqXJ8>4b%)hy+Dys`%i)l&(rLzr05p?WPPGNyEh zUz=l6Sv?+N+!00hsqE|0%_pfzC>ya8&u5YNj3&dZ*^fX({0g?Aen&el#$3xxW^26f zeBp1LGqXADH{h1hV4Cj$stgmC@KnxY`K!T-DVaS4Mm<43X?w=87C#~xpQ{f|8Kn*z zLln4uyx*2V`x=*UUPY9u_)5<5H}Hl_M?iwmoq+pEbSWUZwF;$bmbQBt&31mg56JwHjL z9?3c_))J2QsB1n5p~XyUDieHcfaBNDl{ludIUl@Nk39=dt-VHKhB@Yg1=B9Y1!i7f zvFLWljSXmYU*A9{vh|k6C$DgV=jnOTh*zo!11|a!hoL}BVeoGNK#Bt6ztO|ry5h@| zVbQxSZ7eKx)E`qb5Y5SKkortjd5P{YP&YUmKG@el(y-0;;mYgUJIHK%Le%2u;(b7I z1NvRCa%;#@iyE+`AqZd*woh@}mnNH&6xCGzT0xfg?Li9fL`YkXVp2EtEK1!#w}GZr zqm;X8Wb`^o$IWQMEvY+D8+9VDoqhd^&;DJdcBfCyK;>Jq>*SyXcJ8df(VH5ds{L+Uzjs2(r1^#x!6ZMw4-1#7kDROLZ9}x-LWwyR;Mfg>`Ctkjf7+08^ zvdiCii-N2oJ7O1fJd2_g31BDLPMpr>a=F`H;KAqnG?JwY@e!EZ5W>yvHxX76o{gIC zr*}q0bk;C-+mRWp2z6R~O>@)~)zNj26U=<6X5{(>ena^(ejH$OGIgh2jf(EK-b(h~~N6ZiI#1h#-r6MHpf(=GQu#eYOp7 zn*rVfu4uaK#;cU#;Gpo=9+6so<+PM)rkg0TK0}-VZa(Xk|m+yK9X~28!4>1bYi67mh`Y`iAEn8 z^E2@!cb|$W=<3#Xl1mECAK_dTUke&pjF^%(Ty66kxZgw{khc`##!DU+ezdLKtRPMq zOMvDenaaopNf*L{D!Olt$V}dr3=qxl3}3;MJEbs!!~0!Jq?H++1cGEh+XZgo?4)T@ znA!CeltI4>6DS|zj$MX?KXJQ=0mw{xi-)l`E~4ryW?MxSHEL^cFfCMIBN-@S>&mF< z${DZB()oJ4xS~*H-!NV0kXA=>1adofb8WD~iX@M@xW&S)t4(Drwa$pO4ML-& 
zQ}T=~V22Leu-rqm9-WBi(4+1cFari&lQn(s1teK};@&ktb~TSXnzMSB$WBbQlv793 zeZr(m!kMI>Y`hzZ4g!X^uyee0xvYxG3g&zDpdUhxr* zOEhZ$!CPJbsaUs!N^|6df-gw5v+R-zx2XQb4w9}@=P8^KTN?D*+#^pw zd&i%W@zCiJZbau!2JwtvJ{9wCej~=q5giU1(0LH@ZmbBnC;ss`Hny>S_|xep#Sv`g zz^i)kONRIXv|txpb-kz-NdU1VpeGcC`PN3gSZ$GMdc{Sac^7L4MbG;Aj%+*9MJ#z74DqIWXJQube&CY< zRR!ij;a7Y<{r)qi#B3Y;!7?rvJ3iIVGRMHDVAmZ)sDKW1QbXpd{ZX_fx9B2(NR_e3 zEn;26b>mu6iGQ^%jOL^zG7_H>tY{CC6%N(YL*0WgA z=f%w*NqRmON6&W({1_B=*{UD0U|Zr;zTCM2lb|7BDRRzPpJF{JAI!!~$ycZ-KTr&o zoyq<Gki(s#9R?fDr_GO{5%*NXH~%^`b! zb%5LOpShQ>f8*lY*gt3ahbZIv3g9pk1;=#aSR$2|cLkX|n@Unu<9S0wg$ixhuUVu( zeR<2&Ib3W@j=|;7?G6BGv$0W46PGZlisK(iT2>Vj)hN+-`RXq{OViT2*Yg`nNx-ja z$45%iX5t#tB9>=<4*b((-ihyM^=;>;mv;~ZTasLW02rGZUugaplT*$t(ERIRRyB5~ zj%X&t0DawkVx7f`r^G*o*U7iNUT&fFoU9tbd`&RiQUg8r?RM8Rpnfs_J`-6_coyq) zMJ5w7G}=feNj6GsF4NtE$ahiFCU#~Ws(Ia}g3!WC7SmnN4R(hQDsVIymM9v^&We@- z0thU$^3gb-Oty2nP5I!i=yx=4(b4B+8#?`AVP6IvzrWj=mcQ~;BmIkFLRb1WswGX7 zPtv3`;BNY!qWihuuByzx7Sol_Rng`XE+E*tdb`YqTi}Y!DnhbKB9;%R7Q;StIu{04 zCi$06!`zD4yq+)U;|AuA;CfKrFjtntj*z}rIAO9;u*rK>O#hOiD#Et|34_1p$r-PT zi_W8Qdbi+p4p;u=N;FtySJr{hx=o}tsk5F)8VwbN4fPSnD;f$a3o37z9a2(T!#=vY zLe_=Cu4W{;gP1RJh8IJFU%;1Qt}hH(toKpC`%2isR+xp7aspl0`E!~*`8Gx8#O#8C$3~znaC`Xr+VtA=`t(I8bLzSQ4LguJ zNLJZ0-O2T$wSpI-i)K4u_`S}z!d7)6^|%<`-(pDxjzdWFRGOkPp*CE1Pas!;+5qt6 zaZxA^3YXT^G`PszBzZrTEBqFLL|4{@kN>TkR}gyEM?vWZ4Rb~uLx?o!u-(d@JOhx7 z?6B1VA>Yy8(dj8Vzc6hsNx^ZNi}K#D4)6%tBEET}^8oyRKETvN7rK1OCFtE|A+_4t zbeX^JkBxcbXcuKF-ofBe$IFZg(QLm3!hh!-tF$uSGu^DUg}-XRYb>n=fm~ z#$*0@<61yV8!ctDF3fF|Pl~U{cH5z<#CEJlv7xSii zI!3VaZ;-C3Meo6Si7u?=Jqr++E>FgWW z3x`(+m(W#X#2X=w-VAHZ5vg%h@NxS;o`ML=MMoSc*PXgtMtkW#GdKSLjl7J)S&T&J z6b&(HCQk49;=i%=Ew(4h^0L>H`FJzWF9V3s7M)GCO9iX`#@}4DO4Km{{ggm1 zhifpq%##184ChB;`S9(io=43s1A3={^#RmXUPmx zaQ%9IhhHZGig`UpCjOFT)Wgn4s4a9i4kB@xLUXS3I_%6imQEfzF{knx;zy$u^W@uA za}H}`q|Rxolk(p8k%G!_=0TFz(ab9(u^~Q>W+lndzNV@;6XAEe;g*W%NV#D{5;Nr0 zP9$cKVpCS_89QImh&`Xzpe-_!sCj3g8;juW%_+JXJBELqXPKkv4hLQZO^(d!n9_o- 
zyjkv{3oxDoD5igTFw*ZEqxjcv)yS3Y<#(TXbM4rk^bLEu063@lh39;`+Wm^$T&d(l zSRvPP*FNK5NSu4->9YPd`jQZ~*a@EWn{Jt)C;bHQ40ps&`ID^cq(zjpij(`%O zPEvinlq5bj;zcG$#EBTtyVZ36TC50_=-o1E_6{QJL`H08PtrRd`qD>1B^(w3KC7hN zhmsgS6Fx0%kpfBUZ6}8kA)lQwXBZ)=3t4S1W16Nk1P>7$XtZ0-nRHrvBlX|bKR@eeWp zGRfnFI^-#Qg-{}4xHyk{mQv`0alCaK_k_o)8f`x*-j>AET%)eRS%vmIBu7^*i!awb zBDr7N5pJ`R<*SF-`V&9>X`fg#ulc+gn%U8Bt(?s3nz!<9m4^y_xHHHkMD;GUN(-AM zQmGmN)zWf|f$CGt};ksw{;g%KC+T9Y#Wo1=r-gAIipYOKWvsCUr!UE{aGKm)eg zk^obOT=Ip|J?o(u%~(^B^AQoZX^E3AGtg(0uyNB=|A@Z|^1LNAX=^sl&DC_*a};!W zA+Goc-vb?+S7qsON_EiknnnJE-!COuuqUp`bNC(I9)&QDf9Yi55@QADO$uX{sHn8d zsU;riXfo#C;4RkTAIe#(EAD+MX&bxHihqn|mF?bUajq2k@S``CTwp#W-{LP#Kd+b( zB$z@6=LGLNAWr)wdKvB3+AiPVxZhxNj_Xia$srMqMpPbe;%nvf2n2WE!@_g!)8Qd@ ziQ3}d9z*fDn{+Oa$;1?>3!%`-v-3l`n`&^YyJYpaOmfY@Jb;DXAQgZT8}epX*y86!=Rjr`lt-f!o>G4v3Kh$40fxnvSZ!K&2rxI?NU2 zQ(!Tj-esv;d+gZzB7Bq+5=`YS&w7!dk=pksbrTh{u;tr*)#X_w_jB7`LfsY#-=s2cv?4J&#f#@94DUa{d5GpDV<`40@xl% zkS+qtK$ZxbFt;v)lS{6Zx3XUG@|p|Z(el;V;(;kdWXyQ$KH_W*IGGE>47&#u)S_aP z)UF*#IfFbKSz17HFh#N&q?R1gn>Y2VV=Z$UlpS{a3dhCB1?|M!wXCvylEV~r+Ug*)MPJ~l2UsYw++wt;-6z&_ z2)P1paSnSNcm4rG>P(266br^tDMb!pMF1t(?+1{kts_I9SP|GqHsR8I!*b(_=7P-;{#^w{eg%iU#is*WJ-{cI64E#jx}TfACFV_m!OgxF z;SgU*Q|DeJu303}RNd>4`-%O#OjR@(ce2AWDeZe{t!gCVL<*t(B%Y+&O{xkRm+on9 z479Dz>mvD_`*tA-I{wsH9Nw?jCfwM{snJT# z3a*o3h`_~gWy#f;wrRp8K1m1XotWUP2BMx{)tlm$^|?hMEm66}m)bhoyB#l`5F`H+ zQ8OYi=Gg@t3xf<)fd)pmsjG~RhRZ{=(z?&2H087^w-xZU%;YidCDL?|zj2M3OoOe3 zTsF?pMKm$Or(-`R4>J@Qocg9_Gg(#=R9s|&i&lf9Y=QEsRtO-zqZF4SmM0#B`U7cQ zDV}6O=vt~ZD`O*LCMbFj8KEK*IbpfCY^v|QCN?#(j+x{v=C5}+q3Sx4lH}A3sdn_~WLsV) zLOWFhrJ?~tCaU~tiI;J!Ou+bkj15__P@&A9pI1bKnBJc%Sa>N`b)?v|jVGm=eEP8x z+&ms@T^1HzZ>}vcplBFanOk$|6S*^IqW!27lWo`gCQxzrC702u8@Qdr@FZg5SyNag zwRN+c`HOP|V;QjAI+3y(gR=;9OUV?X)Y&^p-ywI_iO~I+-lcm=YuRxv3A z8SCieHT5o45jVMX;&+u^xuiL~(>f`uolIUvwQ(H+3|zC)ctJVEz@fyG_W>db9DAi@ zc;3a3f~A)znjEJ5&obB?ZCf`%tAsOuS!BF#yXtqMSLY6e(W&#wJ}_PV3p~(7dzfBa znT|kCiBH)l&QR8vy=^PIU~U-59TWGCcDwAyS)3U>ZFgki@x!HfX@z9AttVn_qh{kU 
zOMTd=c@Ey{wl;)LEE4~XD7~ePml81_f>dJEx6^%{6KgEIhwjTVI*U!fq|1AkI;dh> z4|v~Fdqi;bjKt1*EVz{~EW<-Xxot%4C57Syp10&;^5G*@Cbc2`ZvOsq$BhY5n*t4j z{VdoH9@-T<3oj;;`1)YLC-gCi)es<*N1KIWZlRZDfEFSEwNbA(x#cR35Qt0XPp^#_#EEBOu`43fet|MVdXW_Ayua`5{EA(%lunB^* z?q zu2USVQtQoDlO47s#@K0qTE_cHGXiVk&81EVhUrAp@nfA1tt*99?1X~FI5P>?+8kh) zk=KA@KH{sH%rfH+$1qc0v9{ut^N@ZVcD_xu-4TopA7o z1S|!GC7kyWKH``IbuRR!w}LX{9VU~9dpFKV8rFbcd=cHImDDhfJ@OcDNS5UPgld92 z;WwlwlzXXFa64pJSD>kbwj!EPvXRe7gHOt1g~^u`=ED+Bj#q#3<_p2oQDpDhE4Zu6 zF}a$k@A_HC`YbR~;T+)*Luc3R7PD3%mDK&RhQGyxcWJH8EUIy*qOWtSy}7QTh;^#v zOzy$p$-=SXebli1j!V-Kzu2Ob-xKxH@+P8S$^F}xLed(Y!$LpxQP$Pm724uGk>xOZ zjoBczN2C2L_zMdjS3+J-jQu=Eb%0TDxpm{u9r@)z=X$3%GvI-i0G;L!-wDd32*oEr zBP%#iV8BsxW{3O{ZBSR9!=9+3TZ70RA6)GdJG-7#8h@T7eua z1AH!UgW0!FaZ02)k}*haqxG%=#g;-(xJ1w5_ow66&DfO3uGKsJ4qO8ZkxZgEp#!Ip zFXo@8=ilUB? zDMD@=(QH8XNGNUev({`sPwwowi4^^o%l%OsxY zsREsiQ(ty1$lnG#EF4ffl;_J>?jy$=i)thc#;VcOJAMVr{Hy6a%PlE;x2tV+;A^L> zipkL<_1Z2{=fMDHi%UhzoU%>0@Wlsq$Shmth}Usb3@gmh6kvgI^b~5%O!ycsm^szN z`~-XFY>pEDEUZ4ahc~Ki zK(W;JiX0PYgHe+=+V79C?=7=DVvU`{-*iD^aodlVb1>XD?;_F7P`ZA{YSNV~Y6^d} zB$su;E|LtWc#KUZ$w99lxU~tJMIu|7?dU5zpOZ6)jADMYEa%KlbPpH!&IUN}sC1mS z_%zP^8ekjK07+e!6LFKH;S)*+{98n=!A4~g-OU$?PfWACWJ8|P0Jm7zM~WT$h_?bp zk&*xQSr($FZUzd#LD?QRR0>PyMk$xN2wCKbs5+;Kc)O_n`)1R$$u@wIkMdgtirbgO zBJYVkxcnA6nG;<;d-!#zV^)yJZk`&PTG}j9aw&(=v8r|DN34@Nt(O{`Ax7vys#eTr zbfz|(jK{`rNkRaVVrXCe?>9~D}r2)C)1$FxXZM`U9mHfplov4)8mwjW79|My@ zC%_HqVdBAZUVs~J`*2*M1W&a{u^I4}@M{uiJpShHVzYDX#JH!E4UZ}ri+nr~f|TjO z!3i%r4)7L``Y~y^796FbT5K13x&$!JvP4f>fzu*-W!nLZm~+)mRkwI?LFlpR#Fb(E z#MNXYDKDaSUO5DgLxgtF_{yAeeY5G<$=M7mDDFnQ1o{>iiN!?JbhgUu+nz~tC^;$Yv zPn>PJfutx57h!LDx9hjd!V{Z~BaHvnubWopCS2O&Wxz@1Ve5`b&{ z8NZYC+bDG-4_>f{ct*)2NMRSK)Tsc<)22e8J<-~Aj`Hh?P0t7UE~b$5CzNi3w1|$7qgl57m-GT$enBjl6<=NZ=2`iHzyP?xb zSx4h=Y5?goVz&Wg`vC=~h$$l4_YUN_-53e}a(8G!6vzzn&r$qtkqW7}7<|kuKrhGO3=T+6sEU?tenwUBVHkA-v4*?8 zzaQX*G;3i4QC&`SfeIdBZS@hi7f{)yGJdvucY7zzh)OjxLnHMnnFYH^yZ5&uXY$WpEbN#|F*dF!IDDS_7>6mDIGcYbi&dia$-bI3 
zY&zTMX?flxU+%SokUuSO?-yKT?~qpuV2`kHT4T(f!OJ}l7o-!z#DCi)^vW_P+HEu& zwfyB$1z+7d6JymfrkDN3y9zI*9;d5-}!B%^JqSxGwZjRA1cm3#KgVYpw5wERBJ$UQ|@YG`kjn%pjZbjK-blth$+y zB|)5A{g(&k3#L+;Kt|H!>`ke*gN9{3Pv}u_Y-8>|@5Z?YoEWi;XCWh(pN2zDWEsQ1 z0)*;sH%8zxo1??C@t6$?dkm1ymdJM6a98OjfTwzRE#M4>@G{ zHd5hG{-n|0xFLF&4eD1}+YB7YTXaln3p`B+fjap+dOqThn~x3Q zML7)%`spv zk4UfUt#tGrAk#&7_69_py6GIeYCK?COF`@<$yp>$A=0#LFR@{Drkvg^`1{4Qx`rI5 z;@$pr90Gbk(HXakn026)s1-kt0N$@uZChJ^9V$+FR7JzQGbcCbKjpr0xg@g zq;}02a+Ra}=BXGqFu_{-8~Ji3;05SDc|r!iHXHf!nlW~L)vc}!NtT=knUs;E+*pQyVTdhy%_3`ceAT{;F}e%#sIM@5h*H+Y zlYNQ=y$QA-fNm)p3?M_PVuS0DZ5vlF9ig)OBU0Hj#9RXrXzz3KKgViZSAdLU%p+T@ z15Q}XVb2vw`D`D7WDN-QXBTcydyXS+GSQTClY@}>g#1nI)RP-nZuRi@Gt11vX!9w-C#}quMuUI&K?1>b_ ziLP|JDKsxSwlFBSbRy;Tq`KOV#A7LApID$2+yfBD4(U5-M6$4a`XX(snbYMf%27G8 zl^}~XDlkv#;B2wo_Go}ra=}=n9pH1NLqfQMvQg3JeMzAD~{36W#;s30OB!u%%l1Axcp;W z@woDw3NMH`>xYYr!hzgG>UQ!VBGn8MzLBwp?0GGzMdEC{R9_eZ2D|B`o+1`gfl?}( z33+P=qljy@95c?CHb()F1~`NF`YVnkVY=ujE4xIFOqXq`!vt=nl14Kz9r(rM#V`AQ zCn`7ll78swBK}r>X~gflChNyOoWmm1VVq#` z?2qA7`%z)-n1TA82mDju3D$|fF} zmNoR#$w2P3K4+Iyjz-9yN3l#`V&SA+5!wQ=ze4I4tdQsAFpBRsBQ<)KpXW=9eVV)D z)(OINxy|25(8Sr@)=5YbsnCh7Y_cb{ZWq>z=exf8WeG&2y?vFiUiXRq}pka*wYmc)EHiSpv$aH7G}2Kow@ z@NB?dGiW}6D9fsavx%{&!ckBIDff9w6v%{t=0ks=phev&JKu0;bUNoN%>7A}&$NZK zrC#rZH7h9h5zd&eaF~mZiP%dg(a_=46VR&OR0hka(%59= zvjLA6Aww|1I67)P-+nFErBQa~EzHG?!!}^CDeVOh9;zzXJ7Jd}{!~IyJ`?5hOhyla zP5yQ8)q{y&?#CtWf4|o!Momd+#Hi%x1(QZuESj5>?(R0~WTN}5b0@=xtns_uGB?Cr zQ**pd@MXMlEF@5*9le*Hp@Wzi$c zJ+Ir}c9mCDI0a3**r*XwTCJ>bAOi+QkufoHWPtn%=~RupMz{9St>ds!XIBaapsgwK z|4nU#o58%nnabVHYkPHc_>qY;%!e@i2<*$I}NRi1h?Rf&dEe~8tN5TCS zWKq`>t-s*`g#G<42faU7&ag_>DotHna=C3#;1_dYyFd|cs2Ukk9 zxMk>g8rR`esTb_S_&KA>SyEpZGS$Mu;#0pezVoBzxeoeP#q~I94oYW!pSiT#%>yGv zctb{8OO+`dvL3Z>{SZccL?QulxmMI&J?h9bc#e_riq+$Z_?FD#$MQp1=fbhUBB}@5 zKjJdZq6lKp3ua-{>I-t@xyg02$JJ@BsH)L-`I3 zsH1VeN{ZpCjy6kgF>bu~0W4jOi;I(vBq}+!d#X6L-Rrf%hO@gVXMswV%uqbcJ(Bml zedtUa>F=NR9-mHzoJ?{BRB$s0s`NQHo*3j#FQ6=H`3j=SF&aKCOr8ipWABs#?+Sl^ 
z|FteIE-8hmA;+U=C0|svZOo6Btfkphuk}Y6t~b&lhUyY@NxwI4I4*%qese*X+A@b! zQ@_B#Y!vi9BbNI4`8@{>OmYG9!yjksI9MlRmxe`|pQ6h4^W(t*vR_Vo;^CLD6Cmpa z3MzZRBW7B4#DhojCLeNcw#8oN8(6B#=vi%Ae?p9$W8rH)h9OSe19W<##?0jQAI%)a*{VWz)+-H&fdW6_O-PbqZu^^;RTfWac#TBQxFpW??*Kd(CGrhr7k^UTo`5C~tCc_BnLR(v#o%1MuPMrhSe6<5@3ZUI#`q~K))AF8* zSwBeDpB=L#F*mx<*PVzJ8v=~fL{t&ofYf>G~JNneyE z(a*CqE9cPdJ5o)<0GMP9M8yq1pP(DYI2>uv8B+9^cJKfg)k&dPY` z+|D3%*EISS=FOiUnVznWM?lJBp%Z2i zPf?TDLSCpC6@XMcCPt_vc)P_F#R86e8j!!=A9|1Dt>?}#toZT1BATf@NRFEn>dof7 zfrspVBq~~9(C$Lwu)kDWUfGUww6g^Y?rSnX-j5P>bd%4WN>V(A6n#IQGNTeBvsO%A zWV%db+&1IK-!^B$&lb^Llw2h$$kS1%cz7own6M2<1tX(U+?_!bBtmm($VDMrsrK6C zACHImU1v-eR{rk2j|L&p8E6!vmpgRw7OsUq?Lwm6UqrrcSw-dUegGm`fQQHphEeZg zWsIOGw(U8d?$w(&6^VJW98e+Sm_s{*W-(BodCHSJ2ngB8*Je=hoU(zU;*L>Aa$0e~ z;p5GGO#!;9yJ1?fAB4DUBA`s3JXzR=l176#ABwMb64VMh%@Q~PjbJ7e;y7=7>}xuV zmFz}p-2CJ_$QUf2iqV282-na6!8}NRH3QFPL9mBVdAIY&y9t)16}g1i$pxV*9O{p^ zIO686CMPR}Y@01aCY|)&1E=i#qha}V$;3;5lcG0;g-irpGHN9_@jIgao8NObnwec* z>55mDK;Fj?R0cm0st$+_Y=1wWdt5Sf;iwzD61K9%N( zF%T-!x#x)Fk|gn9hJZjfV<8mR(Um=qiXNX)1l%g#J+}Cdyfp8jlTilTUykZ-H((}D z#2-cYiKRU0RN$l1~g;%xXDr)Y;v_xc9al2jwEvr;^jD@#$!CR+G=x` zfZO@~5) zZkz|G!0e{iL$W}$fP%n;uGt@AEY|@T{M^m5WZYhN@8N7^!K0fM4J>=!QYTOJqH%qO%Z)Jgcc+k z%p{K(0=(^D5BLJDsFS^-=09#HAn;)^8t|m4C)DQyxdp-~fRW-+L%qg!NAEW3++Pi)AeuH;2cB-o%8 z&JHR4sLn!f-H4B`h-1FviNg|#(>EvGT#PBXh~JwG4NJYxN?Yt*4gm>cElo)cu>%!f z{f*Ym8RD;!F~XudKSI!xQi_L)NmL7xPxpOfk&*H73*k;FMFbmDXnmIKeY?VF!;g#3 zXZFz>!vGJ;VW0z@LJZ@j`k73|RNk`X$8{w(rd;R+md^N2`pW5_qm9O0oPozMr8=#f z^COWoaiX&-zm7I7=uvaz3__=o2ShOQC7S1%P@UsP?(7$k5t}&uJYbk4^2ha@GhmW~ zAAc~12lJ5p2H+31B1yL`=~+>wmric4FAkqc zUIU$dBp<&R9~U?8!Dmmn{qE$BO3}%u(O0sr;UEt^UzYyWle~}V@V>SAPyFg*X?G7% z&6$SinMiWc(eWWm?Y9+NUa6*|t4jmA&3H79)Nf@cCLz5~Lk%%^(V|87)dz*TP3Tph zr|FsxZhSis-fP-N;wT(}vTwud0xyyULe-ZUAN$&d3?Z2Pbgqd*#+YT#5$|S@w~f=I zj~}UEYWX^UM|YmX<4Q8>A&4jx=stJfYMd0K%YSG6sP9;;PgXRr0D_~brh%XP^oFgr z4vBuGwrq^+O5}=PPc1qoO>Bp{xEiWKFwx7-sqz!T{|v%(&}=!B`qnr49;mj@Auot; 
zbi3*V8Z6)IU3bx;tC7<&2imD$|M6qI8MDt_x?G8Lbbt7(CJ^8K!$^(qtIn6BtXJhQ z*?dXV=!N%ZrFY!gYw=VJ(9>RGML`{3DzwPw7t|doaI(TL6^lE(f@IsMxKrXnRC!o2N)ijG*##vx)*aIVsmLbX_nF< zgv!ROUm{ft{ic)ciLY=ZtuwoK^`*BK^@$gL8vHdwgt9ELHiYo6T2KGPQKeeYFLU(A`4~oCQ#)7oRp*^_HTM{$+f?zH7?M>$CztrTNmVOB-4Hri+xX@B z$}dwKgmyW}sUL~a6w+TW?a;RIEnf;;a39U>N>G%zx78@1HdMg2&>=${wSmUv(P?Ag z=SVy!n%EQarWFr|ncD!P4*xKXSn7gz-1IDK!|Ag$V4c3cerGlM6n>8mrA4Y)tarv6 zm?~V_QT%PC(e~hWw9a`a)_WFTlid$Mp5GgFVUf1#J(@7oViMDOIlUc%RH^p*!uj_H z3j~}RqQ(RDUO~PJEv5zcwyLfhgvw;W*CkZTB)I^px@(sfnw5PVUS`~xRcOKQ!gQA0 zrhxQTBxIDvjSdYQt^00)i$PC3W$=_778jjOKHlTGK3ov9!KyE;^xzPJGCNgb!(Fj3 z2vAv86=0oq83HIaP5dM?RZKu#Nzzi95H&hw^}vc^-QK$1<3NzlIyW9sS7Q{zumHA~!fZ88VVA>R;b2+=1N5?m>A` z_dx%nR+mNUzOwFm2NzfJnes%0w|8pP?Qo5fzl^UQZ?{_~xUi4zLMx`1^x ztu$z@fl9I#XDr48SnO6N_u%MUPtzaenk!A!lm;$*dl=gfDshvkwY7D8RMZ@pl05cs zG3QX3Zb~TzetCdi zns1dPIF&*t3SPc0GIv+^sC#-HQWYT@@Y624&p{1U^H(ml*xyh9hDy*~{q%*%=~E$~ zPyFG3oH?Hhins47`sHQcf&H1)Y-{>+STYHh$%?)-hfuDI}6HQ_67ARP|r zx6kcLEpMG39;x4-uB7hq^5x4h?>LXvshW9KsSF+s)NlfLH@bY$r?SnxwOweg8yz4b zOvX_q^_is!1pgd>;dh}uGDTtH<&{Sl?mJf6`Z`M6ca*DEQy2Vgw?!?GxR-%RJP)3z zH+Jz*580c9zh3dEC=m7Du@j$cZEdGE4vs#a6^MZMCRF9Xl-M0MHOT`c$KQi}VQuAp zY^K7uqeeMMD3ek)^b+G%e^2EU`+?IMebQ$}#Ygo@@;A|@Z_<&JLbrQMR2aHnd&fVZTL8Dh#yzS;GnEBkHnX67ydl5qt zADkeHXiI$a!Kuz^M^o-bskM1gL5H&o8xBA#eMo#YEx*L-M=GY%i%U7v>*+)jV4~D5%-@5$y4}(_UCTc{|U`hY#g!Cxl znYI{wCul%T>r_4E)#C_)h&B^{9>hFK1u+2MZva=3~q?+XW1|7avL^# zH0V(v`5@Cm*QyAY_et?UE^-fF$$RDNXe;oz5~U4M);mhu9wyrl^yR!0-?83xd((0F zCU0vxLNhYB?{d1rHV2lErI8a(r;o;w9Z1dEPO=nBRBuOPga0Yy^$M43<=4mt8YzPp z7_iGpEG?&@=3-{kRldO}=F~T-#=~J?FO8{+Y8e;%E_%0jg?^Z-a(#io_wMed2eC7w z)jG=3Vxn3^8;{25DaFReh7BUg;2r9?R@oS;-=N#`wz0SV_+v6yQXKt9(_WCBnr63# zr06=X=V}=8Paa$cLz^C{SR%4;`6sFPCjsEnmcYx`(ks7rNn;gf2{a8oE_|!<={at5 z9_RU@-Z$R{VlNVe(OKp9TE)V~S|=~a+x2uSB8%PQZ`mBG`R&^(YgQuN+^rRh-_Ved#A6dxU>)>_sp1>Wt5%6CBdggxJ?%MO`JkO(2A z6ILJ8~9i)5%xg6UdNDwj|?80;(hR5&ohax zDypLI-83A0YZd7^s!a4_^@4m)+c8Z)#+el(Q7xBKjFUsRtSs-uVoqZ_oDy 
zW{0=e*bTJ~Ip}%)_U#?!+Xs>`=<9{t;~FZwJ>FFUAUatU)jq3VJ*wU6?*oeA_Otpg zl3{JCh5z2ueil(Gx2o$Km&t}dm9~gCc}1F3&6J=RCY9>>rgra6U17;S?EPimLjK@F zN$)mDZd)w3DL$X)?1_(h;Bmeh&C!y5>ul2gd>9?Ac58IC^xN-aUmYCjVoOI^a!#V( zDRXT0qRZEuD{jQaO%d|R;FTuU9aV?uWkf}^b3>VAY2(5cqR+i=i0Ljh5;--e+S+t| zT8Im`st<5Vp%l@S_|bTAJ?-r|%ZkLj0rdATJgSG>BRe_)>9mDc>+k+#_<-P*e9B#2vyL1UvKU0Nr;j%K4?!m#qBPzB-5-8lA_nSA{aFS2Yh8|Dt z$}`kqh)v&7Q~bT?Q2Im~;B&taO@^cvX@%bPc@jqJZLaz0^SRLl({mwT53uU3uxg() z%tvFK(NPR-J$}grm~%CuaTRvehFiS+^kpQI0{Zj|^Y3kHTBdLml|_jX&>9wN!JIKs zN?YMfx$&EQKNS*3k3k}v_})DrU_;WQJ4#PN4bR9cgt^$RAND61i9U6_5uaw$85F}K z`ess9)VBstPf4|^SW0(1IJJ8wa$P^w6#WQ!k#9R zx{*(e&h;J})qm`ZrcaE<`hJ<{^Bb&Np!I>Z)hkDZ2)XFeVqN>-(zdC=PP1PQf~1#s z`8X1=ZS_aSpfmYC!of%6L)lmTDVMLx9KReDyUtK^Eg-sz%Jv3L*Z)P_n}<`mzis1& z)NYUlO-f3sM1!%Ap`=1mgiH;F%o>mip+QAdGNla3kfLPFTsu;xL@D0zx7vW-D}!X zYUiunSDt@2e+wgKlmt(_o1KL1(QWlo)A`$Ur<6N)SlIp)|2BwoFe|@4H$AhjLcy&3 zctWbcsuyN!RENJi3dB75xO;X?EbBLckeE{kEFR15ivc7orl)~$^CQ~0SsvRPnK@eF zcBCGm%T3PcX!mkV5{IiUw6!w|v!yqsCQmsPBRtpQiqt#7(DD3%ktKRG3fDR;R!%O_d`!dit+@xl_M%iN?IA<33SlKoVWpF?CJ$;IYtL0SpjfP89 zuPn0l9iK)PQ6AP)1(UW+TGkJ#VExJ-%d8RN*X%!0n019s=R13`@+8S^&05kE%KNXq zG`LoJW|L!Y%%ThB<)1qXAkxz0IX&Cge!IrAYtC*vC(YhtFlrFmaDHguoGah=X8pv- zc_#xT6_$le?w70Dy>pw@==i1*B=SzhW;%OrVtG7!!U_|l0nxAua+>Z9dKp(y&SSQ+ zFb&+5ok5JgbK=`tQ;3t~@uL@kBuC8t_oGEIh3aSSdKHylRZ%hCyN&0p;cScjk0hFs z)Yi1`$b05FTrl+x1aZ=wl$1GPcmv!BgMix9bo4rj#e~oG`kyT;qlY`FP2RHJz zW~s6dw~A>t8fYG0CaCwX(&RZ*$rPltAXe19!!Vw|~6 z(Li0$>39wOW&Nir@tF9Tj|uoR3(M>)y|>l&D@rSCZv=ltTN=H_cCN{)n%u&+ zh65w#GzR#9f7EguCu z!-=8n-ciEbFHkyNNaP>1#2)W8r8q6GS=ei+zbZJFtKVXSUbk4lZY%Zq$>I)s)+;_M zzgZ#3BGhwByi$3f!-~_p?#_`ARu(rULfu*RPq%Pu$L_;WnK})VqJZQZ$1GySSA?pY zH5?x}{p>q?TE}Af&LNc6UayXQTJf;(rBc}G33?w_H`||g{TMf;zW^M@6{N>k>V08$ zlqB6rtsIk-J5n|@D}J<|u6k1L(`BuUd$O(*dcUh^nnz|nst{kLEImOZ{AuDgOgd;^+I8K!85QjQB5CQC@mD?PL!;pS^shQUEu)B~+fa1Zk zyY=+sCMy;ee?zfM!o+fgU&!KCeZS(+y+9 zHnpkF&0-={$I)$2Bk)T}l`Ksstgm#tZvhFPLg!CiR!6 z;?w+wzAYeXERh*4SFp|K&WvTb5XH%ZiuC+OVpTmo;V&vS1f;8^wCxkQI0H(}J`iRv 
z7oEDTVKHya>w4ucnWVQ$eC+Go>)wQT(aGep;KwbKGM=jo__DER4CnmJ?li3sO%9~I z^M=pT5MyNBjBam8xa$(+D#a>ghi$1Ls8@T~%llVPUpQELQ}>%#d3s>!;Oi;1 zHI8}vahe{{$ldM|k*=}gj!=VK(SSb6frb|uJ9hBsd1DI;$)M}G^G6}%Df~L_P*J#t z_#Yu(cb`JNE0MK9d{KxBkBNHhw{Fcv$${KuS5$by)f|-BgDf6fKXzlS@f#<|d#bSh z3LC4Q(KN>ctzY~6viUhLL_bL)?gn*)HmMMc-1y9|SB~S%rwOBhN+W&Eomo5B?+6;6 zTx{`v6ni+Led~#WD+D<2#~25Ue^c=>u|6bheD%G1_mYk|1KUd4?`3Zr`E$GDZZ2q- z4qVB;BCY`Ja8!y=Ksbt>Ohp$1-h1}N8io~)tHjkdW>m=YHdA^pbKfF-1IM(;xaorD zTu`EBbXWzlw5GyQ)kP@erNtz7`?ng|<^6IN<&km28v36*->g|$+3dzXbj4+3OIK}`x!llL9nA^68)(*ok! z24vcLv$O6%=jV1Y4O}>KAnK*c#`PTnE)DiMQSYz+2+?#?3ieo%vd{AN52P!nL}nMIAq}%jp$u zoWScR`#EVr@_Fv{z4#uIgNEC4bz*ZryDrfP*RZ@z#<_KpJq^z^+$zequR8)clu1ES zy055`c))3=@cLE1=ABJPCRXkj=u3+mHn;7 zP7%)1n2kMtE{izDiQ8VoTjAaLd0&8_+Vql;cgZZ`W9)kg-ILW5=f`)<*GdPEZG#oT(5DTB641J6^4#Kd?04 zZ{C3u<5v)9Z^oVwYg4h`eqe8_o#s9#6nl6QO~hl4C@7wq>WL-&4z_5 zuKDOn-|r9%X@kYM3Bi6{QEOZTWgIn9&xt35o}Q^ub=CfU)Z@$NqU=OCWFn8LyGU_3 zM6m?=i9|kp%|+ZY>%xoN;(lyS8oh2mq+&}nqfevubtwIGZ~&m^1y@5Rx2D+wg@3hI zLw>u9V9)s<`a!kS7^x)jQe;Ee&~Aa)&Nb=^PZOW3*IY}qh|Iv4mk7N`>G>3cBLV~^ zZs9A@62D@CrY5QmP+vg*sC@M&q6tfVq`%w|W?3B~BWSfnM5RVX!%b(B=SgsS=8Gmo z3ZB*QxGZj{G)ddiqBZw`=J*?a7X>)>MY(~=5PpKH;Gk8q5$+N&Yl+26i`gx;8q*Ym z3m0=#CHHdADKY{I%`iWA3XNW>yI$rgG2#Ik=PBdB6&d9R@| z?6*UnLxL19H;%E?Dh6*^Vme9Lot>h#%-0_hi&hs!hxdbZ9-`*p+quL-#m8^f2c#Fp z(N5cHmVPgPy};s&@9WEYioq)d)zti=4})ForPoA9rkG{OVPpEndWHZ;waZyLh(Bpc zDp%AX4BIO1Yf6)33Gipd>aeir%Z~s8eguQ*h##1taHLAyALe{m@nlG%z;11A$@cD) z2QKkiZ9g!C&&WWDL~Dp#6SU-kdr8k^1c=HSg%31XDV}!|02~Q#~H^cw9_`j?s70al3DZmcHNuUz!W+ zaVH(-W!;+A0kxoF9=agEfA_`g{F4z7?6?sVyN>OxC2c_%q&OMCB+wHPartYy!7Aqm3#TsOE2^XnAp|L{tXF!W5TAD3g*goSEuNQ^ zDG{ZW7%&tH5WFMNtcHtxku zAh4*(sQ2wjwJkB_Nlqs~3`lmAaO3|xu+{9-z=Q;oapLyVuPs+ET{r>8wufC#^`q3n z6QFioRN#gO|8*(5gQQ0z8RF(JITz}ZY0z%w!y|F`{dbfI3y?V*e|a5S^urLJ6@D*R zEKSa5uqM94kkfpZ&d~d|SGS$gwK;-6{rWWfWvIe=p|Y=i;t(sRVIJ}MWP9OX`hI{r z^2+uwx$FZqn2QX@q^E6IU}8d0Hyzx5bN$20wqGnXP+kSX-IsMh_n^D-fvG{^+46lbde)pF17c+zjb4)vPEp~ZNTjQ 
z^;q=i@a-jb!|Z!bvhie417(xTn4-s?^z=7bNMEelsiPC2MXIDna=FehwYKzs`QZM2AHe0NQBWvYT3P8g*^Rwr0SyeHRMb!w?Hu^lpwPkJ z)`>v4{YaQguLa(G_pax}nafP5#o#Vo-I|qQ%$cXqyeRyZ7YV`^U~1KLlYcu9?Nj0lH^MqY~Z0URQ4$U@>1 zz*)pV_&|Q;*?SrF@v&w@iNX_6^|hf5l+@_drsg9sVVjhJs>(`Jf!GHRF4M>l3Nt@J zT8@>I_qCs(W}?E`a}s+4QZ3>@DPZW^u$1SJ0F&XJpX)lho}!`m)NndQ&jrCLNr5bn zhp??+UMgIW4GWF??3KduFDPLWXrpVPin=#v&pRAnWO&VZg`?51`Am>sb(IT?ic+MR zzb;v6bBe?k^irrpr#(sV1Q}yk?>F<;uUW)Zu~%-!VFVj(VIc_#2_z8rnwX2#+}90% z#OG0;Q6U&#UMMV78QX0Yu5FjvcmwK>=PDX|_uj7@drtrZ;9`8{EAj;W_2TB$+)=jd z%a<>SzOSpRx9aL%4`43yaAi^PvSoMg-J`bjy0Njb7i(+v{{E10^vAn*sh^qdJX9WW z?pdB-UNqRGaANIdFj%yGzkfd(&)j$wlKy%yES|r$&TAp&=C7CMjTShYs_f3jcUKzx}$ks(x0u)g5qJRWwkB1$f+H zD8PJ^C-B4Mi+GN6C9U^pTUuI1q^4>YG1o^KTEHh?#M7ddo7!70&7F>#^xxn7`q9&p zyLRom>K~bvWpHooJ;IEFFJ1os63eY3K3hnkdThIp15>FsQ-%5Uxkct-rULRLliE0d z44Z9i;{TorG>wU!3+_W4i9$fUPuakHtD0z2p3(LjlB=S(U-wlD+_`JlbTYBUHgf1l z=EEzmkHyk6uVv0Ew}TVS_dF-kBo*R_I@ASvOu?rY*XCFx6Gw}73P2l<&P_u5VCO8p zg!wVaOP6$wE;rpz5)Se&_J+&Tmeqk?5HjomF__WR9bAfoH9QRV{PPbe?Ai={b!n*tl~1BPUduBF{HjXZx ze}8B6%@yxET}Pb=3nwKNX;6};a0=eJ_0TW=798TbfOOxA_W_%M@v@&MD$e~4*t0mO zb{|1+zeLf*{-;yv}-kciEa?<}WZEow?^T~^K_ z>!qWmHEZUw?avFr>W)ZE)JS4(MvS*W^}BFPfK+bsboleS+>4|%Ak9>?e&BfQ{NxT5 z2l>}li&)KH{A3s&I#?Yv4D7khH@t!f%IisNN}L9A>(`eI8tQoL#DS|F-QbgGR)W!Jl`-~!0365wjCCOOf?kgp{8)E zG4ef&<-}W}N`Y}>-bE4Dks;_ZP&4$V3h^tDj#6dy#vTvY$>z@{3B^sr9-zy704%jW zg3Foz0UH$B2jKPC=+n{kAlbCNGOuEPwC`OgARvIyEPPvf@2h#~y$&1Dl^OC9>+AE^ z4nL%7V9F;E#^pS>PdfPKOORJ>CQn(!$f=$eukLMijGx5)sTNUVZFScl<`+m)2N81}_iX(iE zyLhKwMtjAZO-{iN`rS?6*KXbVxwiV@M5Z*01xM2qPA_8q6H~M|sHka@Z4xx~DbF4< z@7!&*s)FK7nxI?oJT^e-rZ!&mQ^}2WYou?Jriehmd}IN0sOKvIfdv2e-iJ{L<7MdP z=D~&o!ie$~>OGbUQ31I((ouQ@rPpV~XeXOeM$>L80pW0%9`XvIixCabOP8;9M?fVr zYmnkNPSF~Xe|qmT4?>c>d9YE&VC{L4k_{4~_B~sdbCpA!BnmYHv@;z6ldpjEL}ZO- zzlAXG2$0Qe(RQ6oQ%GT~JD&ovzj)^64pH%=(|KP~vqp3a**m*<=W|SRUMi&p2N)MP=}NvTj{Y{JAVdw8%B+m7ZR&)&emK(oux&ijCL1` zBZ}kI_vcds*rsWCT!^}QT7<#-@Vtad#;Tm5rZok57iT6}NNDWbqF29u 
z+Xs+ZN>5Aj5dVj#inXj|-7#AiYlmW`;IliLI(5PpWS_J@)M?2W|FUz}k4Csr5dLKH z2}=8q{p>(aSDRps#D?LogNA((@loy5E$(^|$d6nw8zt-O`X9LA1VfyIi7tR&Bx&2r^5PZin;P}@TzfVf9MccWnsP?g&5t(wfv8=HER#tL5 za(C7uUQOGPq2pGKc3AI>+kF0q`HJ_*nu?-tl9(jX&4av^L8+LrZ?Lwy5l5ue8(W&r zsKktV8?D1hD7^jv^(jObE&HoI=*!|t$1b5@9&R4wFS{2iP>$51+`Y$ZIax+>X(j7L zE4N(voW|gABO#oQeLT{AlrS=PQ|eY3@bCj};R0zv=6`$1H8LlYDL4^+kJ60~An14Y<$3aLLlJ-_Bv^FLD0Bnuf9=`&JxZN@Ebay8vru*fg> zHE`0I81Q1~?a+$hT)Y0;wY4p;vJF((6s~rqEe}!JI#j1QdON9XRAE$KQ}|sED8(tnh&b8a;h#iwS-t!Hsbbm2x5BIo0o;3ZtE`6e7vO|$IE=Un(7AlL+HGc zOg9(`OUiO?>y5>U{u9kt>>nBGr?%?{uTLy2n!L3Ip}bkmWYXS5Daq-a99XRk2EdIe zueU)2Q=ecBq|=8M7sFN#(}55}Z-S3+_!u*q5tkga#o6*$!-dPuNW!Z0ZP0p$Expbo zo~F?^!=DDryYAgSl53alYHIr9%d70{d*fJy^BO|w-VUh`UoPD=%07HZ8GF92eV*06 zt(sLT=#A63@ZzE6gt{6>`D=U9f&9XzJR`Ye4J_AN;)rWZeubn`((Uyi=;rU(I^z!v zE2-GXbbxH|f~_AVuU4x(jEH8-Lsi|SzSqUfW*f*GKXYB;3rXQQ?R%6#AdMFh63kR8 zJUzApsZB52sQ|L{)8s!&YS5N6(Z=f?7F%Kg%5}g5hQ5O>kyQC*r=cM)5W6B3%OnV% z|CvE@P^1feblRI|s&&q=M%&f?U-Rq~>3=06)+sNUkxcl z1g|(ox%X^mkyAr`RJ){ZCc68FF&VGQenmURcZc_(CAi#C;ybZe*;Sf%}C@4bW7v z3dEccTjo~E0i~pun~yrSZ1R$%4y_E)HZfzU{K*KJ<5hL15_`JT(+(W)4vezlQSAZiZR`E zrvCSm`zsAAut2BbUK8n$!Uy6ebWsI~*hRMNi1aQZ*~OJDGzXYBjrLF4i=OEq77V$3hFz40n$Cp z3z-M+YGNbM6kZg4Y(*5lw!r*F(qU8K+AE?{sd*b^g6k@>m~VKTIIV2wV*u{Ap~wav z1e*$qT#Qk{B`*h$CX&o!;W}OC84G>P_0kSjIp_k?J4q-a=K~_=Q4(WJ?I`wVGIXrC z+Ej`53y4b;geW)A%G8~z^P42vMYwDy_|{ zXIb~0;nadGDIXB2)$V?xr2MH2j$^&!!{L3}E7BMeb21oIsFrxSn!@LhgA>MQY+p8XT zLJfC>!#@DBc$v7q9Y`s|sS;sl-QfD!+2Gj5E$!*)*`=*r1qk@pHw(Wo5xESU>6RRP z8Olo?yhvBfG2fEduMqCb3z3Z#qxT2<)b)#~W78wFfT$gKaInn)DqGrFvJMx#X(Z^{6OOn?!Bqf;NAt7?R)b!~LPYDXv@H}L4 zzWYDeOa=?IrvryoLj02C5lrO)+*%iy|F5E%iz$q6KqB0GIiObYO7r>j* zA~gpc2t^=Arhe7N==D^(fcbgmO4w$%u}(fcmfVF8`=x#~8^dI{mnuFzN~d`v7VAZl z+?=S7R{hTaV$lbghW;JoI=;$LT>9ZZ(TY@xvluUh=`+g`ZbCR;wBX_nk~@ODd&M6? 
z>t;ZbzwKoQ|9cd6ocQKE)SirWvryJ4O6FtUOW6jDod}51sJ7`ww=O9^#r%Up(TuWb z(;+{ACU)zp|MA7jIdZQ7tQDIzQD z?YuE`CO$k_qCR=ac=5wtgHN${<*&|9Y+164hZi_G`CuzbeV}Cv0>cY*+^YcYd_j zh|zZb2DkDNJ}L;>jptgp0A^no;jXb0VPByCvm!>bLzf@B7=m;(gNZWZz?5^@W*fSCf^%&dr~O*SrTMeqLb-$QF(rfUd9kli+lmIHM;p_N&wlTj*}^Dr@?~ zd=HzT+s0{Qds8wB9$r_YZ_Guvl-{5)TO)uvr3;yh#jR>__^pZXkhw^uJdm93V}3u; zl~9G*5b?;VvP=#sY31-2ic_c;Als{(uiD0 z7(@UX`Ql&+At7SFQC~UAJ;x2zR;jbHQ)zYqoTwh=&(=?V(;NaQ zk^C)%E9D6CqdkRkm^~0ZHbTjIlyY@Ye>12@N>bC)^IV4>N;7wH1^ww|WtUJ%*+Aw& zl2X$sJf~)-x>q)@M55s>W%pS;Q@wE7)zh=fbBNfUG-U>I5`!6J3J?(c0Vs;qn;h)R z_24obY)C=^K!^G40d3~9OR&Z+dhu@r>@VI##NwdEQZT_uQn-X!xZy^j5eByAJ9bim zobHfL@xdJI{KIS;$7)Jt3svfxG<9vsryMyK<;b&;KeqC>LEt#r-h~9zYR@rL4N$nU zN*%MsVF{o690x3Jdf(^q!l1f<=&)7a>EHYcC8v4tK6ug;n_ce51IemQw=`?A$G^a+ zn;y}M!C^P}qDCE8yYAMXFPjmV=IHOn-vl82)$|8017IPHOc-b=KRkRpsl~in-sN&> z#O&Qb)$m(WT=7HzV8 zi1|bIs6{>X%;#Cg&B`gb(rjd?#hl!x+aKbIb5zH;oiPD_s;EZ_8Vt(?Fc{~2fX)=9 zadD_acyW$X9+V7BKY%E-NNb&&k;w0;(=NSQ>0KAfYT@)S=h|qe!PkAd^MI&UwX9@ZDdh3 z8QcaU5c|DB9z}V*jrLeY$s7!ek(UTR=2CruVt(rEZsN`<*!={MMUwe#hc?EJN(~GvkOoZuA&k~5KKZO^PjG~7w2ngL=_XSg%@aTxc@QPDofa=Vh^ZdjK*nzqJfIQ zg$N-C)hdo4Av^eF`l{*XuQb4|7-N>KWqeD47|UnPoruUWhIiZ;i;msE zeo(maubKOIReuKk@t@IQ?BDzo!R7xQuf=yP8emx=Eh;YTj-5G=<^`M%E`W=jHnOYr z?AZ4J)#P8eix>k+q~3ifzFow=lh1YP+ZhiBMkhZ+4PFHQ*c`bjo%KH@MmryqLm=D% zygQwnWx5z}4WLyxNlms+qO1Z+TwWAut@L^CO^yH0$Ey zN;t8Ewsl}P$A;kYY3cRx^#`rpyYs+IO9J0!VN}!(S>|8{AKabpM(4b?p+lwc;HHz- zg+LX;IL3e!mQ@rp(V}ri#qTm%C)6Fc7)G5>ukQ-;8Z4AxYVyO7#_6FM(Ikw4dmNt4 zjJgyiw^fxNdNYqqJ40ciK*-27*2&b0Jqrxghd>TePYg>({0AuF1pqeq4$lTv;0R8o zXK&N(=IF{Fs?VbShenX0ll53~{*f==exm`ss2w8A37 zp`9gzc?q;4z2Kc__zpEO0el7JfiP8;}Y@PWFK8b7Q}c($4siIW`K(tLGs&1Hc>jh?Rn1r07;J zjK}g!!%!-@0;&<-xj3+B)`pGc8B62mlqHz_KG_31oyWeV8ipNFli`zRsHL3B0&d@h z64^5}zx_zM6FkYR>z@T4AnPOd0FP>r7!sc`zgM_2g^2*bgKZyPSdZhC`kfX>NB32w z$fWLR8}QzA z!!~A)q;O;={wtNGffWhcEh)XR?t#E%+*9=S5&itPWI?I1mjVX{1?_94_Oa3t+2FiQ6c3ojsylAwU%IJ`fp(81_Ph2EmICLh0#YB9NvzKQ!# z-LV`C&VpaZw`v(~EzCF!_Kw+;iBzH@9te5jD5dsOS%(62%0LjH%pjhkcC*hi(?kZZ 
zwVC3Fv2PcyD+)9arKT&U)2JVhG2yM`?}ZdO{xoNRs2E`h<39yzfXz7twr)b;^66GF z>w_%jp1YbZ9EbX#k@hVt9w{m;QE!E$*y#)>O<)H`i2Bte7cs}9>^tF)s+ye+d+SQh75lmDCQYre*=|n9#Lx@ zzP#=uX^=nQ(*U`#P4V$)Yk=#f14#!;$*trQl-{CHjk~=@;5vOQ8A1|gdR#4882^~ zf;|5r&IZ%#bIIm{yg~Jw6qp~vG8zU7V;?ySk--+#_jIQzCX(&x#wq6)9Nk7H@4LpT z0xa=3$u^T_RM$8TCb~z;Hcl{w3p)H;;_LgyzBe?#{E3S^{JalfyClBmvfxR~znKed z>0AP)9lki%lPMeo^N?vrJ_tgysjVYnNuj9WNy=YF`X+xqY05>wEO&Vla36^cDrJ|)3>6i3RQLz>UNk3Cd>RG~*`rzI@ zFJyMPH9y;t#Zbq%_Zetcv5et+j&}A&=4Fm0XVzt@x00Fy)9i3aFNkITSNn&u8@W76fCaZY&lgX;)(bM|8*uZUsdJTmAU zBN%}1qL37@-24MFb{k2%r!b01mMar14i;OtY)8md*|1zAUB7_T9o+oMI5wK|KaI)+ z4(e!cd9d)Eg!IlUR(#+uW%ahY+Rq-~YjIV5+CyNG;ph)pKrB^1r6Urx3rpen-4aeN z#pEc&h->PF`O`9jA5G5AEJR`UqbfKkpK6)=DJ|2J^ zdx_IqtjdVFW68szzp&e#pGboR19*l{aPJVEY4GzFkIRI+H#KK@h;Li~Dqh{gfU#L) zZDPJ+#{Y?G#bN<$;zhRryp@%;RVG1!TCDF5Jbc;@{didK{!QdSS%`MnPg zlO8d@Da9cxWejU&6@z|BHM?+oJp4*eO^WN@9nx zr1CHR00k5O4;3T7eUy zx};j3KD2KQ5jTDV0sg54V(4-}AiBDB;dR*th62jm^vrqNg#uBH{tzSlqGMZu0Ly!& zjgR+w8>8kdtnv@&>X4~m(5>Xx$qVhbch_aIDV%Q_05yAnaS(eQolK=Jdi{eT@_!~` z{r!KF?{5pw@A>&}g>x(h8(=2C*IX;V|N3|q5TfwMf8s__e=B7`kW?GnS*pj1LgM$9 zLKPX@Fix+EAZBnvsB@l0?QFk{@2>U3ox2w{y>n8!SQT|fWlq#UdQxKZQ=JEwe0>|` zZ}iAZPafv_kt;Bg{SAE5vsSbb_uTj}uyy%|0RZ_kM)yAIeWlDGT8iN;YLD&(8k^SV zG#N5n5=lUZHys7+R|EEM_?kBFoZ8}4ruC@TePn{+)iyr-P|5hA$da6yk#QXdvHW%v zGSNLM@V+cu2%lLkn+Z1C8qW0I9{u;b^6w-w+OjboO~TsWq*A{N%>Mgdhxi(x2-2y- ze5@?ufA2v5mxlO%uWJ5%LW~nvST2D)qrD5Q8$FTmis0k|kY6Uy;uJo*Q3vAHfra)G z_g$DjBu`-FEK??}%4OsNN5EYz0LGp3Tr%C@KCk8YkQ+G@2Keli?z@I}Zq)f$%ltvX z3Fa&z>s7v!b3LUtC{8)$!6~LZ;S|Ej8e>G3M8SObWnzf;f4G9+i*%*=(*CcbG{gUd z-6%CWq7KNW^LPknM9j8@Gzmx0)FvN*9;5XKv_z5F_ojrL{tuS~a4UTsF;!&olozn) zyom-5)ccEyGz>};kSU_jF*+G`=RYVK?{TXS*IOaKcb2f!>Vj3h@Uz47FNz|A$MRp-XM?SuhXTk<(D;=*S^b zLQ>Dr^|-qvi)Fr4E2oBMG-c~D@0P{>hk-h8+wnbC!RrQ~Hot%X$zoILSi2rp5+oxl zsRC3L=0)vH>y9CUVN`ViaU}@~vbb zmMKU0T_bl@28@DeeDO%&A#WZw64SYH5HTM9nKL1|A%zPcwdc_YtCE146sx@frKi2b z)NUqhm&ERY*`Y|fzk;V<>W$VmI@;O+WYYeX+&(q3f8tr%5x1dth+&9}Z}HrF{<{0K 
zayaZa#Mk_sG0asPGurA@wqck{y160LZDd0J^B%4r@@{!}LK~a!7YZTMC*o$N=6>SH zK^-8jKLQfMN>!0sV*Uc>z7261LDhMfE157d`?`Yo6O2L|Wl#)X3%I9n6IqSl!R>y; zs)4-`qZAaWC#hGmw@10$Q=zq;%d1}kMCj0TOMKo;IIEVo90 z3EouIneW^R{%SzgHy$lcO3pCb|J5g_r*W4C-qF1DBAN3b=T5ab5CgR|aY!mSH z)5%*p4ca}EPCHPM7!KD4=n8w38jpQNOz@&4SfXfggeqo|)em`EQEomP71-_9lHZIi zxs?pRpCJFd1T21chgPoshsFh;iFd;hKy00KVnh8z%OjUqIV-^U=|C$*2eR02zelc5BdcSu|VTo*Vmk$VfA7;bkI!n7d8=1o*p% z6D@10)0XMg7cWq83VyeI*z7g%i$Hy2Zwa1WT|L2asZPk>Uwo}?g*W$Cq^>3JK!2`D zK)wRwBN}QWnk-)? zuaEcZJQ{EPHe)Yir(aag*G}|$a(t)KGRMe;L&DiRirT(>X+#rPPqaqoXTXBk?^8C1 ziP1sLHAp1tFc1x?vrp3K^IAwljKEVVO`S*J5J<9JXZVH|+2-_regpGdBOpbG&K=@3 zjOi2@;7e8?t@ycr7shrZS9bos3Fo=^7G9PcFegn1Lmh{@-;HcDxkss>;|w}B0;BPp z)pF^Fe2et(%M@%-;b$y5xu-w&l>yV$4sn2LatHW(N2!TpfTX>0Q^g~iha_w&pqN;LDAs znHdYJDh6w>RTFwxt|CONViZ{$E|TViP;8}WMWLl?hF3+0H*Sp@-B|s_b=8rMGo=cn z?4BWR9-{^IA&-mp+arxja_UD5dPYV%`%9!sMa`}c_c(8Owm=Nb05FOqItOzas$H^KTqD;X#eH^L>zXgwt@-9S!sD8^aN6c9Lw+`eMzGWW^xNnJ zL1-M1eBD5FhPDbeEeg=Rdi81**_F3c%A6WHes9}V{wlao@CqpQ45MUgJd$f%F!kQf zTbs0B_zJ|M*!M{MrHtd(^&?f)yYK9LLKVk|K+Zoty|LRmG~MYB?qnC1u7aH9r@dwB zDg~N+)v3^rT-XS70XP&4@0zh14E3ina05g>+FB#TX@0-15 z@7qhu1E)26?2F%*TYpzOKG^ttltWvUc2l8$JjNkt6vta(A)?_PRCe|(R}ZzfIMESu z8Kh}PZRf^8shWw$vDDX3jF?cg(r!!M=v}{Vl6Nk<>Ofztca6d6K3bx93B-GiJyE zX=RDn$8xt!me08<>r{0TOR{8#hfPtys%2hBKm2*_qF%k%oHcr&>^!J^0LuT$m4#jP zV4BAgTCK}5i93M~raLFtMiw}QddeGdxt~qtS~OyyGDSfQ{cy;%>L-m(>N3E*wU!VW} z-UazZ35_p#ZNj`=)W^+mrH}Yy;ZkvyQ?@v&t5Br01u?4nB(~M~e6Kl~yFVz%sH4@k zdDDw)1?}Q3Q}cnK9W0cVYI~cWRAZr=V(H(5FQNZZel*J;6oiV2H|7fI+)subI6Qij;$UxwTK@Qs*IE;}7K%sx z82s58W)wF4n^x#0Ji%`}+NZlJ*yr)Rmw{mGkZ;L4>o@yH}Rj{eIxX711}yI+dLaSg`xa&yaL$Ah&PEn0)9nYZAy zTkfI}mQn9qKkGrGr#Z{nuCKvdm@w}0TGnaPHf`Eu5ZVhOfiGBF+oT=83gZDuUiKH# z*@4j&fIh=4w)jx&G+viclZedb;(YZbNQ}OkR4aYt`pPqiaE23s}J1%*%J6P1z zGrhAe?EzUTiaLDReO<)ZV*^N}N3kAb_dJZ)fWDd<;R>vvYG3wSv1`+xfBqSv7Zr@|GGRsH;zsqx%X zaA&K6n#KYJ*WXf{pgCg{-w0D7tvDzKU8GYHMRBzuSzMicK4%B989%#V z;1`%h{OLJMXnX81OkVY&&%c;z$3Bk7k><+r<-K@jPBwg96CTaxla}nl1>>Q4D$&b6 
z>5y!ZYF%R_i%d=%S-qO*u2_wzIP7HwGW&R?H(i@;-*O?S;4n`Ek~MFwm>s-gxibV* z_Dr6$`qRBlWs67L9(!g7bvI`%e_w;g<@SDd#OU!%m2oUDUme|nJ=Fiz?X5+P`Ce+R zPOJLGZED5w#P=1KabY*Pr2VI|>$2ozKNmi}L5Y|hA)-;=2Y<9KLB~Ho5EFu%9rj%0 zl{xkt#p@+FfkaFracnF`)AN8o&rL3ndYuQN_e!UqU;T03KT2MpkdPmiX{0{g712R$ zFRQy-AG`CJsq7EUU-64A4G;Jkua|PyzOAZ6B(etkglW&0O5fFn(qbF-o0yo~?5@pr zO7mN(r2_m#$jQ(AXqf5#EDYhuQo(sn zlBG^;^%vz!Gg-nnu}_O!ZSZKn+RgQ^R_ffpIg5wKGI24Uvdz|s`}H->yP^*3CdiwG z=JQ*XHC9Q)nQraLE{{?*MdU@-zVdkzjyo1pX0J- zy54kTJv|2#b6N%aJ8(Djz#LD358o*vRMD$IHYhQE5oOHZ=HqFoy{Gx!s4MW8;vGfE zfsJ;o&dgx9!&awR{sT-}(b4L}JWJ%gRlxgfF-)}!j=SQ7ufiDmW)HdLK4aeCwxQUL zOzaF=n2uKP_HYbYR90Gx%`2!c%ent(^2-T*D_t+r>ApoqjBu4UY}XDO5PcoJhdA$Z z-q9zgIpV>%t&SSkg<@;)o^pK(>97pn0Qpc)wrEd%E>-gtE29eCSq=2|_R3D*HY7M= zK00lihA57S6Y4l73Qo*fbo^==%i~M(Q94NuU*FakWJl|ySv0>uEOssH7(5^jPEB-Z zZn|z3D&qRHy{6()j3wU%ByjKa(vL%sFCSUo$u@4y3?7#C2H7Vo^|FpXti*Ys zIcbk7TbPvHvnYv3nWdH=pLtigvri~)K#wGMtw)CsAXG8P7Y~o(6yE2j#TcX=v z*!!#w_QNjOUt3y~EmKTh%t&I7?AAOv!x{7^Z4@x$un4>l&E7cO_3I^BrJ==T{g!pj zacVats{jUl=yNUb;Z@V(&;E0xYM5I#rw;Q2-fr>t^()NLPLnb{o@wXB+PdqgT0Un`r%F3r`vcv*U#|xV9@QbAA9;y}!^;=biRuqR^|k$G zTn;9=FV9V$69+Qvo&yICeD2$Re&XA#6Lt}SH<-&>X>fw;XP-Bke=X~2GbeJgG`^j# zY(7CTW}ej1$jV6NnQe$i)37QIK3h40b=v6ceeBuhEn9*k$H{*4>5Tji=*hG)UcY+x zfw>!wC#1Lx9_v1i?^JU9dGD-=(rVybZ@0*UbJ(5_A3juJ#XVo=<>oE#$k@3qIp69? 
zy4IKcRJ%YVHE?UuWCVI;L-CatfPu=j0RcXGbA6fVNy^;7xL@9y#9se-GQ||r-qfWn z``&W#6S4FLu#u^m@?~e}7Y%+nEPO2HjvfC-XV{<|SQ&;=|GL_br8;hqz1A&=X&pp> zQpF>4EH33xgIr+1*rWKh!!7)4=<__zj%QJh)^Y#x##=zeZ>_-89#1#iX{CYewE3%C z`jDjD(71qT$Lkn}!glF>C7+3M7Y_$8{aGt_Cj?7a9h7#&s$L_c9=ajAI5p=KqNdMz z$FS>^Ol$*#yE~pgJY5W{e8TZHBUq3&DK>Rgg+U^>E$Wcm8Ar>X{BcJs!`^~$x`Xp- z(+(Y?eB{f!%(TkeTe<8S^;D(V+>1l#S(LTRs>9|Xs^#21vjFFAGNoqse^lLT&H;&M z=}Y;8Mpx_#?k)?o%8Xo|vJcVlxaNGsiQe-0d>wpj$|?o_y6+beGINn5ibrwwxJe;R zsDer(4oZl6iM_WM+PAIO>+RF+dL>*6=4PIVUmo<7al*QZd-geZo$JrlHojw+nQDA8 zB<5Ie%MZW0Eo%?!v)qQxhT^i<92qTIFI**N0p2pPh~UG=fDQo(ed75HrN z1h%U(6j$KF>JP3Ih0133pnf5>up*K}ZO?kguQ$^Z4XzC*i(c(MYJUBF-X-?QeVKkf zbXsjLSO)${_L9}fQ?kn?^ZFlIfP)rYc=_wkd^04a(H*a5mg&ukI@(lNg|nrxW2bk` z!{BK+n^p|1qO)m*hIFq{BSNc?zK>axS6&0Ai4ly>+MbjEg- zM3k{Po&VanYP_gv;Un>Vyq)X8ie;R<*lA0?ApaCpr`1~^?lpUrsN$}h>yMm1G<{=% z;lmgNhcaC;-GY0w9!X#?EHKvd-6h1`dPTK&)DDR$K!?x=6F0u#rP+AJoZ}iigmv*r zNt+ZE<8e^!!3J)(flrKe^{vvEB(hn#FDEP3Z=HS1PO+uF+EX-+No0RALfG_YL*r#+ zNc>U8Yf>$wa12S(F|=qIj-jO&Lk?Ip$aG$6@$uB&S$4FhR^;~bK!=+jQAh~3-tH89 z*8JlBY@WsXsn%5>@R_(^^k0SMI`=0E77ZgKsZW`reCvVw*9~q87i141v*dcq>w1!d z43sT*u#xim^I3-s`_^KuDI%(NZQX+Isazv}hIBQ1zhT$VC>~xL|Ms>r8wg7Y{QUge zBl;bW+EowGAoCyn54o&sjbNdyjwMxMp7;ufMLM=587YZc;N|UelAkj3ZXa z)rSHpF$7YJ!hVh$T0Fwml{MMTZ+gV&A1q7%43TkMDlWbo8lJ^rt|RHfh8ept?g9d; z0e;xBR9;0WmDp2~1#w#uh+M0YH~S%2Q%$j~T=3v>>;Gc!Eu*UJ*0y2n76l7*VG$xC z(gFgu(zhTYt&|`oQql%07@)K?N~n}{vkVmJlFl2XOQicdCVP+P+uP@Uf4_fT$Jl#0 z+{(VzyyiUPh;xEh?BYiQQxQp8OC8*Wz@ziM&F_j+CR#upC;+=eQHO9sup(DE`F3;$ zKZ5zdf%}6rrx@&7o>0xZOABC@>x6jpRx$nBZ&ObV5O{d>%lB0s6$Rh;8(hsfJ4c+q z-xvZ?ly25hJm+~IOO&TD((PKOknD8J7g=29$xg1L#6`F$ZZ{DVqn05L=l>DJBrmP6 zPYN*b;1F?9-yf}X_=q~R2u_a(JQW=ilS%tqFi(0+5t7~>XdnFi8dHq}F2`KG1mEgm z&?X^LMSZc_;_b9U?Q(c06Yg~n#Pc*lgD066+PFq`>UXOnl7@$FlYB3=(s%y7Nv)s@7;%ae=Eq81IrYE*K1-pngh^JRA^_}un9O;&=0{|J_y2m61IF;j- zocj1w=&q*aPoeqwQ3zdpido0`d~M;m3Q`|J@mes0=vS<5JI8mc?If>e&h4gLI#gQoV*NjSVDsP-pm>X=)s-3}3 zDLLUUXlgRvTa}M)UNYH0OGIcMUq{6>cUteY_zL5_bBdDrAyL|L6$_u5p%*iWkPT{B 
z0xCh$C}R^motVS+m!|85_;&BGJ^WcQWPi*j0hdtSEnsxaoeKKo(+#2IuXa<6c<)clOf*3wOOPoL+I4SqQ1N z{@xVA`Ww*&oC|-Bc+|6}%*V%JHnyrQl{QK?Pm=OI+BJ@nT7k7nA^&~lw84caH%?!9 zw>?5y5i$Waf!oSCC|+(7q{rs~l*u|e3hXZxP>P4v@VY6j0UO~3i&uPR`X~e3^7+Cp zCZ&#to+Ft=A~SRCul%X#%kMl~KVf48P%UE$U@Oi)teP6bk3RisJYPAz>da!^w{=)9 zWh@ul(sBg_iU^LUX~YlJ$kI1cE4BTRp@oBMJ9bmLkBb4W5u9}-_>?!s{Bc+hHC;h! zDH*9kz0~ekI(?I$awFZMx6+Lx-=Vk8ed>Y8wCbEq;ymhUxda?1QCIOP!@22Mo1N$8 zmJOZ4GMycciBU-@RAUbnS?>S$TZ{a>jC-xQ4p{}cKUUo8=5bB1fi*$;Q76Hq+X2$cXqQ!B5VKjg2VU3&$!$O+Otc8R7W zR|9(T9M`xT|1DUsU9RZMx%20Rwd9H_d*`ULLn8Uz<%ijA@0#Jpb7m@S8|}y>DnIYU-|EiXQj<4b*ujqy6jJkFK5|k$o5z>#abYQh^yqP~EnUp5c z9Ae%gX1UwCWdWzru>FGE3_3dTPd1zPiZ!WijibEHbbm{EYwO(%&?OdNrXTYy&gxkZ zU$1^o*m~H&5!)r2OfMkTQa5=yZ?C++__q+j1{6OFTkP)LX;A+cNRJ>5WhEe$ zyn9-qhq@kIJ6T?L=<0`18@0(=i@EDLPh{i8##5H^+fUPx?t-QK_EjS^h~&4jk?!Gc zb<|)v=to~1Dph_a+>$f2O@DO))8)y&>LM?Z*zgdAK03P zMxe{*hqOeuZsKA~qV7{~Q9CO`$kVzOZ6hA&oi}{AI&RB_efhy`SPQKkN3LEA5_f6| z3@rkl%Fxh>lyAA`S&t%dcnC;rwqmdTsdsKg?j9#bdPZx;2c>6X^7cfT!IIK-c$KKZ z3(#M`ZVb7I%Y6-gjjaP}f?9k_T|j1iwX7VcQtYf!!&WU#hprPc@LbdF$+}uJa8oTC zx23Tj9eW}m!&faoVf!#hv@7kK%hAW1xB9S1?J!SNc8^f&EFkN`&c3C+OkogUrITyU z$rHm8bDwadXKPv3A?zE1Q!pEJpB9-X%n)xxRq`G$4Vh3DS~#PF+OoBVj_WJ4QT8SR z3yQ4b6%hWW26RsWXHMkO`1{o*UNt`-5fq%qU6^U-7MG=j|G-+begXn6zsc%aL!T5e zE40=D^tb&t#d5z@VlmiwxVv-3a`puy#tkYne-*8#-7PQ>SKC-1rgHOY{9omh7uk~? 
z)^<&eULL4e*dNjBZDKoKM-VU^IC6+eK!9*lVZjlT1;nh8)b>YAoBtWD&6lDz7DF71kyq5h zP%*O0a@QkC@58USBa*YjuJGMz;jZmYF_))J=Rd>yZXlsuOHcJ?;ysP9a>OI%5+&=a zxY&8|RT%4-hTQtPVGJ_1S6D9VtDYb6BParl1nV>}&B89bp(j-QeHDO_{9j~_V~VT~ zg9d8i&pOuqit9m~Ny&fXM@<&h5qXp0+zm!0Tb7eJVum(lezMTISS7PGfTJzb zH`z6xzd=3Kl(489a(M{lw>Ot&?(xYoz^gA}%6`TzZI}}5kE;LLFd^++FG`m-3|XV} zv(_3G*=cCi{iU^<@0C*+I3M(=(Md`Pk~>&GO$Qw}`)r}smkhJ|vCel|Ll=+V;OQ@w zfnPxSyk0SPrdn9UdlUrPR%#Y(#TpC9wKds~9UlRyQ$|4IPs6=_4k1 z40nasz+ve9bL!3D1FB@f)TqS(JIcwY!GYCoaykC0X>u}*)t)7#xZ*O!@J8V>!}hms zUmQ@4-syh#Ww2M#aK;~YmrAoE;oPs;B2vzp-X=M0UM~Fs^=pXWgs7|Nl+?hg_NCHs zmlL+wIW)32UX;`9leKyfFVbc^K89-m_UQU0k6?6Hj`ooYlwq;1>SbonCnI&=J=wp8LRme{yLe4kWoG_dd=;*&vAPc{;_cTm*vScBpZ}C06ZCf zvRA2m;!|rn$HEDKXA`tTZT1ljfZ}96>jO!Yj=F*D{l>V%|Kj^~Z{nVPiuLaG;nFnW zH>@ch*xR?RADw_dM?K#}pwvb-A3?Wy7xDB5$VgF*BqMFJyJywnIrTvNtLDBS>q}@7 z8AwO{7chYscyXF^c5+bqHLL|t;&k`i{ekPgM!CZ_OHk%iLH z(g&=R9yh9J(-%neVhFPiO^y#grmiwOs77CjP2{;zx+m5v$LB+(+2O$$sb+T7>^t15 zE0zmPWN&v8)(du1j2f*C(6}D{s;G3$erBG1yV|k-4y>!k4xgAU+(Qvme0;JJUCB*< zK=8tmnjfy+Biz21p!HsQ3!h! 
z6X>MmcF6skk9f1~nLU)B%5ISCG0nzixx`Xz&xP%eN3}h;_pMY>tqs}Ww%J!5$T-5lh!RMd%Tlyzko20&p6a!A?_2| zM=umdXQ=Z-aKV95={3OIOBPcCFN0Xj==;>dP+9h-;|E3PwlnAQWu!F~4{$TP;cgvO za8~8^C2{&KM@DKtTtCCh8m_S^bnKck$Dz6cv58m=#j1$AfAMgG*wjJmJTw$stPSGK zN=9s^DbGy7>GkeQAoTgObukoHcuejxbR2&-9K~k)cr)eZL^GkSiNd>~KvT)aycIB1p0}C{h>$lUmoPk)u`QbwebRIv^uyecT;JA9A z{O<*lLQ3BgsfM*jGc5ak2v}OHpLp5Gq&1fl$fzg8O_Br{)+SZ@Bq(NI)Vu-HP`W5$ zHQ457C#|!u_#QlQ;)F!2H-^t#_WD^G)Hp{)hx{8#9SQVhEX>Sj5$*P&ob@uQPn0-& zc4gy=kmO@&MWY9ZBW@jBZc8jwn|i8AjQ$A066D&(+@i3V3_6M4@{LJ4b>+|k>N0f*j;^+vM4kV-9y<1BM@|$L8p!NO*8d`Jn&nxfJ ze6Si+AzjuL&>7M@I?%%IDKq%ifk3JIE$H?`H*L_R{`xH4gGY2Y6(UA!HcD^`SwcE1 z%yZaNGD$GSA^=j8mh24eJ)E4&ZrrfCschn)Q6#kd9-|%f2eB5fi}vi;(RIl8JvY=T z;-1y99p|jjh@S{Ptaw;Ka`W1sQ&s!jElym{A$>I?lt~Oxmx>sLeYe(dA4my&og3mK zCQY_tme&(;xqPNUl)V}ev5mSAti(cqTbv2Xn?P+zP7$4EgB=Vx*10#0(VrC@4(OMb z(&{g_2j_QeIJ)zfrM-@(2s-^2m0&1N)Z;j=b*>w&U20=RfzV`c0Dw$7eYzvu5h91s zLnW-=cIa9J4Bxhxdk=M#l``NLK9M$++c!bq>NPljvu;YzeryTB*HxfFg7FoTE>E5$ z!gpq9bq%GxX=Ed)P9d7|pqE~u5zV)|OO36rs3hxO7rtB#LiMKD88o$Oht*e284lR+ zEF`N1`dmV&rKP31=rN)_K?bVB7$xF$Iw4 z;GGsnP0AU?{Aa|9mne=V?Y=09=*dnO4Q%ObAdb2MKX0+CPIE}5M=t6Ow-f~h?R@W% zhn}ZB&MF6WCUxFd9Z- zG(-xcHP)H8{=Uy|)_yh`SRm^TjIO=zW_dzJ`-?ZTirn7WtxmVtfGS$2f$p6XoxpZr z8(D)ta1tASO)%TfCq(mB(X>Wr%lA$x!f_jbi@KVh9rT8=!KUwBR5(;=de7b=<`fZK zm)|Yn&1!g2fEjRP{^?oeh4zghkM_E3}1t7F&iAJNFZdb_Kb9gw33xS0m-5+vSE z{BsXb^2`oC7}4uzmW|`joN>F8r$ejvjwV73U&oJm6f$}YgoGATa;0PO7!$iI{xz7b zoeO7JK0)goy|tt#o8=yfd3%hnex{avry?)P&3F>9t+0kKgga-cJKIc0eDTugO=-oZ zLbZ=5&W;P~B{pPP#}irDv-q3hUF5*+mIKHCFLVS6)~G!UF_kDs7(n<;t2#|u25M8B z>gf3d`1I(%(35D>R1nfp*1S|EKFv8PO!|udDHDIuak0qL{NgTU@wJ$Gh_*W*IwncC zNUxBOA2gMh%u|`$k8ZjQzBekFGHOhXRZP~eU@kF7?RDk-<##dAD*9I%Bf1NARQBG$ zjmjZK+PTZ`?Xeqjp<-@Vp^m)a_SKvtR?z6_>;C+a>oB{-qL3jri9VEHlT%NPHG~Bs zQk8`@bhRo_^Guzdi#SWFzdIg7RO6QHdV<+!{8pH(Cv87z-MXo3t@mJ8{9<-;qk5ST zTu^lYzy!esw>jnBYU)10KVU7og~0F|P*lJy-raXpHxZEa z{D#Q52!!ODoSN#51$BAZhgEsmaBB^P^w~NXizngV4cI{XQ8y8IBF^xjUqCx&`V#^u 
zfzcnckN|(zv0wg)k$hpG{+j**#y5E~_&ZcgC%Th-#|f6ks}i;BzR-eJDbcG`Qk|TX zA;Zg<*}v2t*zaGEKQOUvaPJ9;$yUN-6I|TY7dV3y%18i(!d;TQnq6+B+7JF&HsV|P z$7?Lx={7rHV>Ym6O33uaWtv}`tbE$xDbP+aX>L$cHm(|J-f>h-pQ*OE0GRGojQR<) zIm+H4{k5CuX;3w5ajGOAs)$eV?F5{`Uv!fzYY+fk4SYc{XseXLXI{Zkc>TP7)k^aL z7QqFhom?7xuNq(z`S7j=!txb{ki`<_CI*Vcf}2h03FuSAMm2_J_S_Z!)(QkuCR=~E z4=LA_a_O8Tp6B=hqT&Q1o&wdP?l+nVLH2~wH!pteB{=nxH!n;#doxdcE2dg0!=SAB>4 zm&GdA37h&K2z{B;kuxHxPRsD{DygRT zch2nls-ZU4rUUXR@<=n<#Uqf5xhIxRPSenQJ1lYb)Mbg=`e4rhb^`H_=rOkcfAXl1mx6YicLHwgR5SVL(mVkCfPNAX=1j6m{K1CZ`2BM3* z-8=}-@SH2;q(+k7^B-?7ogrI3ZUJ)Nl#E{QXo$tR*$M_d+ffAavVC2q**2Wey1 zc9Ldg3M9=15K-x3ENIu^43IB=VY}dYXG4|fiD3Eh()mKA3-2m8=K5;mO@*Z)(~`mE zkdf?`@s!BSIl`?lJUacdm_~E!T09LWOD#D6X`re#s-JWbbWr!l4nwhXk@DRMQ{A1|Ot-K=i&>H;T%CUngWbE7@}z zWb$M8qI4Z-rD}rEIa=h)MK#QXgyuV*f#?)pqBSePAz{XbZkJkf2+|UMrIGFI@@L6m z8cI2va5T#CdboV2@TzMOljbNjASV{tD<218Y1SY(K(4$tO)8v78H!E1*ha*G?q1X99z-S zB)fuhg2rj9%g6;(`S>87;a0rjlLO1vN~4rIC7I9K^5Ob}Y{&}9&aVeeM?_}NZrG!~ zF2vjxcFaz8K70183pL$lG?HCCMroE)HC39#Z`=j*0zwj7yNbJa-9$fXuGCuEQ22ly z{-a>R@VYH_G|Q#lrtPG;dt;BdqZ;O#vQ>?Qpga^)6nYh0=DCU%2hzX7Kt5UT!`|8|oQqi&z4AmJ zv*-A&lgOPNzDh+DQQONf_pA^C-;aw;N*bR&*th*z!kVck*PfAMOg^xdch<}X({`lN*I|rT+X<@E2;bgYfC?QL;Tfjm*%sW5*6wyg)DfUJB)ACn0#9Zx_>- zEu75PS^%|1n3fieZ7UP5Jrn(?1ko4{C;1?2De~{#@O(Y=qc$KTbWzuJF%#OPLxjQe&IX^n){#zoH|t^MJcZ^?kb2lP zTnUfEv*Q(?fd{V9+1X_pdo8qMJA5LwU5ACpWkW?cR|X!8G+XTMioYIQ;X=OmN-@+O zTC^wI-arO<_PtX4QM9OD7*HdF)|U;=2p>%GJ`1ro*UqDAFTkMIouC@k^Ncp(OXlQ! 
zKY4QB1PvsW=aZfA!>+Z`uQl2;x$M`hFHcPuNQ~x6$<$4H}xEHxe86@892Ln&bZL z8J{-;d@C%tT-NG@66*75R7AQc|H;1Recm!Qe*OBKG!2fTxc)324i9$EU0eKa-HI(I zdfu&E?icjom{Hv^-010yuytXH?@97x3>+@w{I=_+4Jb) z_7TlB6h%d7ql-KI^V$MSHsYr*Uqz(kc+2~Kp|l|dQfQ2j5G1un;dKAmr4%GB7LGPZo8fr42+f4usoW|dUJj?I7)L}F1+l3&B zg?EtK_ra`b&BvJ{Ki79j>`F6KXIT9ZHAfLtU@Ul2FO*6FD<7kB{0<3Z zIB50CD7n4w)G`uvihOYVH%*CZEhWjbvn0e!h~E47&1^4?=EsP-lQb1Nt0~kz^iUqa zV``X>9HN+<<~W8OdI4f8R;Vbw(2x}Z&tL(l6rZ+p_b4$hH0u@--BGGhhD zM+8`!{XRomX?{sr@!a3c5azuTTi>z5X|JEP7LGWp0%_KLX+=kNn08fv=z< zBVTXWSyws_ai@iqyn*DjbNvwvX^fCi9Sdd8BeXxpV4Cv~Ycbq4g;1KY!Q0}! zN5o2+vRs=!%Ao9NeS_hK&HD z*Na}(lPEIB@JAF0?Wtcm_hK`982Ty})Y32D_BigrFqT2q10|d;ctd%rqXWr%YMBdFbPLtMK<1QE6-;d26lxp4nx2Of=33>uDCRDJemK;Q{Z|LTEG)8NP3m&qp0DvW&kZErt+fZ%nhcZUS!_OG^6 zz2#vK$mj_9*Jsz6a*4I2{o_lZkoWoF@e|BTON3tJ9I+6&pZM)UVkxm(plbCbTJ-$A zh_s`O6e}%6Y##G2Y2IyL(VcNXw(ARdwC!vMjpKuV3OXT3A5$*@g-Kv*uoEGOnr9S_Epae>65te5VJpj(J7zzlQQ$Q+cNP{w_hlG;2a_)ihsX-nVWDw-`$ z^=uJ}47ir+F!aGM0A>W_^S}C(cm!dMoL4A21kcFxfp=o&?XeJke&E%zTNF5LXh;5g zFQh+Nzv(V*m5%d^7h8H;sZk(F{P^_CPZE6NFgH;H;+i=2UNEIdsLk1|UB|ZlW7Kqo(xO3)oK~N5BWZZz{qf2V;Sr+ihDFL4dEm70)gYJ*urjN;3bltB+P3+alNV8Gx0SfG;R9h$zJS2 zxE1Te2xHb$*TGXKTJ~Qts85WrN9F1Pk<{qicz-Pmw!q8mYivAq+U3jeR8DAd|Lg2B zuf~R=t@^n#w)mLCjKwPv>o|}-va2`l+K2YuOxhg9ibQ}wtdcMNTE2hE*$Xbtt3R&! 
z8j}dgpz<_;QC)DG#No_->9sSQ%BOMg+p9ZQF5mP|flT}$+`ykGPjIXPDh&aH@fK_# zz=!)V;t2699~KyBNRa|C$4W@(?TiCqY@`tXv(lvTK%@9^%~hYyix+Xjh@cM81G??o zb?LSffEQjOA2fJ!sAx(TDv!nG?{YaNV$)6lT}=(N=i)$_`O4dQrTABNYh6mXlR zSxHCsA$e00Xp?Yr^%@@gsax+fskzCfts4LCz6IB9L_qTfHh2+H>6(W5XqU{Wu#D{o z&3`PtUZ|0GM(}rexJyXH+PLTVUx_*eT1<q(u%S|0`=|2KMNNnD$ros)j@Bi`Hp z>J`>*3nK;o;td3CR_zQ#Q`T31qq)$t&rr*_{p$K5O#A0W_x~K%)wwCI4&R3+fvFt@ z+Fi!z91u#c*VdL{qr!E?SeB|nz-fBjhG z$F@~yn|$zM3Luo;N0I=>(Oy39&NO&u0j*~uAdjWJ4{%OCS^p9&*xr05O;=H(-gwW8 z#CdHgae`|6ufrG4hZjJb_5cOt?_T^*@}%{N0pkDMjp`GV$IFpTB&OS-z5V zc^CfZ^6rYqXY%x{AZ8ByR-L!vu{% z5#q1&=*5c{`mURxSm!Q74AQi}K&lP?KdTK23cC|O(MP%UZSYV23znZG&qY{;zoU7- z5u8N78!7Psl9?BZS4kA^l(MD3wdCCYk8qk!DzkujV5-pnZ?l0VIvhp(zZ&PiVB7R8 zJ_aoKol<=85tZbN1Pex%^#Tem4_(CpAmK02+P8Y$OyW3Bg9E1dXHj$Kw98lB*1L@e z=!($%9)%9W6T@@-bsmyzOfogE>Uey|$ey~}A z

|#ACbF_{W_B!+rG8qL(Qu2i7=BE6`$V`$|LY2Ng($uzcoK&IqEcT2jDAF>+5=G zMV=A_7EptU2ZP1c^_0lvWO-Noxx5N!deRFp!y+7w4+g&e1#tiJ)9(O8+CV9Y6 zMmRYM{l8!c_BZT6WH!~57&(Qif1W}%fU|dAu=3X>3qN4x3ly=I1D^4TlFlr26VDUmsQr?HRx2{9oz08#i zkND4wAc0mldjNTaZpV3(t@taSAPxQRzp@S~>_2{m(3A4kGf_P zqgVUrFu;>Uk*Cncal*API9gA2qOSBulM+yN1r|Y5uJ!kyi7)7}oTYy;##=u-l=~X- zwh{`mY1oAcH99%bj;EP&K4!{AR(OjG!F*9tn8uW;r{6k7f^S$@mtK zQr!YYp$CMbpfP2n?f!lWH6AAcp$cqJ9FNz{Hcr;%VKuuwGiHzh02FXYW#iKoA4`Z0c8H1z3^lm$cCGEd0_&;sMYVl0Kyf5+DVzvd z(%0Q@s#z?KnCA4h5-}Wddm)CNv!OueKVw2JVlmUmc(1*KjZee2c+zEfT)5_gPbbD! zMUk{mh$_^p?h}kMO=Q@bbX1_vY9|0zI+!ZD&ann{o*#V?m>IqhnIc`o0!x{V8K8r* zJ3jU2=qmpv8cee?%Zs!QO-xBbxn=rQdGr*fvqjGg&#jry9plB|s8hJpanm#kv9F4@ zUqf=0;7sla8Uj_tUDaED2-)21gd-F;Ng5${+Sl2jk18o=%apZfPByGvN|X|RXXm!J z7YfjC-_C*?8jrP79py5O09*KgD=NZ&I_#U#y8lF(!5`6QIa^^F$R@{>%)lK`p?jO3YWu&U4dl{I{(ox6!)cm0kh**OB9P6eLLpn_nx>@ zo_7^9u)?nNnQE`A%w$ zsoxe+sQtt&cYbR03{z(5ZnT=YYvvtXpO+T32#i_x;p_`=2LL3!<7C?YzUK`Z9{L-- z=G~K0vVm;Xz^i`32PV)Mwvd{ilEbBzUVfRSa>*|n+5S>v*`N{bwvdli;m1q`yJ7(a zOiNSFwYLczHg8nBb@(V_PmDTl;V`M?`{7Rrp$CLB2@R z=%m&{ZsVsb)i-RSmBAzTTr_s|>w?{xfivomiD7N!b zs@n&dU?79Q`84gI#|uN)1z(Oar^P1MMu*te1)4pU<#6 zlWA=9xKry41-EgqC6w~!4mW9N+V1srPJM+ST_3aCP8o^CSoiyjPa})i;D!J>taD`3 zGkAZAorLyXo%u{U8Yf@SRa*P{_YLEx}@l+!Vsdd~@X2aRfzm3nf&ryJCHjD^a zGR-&$#ow5nEL|8vue`%}dW_#JcJFKo1j=dPfx7)3k4}WPeno2)7p`4wOar5HK41Y~#QLjK@g%uN zBL>w%Yl?lQ+xRr($0`u9n8&OTIj=RF<)Gm&T@H?Mvqj`!wWL;n_w=$v5=n8f6UyNKbqHWFPE5^XO`KS%_d2 zrUrb5O!Z}lbH;pn62^WU7v|50Rj(_WkT$}y1G42r?EZ7jjfq*q8xO3)^`l-lD~D4k zGYjh+&a1m;Y7BaI&j<~ay(NS>hPx@%KvZIoA=z3h`+cf)abagapY7+?p}SKM2fY!E z8phxk#hH+Byzid@1_Lu2E*51$fplRMsE>{gde<3x z>ztMr0sj50M>Dg9WiXf5V{BH>x~H#ya8NSs(%`OC5m&(4)ma+g{OtR27ex{o?4h1( zFVv9A4=eozwpOq+?Q@$`Jf+g;WdbWoSt43%!P--*`X<87qFV1!G#TkLnFhz-P&^Y@ z%)rx&vR>!+mtUkup$uu| zUj+E83|`_E^E@$5zMKoAvLFTDwuroWAt{>DR%+o;7xD(cObw=Q?CDo+)$}wU*!&yZ zdh@t#)*DsdGxUDBiwky1n}0xB_Qt__q++wAolVM_CM9~tWNrS`R{rim^)w;l#er74 z!5=kwl&mxI^h{&cXFYchjK66N5jA~Y?MEFSaR@0eX47_DBOE(FyUjeOKNShlDzuNM 
zZ{m$pV`IY5ip%dW7Z^#+h(uY~mH%8OvcDH|p?=$*eqi;jk-9%KcPvf4_l8RPtuSo| zgA1m`;mf+6SF5-B4I9|yzw!^6Dn7rF9!{q`Q_}Xr*T+JF{i*x4riI|!D zE3UTRG@OzI2@>DnTZ*Uu#nAW!dM-W7i9XJy;}qJfI@9E7AcNENgHlyAnT-!)QjR?x$Uixk0T~;c45f>-kBWkkz4$N{uG&R<r)tz|E3vvN z{Wbm=&Nmhrw4U4GhMR+yx~YHh7Eh~|2BjXES3I3@lQpGWgH2ER3zbD}Of_A1C5?Vs z2p*s4jS6Ay<#ohN$}Y1(wHmj3URu^|qPS`-K};N+#%Q+VM4j-t_Np8^>(K_(lXiSz zVE{t9GNwF41~|mEuOxq<-(M4_ZWytRoR%Nj8sqQzdOzo$7S{XhY}HiP`U$gIhHPW4 zoa;x^MCLqx&+br)3JxlMLpkn$+;f-J_AN5oTnmIuQqFL_fB15s0CO{Pj@dxOET8UTB1cwtI@xZ8}g!WSu3W@xkO#;e?s; zd@$>k2b3ZAh2B8Q9c&?|c;=GLsfQn&fsp z7VHeupb$lT8UeHN?WKdLjHP3SqqQqY@7ZPAZ59;NyK6u~y&E$=Vl&8$wi3>W9>2Z` zzX4$%YiVJ}?8>sp5N>X8S6V~Z*_^?zJ5&~-m!gA;Ek0*LC9uk}r~FeBmuhOUeVPE} zj9SnMn@Y~2s1E<`;#Z?_`IdpRsFVVz=z7Wqp{mUrW*mziqicr(rur49k4jQk zP@+X6!Snjt&2P5Iw!MKCPjHgRJ;2=N&$KX#&+L9ff$bWcXZHE+z_@l~UoF9Wlx8Lw@%~^!+MS0__kPw%N{!>}j1GF|*qX;6_$t zg>eQMTsI8;Y@Sd&QiPO#Pji&$pye)AHMVaM&uTd|1!Fq+EFOo$Ek7P81xheMCn~O) zTbHT;l8blNkG(pQnHlrF^)xSZ=p)E@Yy&#LGnu&&4!Do%W&I0zWIpY}0sGxdX$b#sc-aVH3zt>?;fw5^sR|F(g-%*ZH$S3dYO=0Ho&q!l6D z)9c6Ua~sTGtBcTRSzR$)^TbiFn|_RY9>X|zTMw~RezTpU)paQQYSnhAdDPWBu_1>}}*83gSC zzslohGQO=tyQ(%9h8Kp2JH=2!t=QrnseM3{)BA0jWl1|zB@s^fi?M$~Ahh*frR6T( zIHQIH%)^~t={o`~t|xj+Nr3o-!*H7{Zv+kC*Oqz(t;W^8wd01?+YFw4n`{gX6wk** z0?J4W-p28?G;DGr=xd*MaCyplKL#_N=jH*@f1BN%470$U);G|{c5^Nkie^NC z@;b|@tjf*MsaF`Oj>yp03Jk_s$NYh!GhVtG)1099dQAJv8jfX{CPlwe0JgAhB6`MTC#W#JL z+L)dzXvYvpM-hF+a{HH{89#?I&m& zjN6$D11};bxhohgjHrK&58^)@C%Z;2+gjeIEcdXooJ-d2uQ$~wZ3*h5_x89`!(7svTvPjR>!A$=!dw7<_cm#QI+mAtCRYVU4q}% z%I=ZV=rnXv%2dy{JZEU%>TC6^zdG6WVb-_DEygw;B$1^W8fq zQX43;WBg`0nvsC$)D-h9Y+g4G)0D(Vtez+kYB-BJ=jKqt?H)9X%?@?tC=KFhsDIz+ zZL%VBzRugwLd0dT&{5{T%V3|u4cqNWLCK+GA{9^AtnR;H-N3vpq+r_NLx}tI>@`!< z!s_*+t}-twj6b4d}Jt}2c(qt-Z41Ncjv+SJbtln&%RrpLc zz9`UZLRHgH>8Dv|*3)T+MSxZkEv;rZR1KU=Gd0wvTlg;hs3(>hx{Jx}kB+`+vB)vJ z{gU;XfxMc1fmW%a`ar7|X*SM2T!MVp_7vRYTxH~Y08TNBn25BI#X7dG6b1gU!xY=9 z%_(*IY)0~Vmfp7{6Qa3#ZtoSf{V|fRo~T?Dm#=0elUV+9&7gjD^vUs*28q{7NBdkI 
z2VW4bDo?f5+o~!3w|Z|Z#>%_ide|^05_~>bvkfv=Iq#6sD|dFsXq5_!Lxxc=b2im^ukvz8vDE5l z2l8db%uB8Uk9;GUCNjuZh0_ZXpK;hO{psj2MhxDyGO#n3Wt1GS(>JhEYB|Fp6#mon zMG4dEeeb;2HObVvv#J23H!`(B>+->hx3f!Zn}IqI&x=)a@tW@#zHKbU#-1H#tD zsz{s^`JSSl=AvP*#ybVclS8wQN0G@SV?di;!k>vnl0doLHxB)^#E` zG0NcK{cx`*a_pr_GMU;8$#2d+-cwHB4|xo)J2FPmrRBn@f3psh8s;}3ecm9U!ldL=G;Pz(F_$9zz{I^lVga+lFJi9faHfjTyT-e` zc5{kt!PTum4{<7MpUr_Xg$Q=Kriy|WQ@5Qzr z3Uo4T-#zis@hCRArk4kGw}gShK|lKKd7=3^kfQ;5WW1FJp+msF-uId< z-SWiW(0x|UE#fK@PweYEy2{B4O-F?WPT1e$oz+Petm`NxWeXb6HQleCnVa&|1QekexEfL$eXS$weRWWB=HvN#PsRYN3tDPQr~6ae zN~h$WLF4Z~`}XZ2-r@`L+E+cJl6kpU3>fanB(MMBygF;tLyuzwbQ`pARWtm-i;LmRs@(o_jjtbb@I*&9Y`%HZt1svp*s3P;hfP>k-KK|CC))pww-z5XHM5eWVGTE;Fa;qt#d2Y9{*MDR+D~QDfEMgI9)$6O*LMU zCYm&2#iDjFwyKJ}9Eg^J(#23qiJ+GrbQULu>xj0Gbv6HLq;AVvfl$h^1jU&0Xj7sH z(Bbl(7ZV=3qt-tL&|!xuLPT^%?dErAV)I0djl4GzZ|hJB-9@#5;X3arxwp2bZ|h%p zle&8y3v(TJ47EEUh0L*hGFdnnUVhz_uO-FvO1#x(KC5w8KqYGT?`>vyt&|u|M&9gy zu2tFUj4H#QYB{yr>E-@2tMeUwuPhh7{qv%SHZr{AY3e%^2T!eS)BtFC>RK|8vC&z< zwe!q&hr%H0`H>(k`M9&9m4UMSV30i}P4`g2r=G&9Frb*ZuyKE4Qa2{1G80=oa7O6?DB9wTscp;ZciE zt?Jvud+t)^46d{a6+_P(xK{fI~pL=A|Q7!@C&DiPE55 zY_fsJB=!rW7ZM?KPiT zrUZ&dnv8^qORu?pDOvk`?_!FAT-ut!El-|a*XV|hvnmTW!m`BMn9|phlf@9y6xsMDZo~Y>8PVRE zD~wt2Xmh)=6fkUPu0g++zrD}j;yNwMYl>W^JrK}DrF{C~swj5`x3g*7gTIyr$mMQDp1jcvxraxV2=OnbM;*<$3FxnL4t1tr&a0$n==L4Ep}MTI>EplWrB| zlhLbsOv4>IGfj48iX|UZt9_nYQ9!ZEuNq5}a+0&$T~oNMfwjIdQGL}pPkz3Z4yl@p zk^x$ak$p4OKTl@GR;{OL=rY{Rq`7^Il4FvEcsqDK-lC{4TYF7IgO0szeRDJ ztGZ82?rNP$q|)ZNOVobFaP?T|g!6Bc`67iroB`m+59e#!s={DeG%8bU`;4#=Ki>ZB zcixw{_^*JYqqt+bUX$c|sIK@Ly7l2w&8{L{Vnrl6+z*6lSjYtvPj1Hf${@1N;RZ2? 
zCR+xvE3lHwHs;jH$T`$~?vvv)l%Iv(MI*NJZev-M!^JkQ;De*%44SUEzpn--;t#jp^axfP6fUoqEUqj#vDR{9qs5Xt&NnxrpP4^ zW|EPV7)UW}I}bPSR^y=F8M(Ue=d0Z$A?BxFT0h+XX}c<*+5b+vd)%V{;`(kZPPN*ViniN(UE8sipZ z#{kD+Tz8R)YjURBZ#vN>LE2h4taQ3Hj!8L&mLb~h|~$VjY0WInw@h2 zVS{P&iJ}*;Ln0mf8XEb#lSirYTdlFZw z$aNgauGsOPo~>VkBezsV53*s%faLsPiuqNkcXsdP&p?t^vYo0Dc^sHcS6>R6`T!2M zl=gd~R#|C1-d(m4nHulnzufNG^-bl|NvXho?`JAc)Z-8R;20b*^HK1Rqvec_>+~Fd zk`GUgA4d}5@8bMxcq(kGZ^-v5hL(01?W9NEvQ`xU}wU{;AsZIarE+5?2@K=azBCoZm`Rw%Bi> zirfQbZxrd112e81y?C^l6%%r9O1k$8gf7;0Rt?H>6!zfBq6K?XI7a+%eYGOj`2Pt^q$fi z|6wVr;QYdv-H+5WAy1=4t6ME%t=>?hRaQxAt4?xTH0#^N6q(Qpde9k191XalT7Gkj zit_2n*fHK-&aPux!UIpQY^o5DXxwi+Yq5Bm<8;q05n)=2Nq!tpShCgwM|Tz1Q%Q)Iw6L$5up>Jf$j#e!VaRND5>FpFJMNxDmZIKYT^d@DK=LC${1?+I%P zRVNDOWT*A4(oDY<+9b8>Wpn{6(2V{`=vAe!j|Dw`JJS5(zDi_IIL0^U^Q)3KhBu;* zJk)+HpK*m<%IMqn&_zA04k3%&KUxd-I`%<4bggSr=`8%}n2_+}pIN$&03S7#HKFX$ z6g=^0agU$PgF$H_AvFa2##iE7y?d?D*^sjP?kVwRUO_3q)^ar|XRsqNU; zoR~E;w2;JwVv(qmhL|Qi1%9i~1-yP~#o9k!Tm&ktifuA~T?Ld5s(5-Gr(}WHHi%Nc zA94!5K?iaSLx6jR9(I0Y5Y-srAPqU5M&z2UH0P@OBwVH5-7aUoC;ps#1pHZ-K1S5( z^4hG2IJXTMBsFgVd|b6}UwVcIsC6|c_zEMS&@bXUB8zE9E!WB%k?2o0+khk%z0?_I z_l_s8+@&aN3@LE2?jZxn`liVd-@yEGoS7+77P3qS`h8;0VS)3UZ&HqAQz1lUP3WL2 z-C<>bhKeh)PTH8Rwxm^x+O~YBK(Z8sx8*-H9Q7z}2HOvpH0scYhT|Ea^!-pzVAZ@A26*;x0HYwVeU*G54;^b}!rT&Sfyu9D5em=mFM=B|5pb}D) z1y1OwnNYA_7PE_P!$FhgL>&i?TNI%fDkOC&4j8>7|5^dfyXf77@UAGTG6=mO&5|*H zg7OuVJH~t;thZJiQrqC9y$to+PQQvxpXYJYBcw79QUQDQ^MGs1$VUPe?zAY-x%yHt z!!0Fbu?!3ZhG{Nir1aVLsn|5O)KZx@?sX&P}5U@wJ~uraFUBYRYNZ0VAJ z-kW9Q1e#=CPW#F}A*OPTdExZGa_z4ho&n&r-p#uCEEod5QyaY_6-B6-PZA#C_V#+n z3-=?5Bo|t0t$>VBo_qaaD=%MiJtJFo7on#5GTfY_0UMlzXqzGo{ z&AJ~)T?I7!5#o&E{ck@1p;`h_<#Wwu=(?QO4~dPtzF~6GI(pgh(P}B^zPG_|@q51; zI1lO=_2pKJqt@so^##e8uhe#)=32Cs*G~4P?StG`1lCa+H4JK0OOdb&^F}F3U{4e9 zw)LEg@vD|zi8c}7buay*bvT%}O7I4bd>P)CO=n>-w{bY5?B<3&G1$0jotxnTC<=F3 zJH{5o=F&lmnPXq>kGO;;97W1vZWvC2bJw9>{k7%Wzz*$)L$O#zBf6DCk({byGx3A! 
zKvN*dl`=HLRff64d(abtMP_LnM7g-f;V@4~-j(2`%2WAemW)Orc#P0>J2mBRV4~R~HKciL zs;3^XZ{0^@1*mPiUX-|kxRvG>fyY8X%yH})A=-a~X5Dl)wkS>N2Os+in!L3(^=5P% z89@1gg#~^ zL55*-vC0Hc)y#Di(z$j%_XA=Ty{dKmc*XnsITvTJ8F6lf%L<=ichxx`kc!V4kV!a2 zgf61~X_`t$zO~EO9G~t(xTWj~BU}Q*v7_7t)K>l7 zaZj9O@*ah=yJh=9c9-8`e?e_JWHk=K6j>Ym;dRyKnxZ^SjU$~yu?4wcV1bw65J}KH zjm-OCRlH|%N+-AtvUoHxx{IbqrH8OF3}wm02&2*~(tkGQV#UxbgeKe~>W%&j&UrbC zzenU1Fy0Y=zUVM?7a@36b*wdNF*)i)t-#E$dGl6HfC-Tc`8r;Th$EK3GfKLOHL-xLf@7X1Y%W^g5DL};6)cL7^PN-bi3X)L?-}0OW zu&K8(jc{#K0i2Hxw{T*!M^h6~^?k=RdKaoWS#NuuK5ooV{;)1(Q| zs4|7ApK9avm&*mECM-YjM9HMU^z-eaCPw6r`Jt;EZW?4NymuRMq4k%n5LZ?M%gWGY zpAHqx7emWP?;u>-2xV$r7Y7LAq)3oby?A z&+aVV@ZiBh`Zs5dH=Q+z=MCSAe*dTny3pKN;!0=f-3#dK1HZunkGS@yyJ{kKR z78c*9LGyqA_~`#XYSn_xmHfa=y^BFwU19j1`(Dd;xhs`t4}JRkb-Czu6H8~s#UH@Js23`K@TYR@NwlofjF6TNtS6__wqut-e@qBqWwSY=IKxtB z+^?P7@u*Kbkb9llSax^7=Nyf##Br2_Id!+8f7}237tXX0baBp@F(Vjj({BS7pejVO zN9C-s)ypN@o^#R>J(l3DLa`SHn-3M*LBOPsnjIZ?5gv9WzKGeSI6{+GaShf`m!s%hn08ovxlq#cq3YF$Goq;OR?qG_&1l$0v9 zqCbp9Fr~!65IQPkW7WFxHbofeq*3kh&=tZJN$b}lf2L!~`$}IwKglp+HkeO%WTlsO+z@Je~9`R3_r?Zd*B!A^+a(#S$h4+e1H7;Xz|1o zCBFUW^m54_bxq2mPfFW z2?39tt?N;l=m+1i$ie_nvhRAG5tS^FOy0)iN7#%h_~la@GxKY!aoln-JUj`-on+j5 zIW_C7nYRTEc1c8cn(C=znjVp`PfO;hg5|Se-pL<(IG2&l8>}*%9NB)|f)vc&49mzM zCVOdZ#kd0r25QcpRicn9l9rV?7hkEbQ3}9%Y?o1y*y`M2b>S~F)%`>ciw_xsxcGv zjm{+nAb3hNifrxi?P%pJAIs#Pr|W)7uNW6nrq4otc?(AywCuT!Bji1Wgn9KL0Z&-g zYOFt6FED?VMi7{9{?nJBh!rGT*;fmX*ZGc1*}~CxJPN=DaeTLHtLC@InjZww#5cv^ zM}tzaMbz}_YnqFTM;tEoUTg10+wzQutu#(IXvtk9*nI74nNI^c zYSWLrwkW?WTsjNQpr&MfVgIsY8UH-Rac_1;_~U%;!!jmBYiTsN@BU*Un1|e&8T^i3lf1O6oV^3}{51Sx); z@nWLhyAL2oJqu-u8g!|m01!iP6E6A9n<1)WN7H%gmu@;CLixMH#_Sd9xsSkRX5+%`?1I!cUxY2k@Yn1^b?+h-wj#Y4FJ<)PWkJZ{_6B@-KH zP*5OsRPj1B&HW3GH#z>RcERt2+>`cWM>=la0!wuJ4~P)iqT>zlo|U55w$ zz}_y~^YS>Fm4IfG-O&27Q=m1AH&wvOt_(%HWx20pEr?9G(mC$Qyw40NN^G;NGc=#RT!BG=4)reK~qkUrdr-{0P8~R4PuJ!Yb zTWl;L{~v0TeUkU%8u{2E{z|(I=fP}32?N}hqiT6?z%lNI&AB;)jn)V5t|^zesR(uC zq0z8@ohF72CSsFUXC2g`LycdgTsW6&+;2Xbmb1=UUU&`JY;>WU=^1u$(QT0#@PMII 
z1qB&D+##+R6(BfkgcP8Zu%sP>`7dNoPxEH4d5qs=)5YSxNBto8QEcW_aa+AZX&SMJ z*~5B1aUNJ3!j5be8Au=g@bDeOEOaUK=Bo&X_hl!?*o=|S34!%@e8yeivbuTPS@#qW zPWPkngDfu>Nm-FijxzObxAzR5#J4RwKR&(V zk|@N?EJ=-%%qR}OySGj$VpJG5_LGxsJ-wRv*5+8qF@F_*K5|31!h|!;|Cxq<{H8=? z-q_dVHF%e~5x{9maBv`H(Nu^!n7ltpKfBbWCpft>R+a5VmXRWC?8rk$$A8KKPcKQCaq<3at)C>hlNF&i0P@R^#=9BR#q6sxs@~Mtm4RXPM4o-h)UO9QhRPYEY zd9oGx6A`p|t)6&Kiywx+JbQaAUz99mL2amAm32x)L3vGkT>8MW6<3YZ114WWtM)CI z!{HHvgCtR9V$pxgjiZF-;3(|v&(nT`$Y+W4#@F>7f?OiR<^&R#MsLcdM29>dDEFX~ zQq~j=5ZNN*69qY#KR_o6`xIr5mxwuHO7C}W8W7gKu~%sJ#>KF0C8H3Gc3c7QV6Ecb zlXj+pnxWgIsv2?=ZEAaQjHCIx}Ha?$334%{)#(@ zNz`5E6KH7&s0cGk#Q*BhM}>cX-kchB@dtcLejLHLL#0UYW#uwx!2qOah;9LzPeT=R zVgB#YeHz9)O$0mv=DPHSD;-FJmouhhv~);VJcaj~E|POi4}w$6ZN_i@$;TBGfhv$U z;sGD89A^mm?Rq}%A}-K4f*;z87CA7t3ElU`;85~@y0pd3_2$dZm0YlUxRHkorl{e! z$x6EjsH=wLc08`{ryimfX(650zd;ITbUC%Rp;GZ9?G13IRUG;w3I?lcy`rst2^FgW{6Lou-OgPnFrrwCEqlC@J2SHKrW{M z|5WaRg=XRM-qjgLuVCL*D4`=zt6jRh>I8(ZtBYEQf*kvrF1zpQsRYu9urm-JOMT|9 z+}~6ZjPl&v%dGM;`v!97p)irVQ*h8!rp9yCj;|Q}%OFAPB?$|!M76MIjolzB{0wI! 
zty|x&u;mzVjb?0T0@e)ya>K=40aGWVJ~!LH!jj_opnCZ0IAm1uZ)9}fJ&jhBQ&)hk z%@MDvTL&9hnU{$_O?RSwp!-hzW0Z`|gl6iuw}%bBIWt_cGLT#@_oi%2Ohn~W0_oI} z5pfIyBkM0rtNeJ!>D5g`o!^KkVc3$6-%Wt>o;O@AQ~wl4HJ?bii`jF4c;#@MYO`=L z4dtjo_OK-IVApmugz}8kTE4!VK52+|es!w8I3|N84BQ8ScU1lLxXYV7HIPkuAFN12 z+v;BnO@Xmm7<<-A;_-I=*gy3S(r%rhPq_VEU>>95mc##%AJ3%sOCA8u)d1<&&RFt0 z(~b&o)tpxuC+MsR%H;VUfll`Pm$q8s#3SzV=-_~Y;^v2K;|_9$N@b6bwfCv9L|c~< zZ3wKYQoE5{ry2bx?3ti=sIGMU_5&)(Ck5{id4TGg4rdTnosO6qT|$#4JJ}|~-ZN{F zc{~_=)g8B0&QF}e8u<$eOVME7P!9X*aaSTQ&o71bAXK{HTfO7;F_cpvqg;LdGIF`Y zF%W@|J&I94o^ef50Skp}ks5a3ES30)7PEj}r(d?3S>tZ&qs8^%(m~j3z!v73CF;%9 zg)5n?oVezJ&#%)ilW87Gy8T{hmCKh=%Uv#ZJTbIgv?#zEG=U)EaIEA*kLvVMPUYP_ zJ@8?RzYUO>>=)uhZu8K%kI-qPP=L=}Xq?sX7)yXWJLho^j`(R|(6`P*MwdFO55oCmCZE8yb(k49MaIC>H@N9g@83J0V%RYpg0wx`LX!U-EUlA7+ z{hcl^$88RystBbjv4kzTI8aIfgeGA>YJ*hba2rRQu$X*$pSdJ&KQnLswCo>aPDGWfpT;HEz9(f|TmA8s0V=L80Xawts`4PFq`%hc^Ma^&R!Op$=C)(oOT1 z18iL5wm`r{RY6fF(-mvLeVGGhvtV#TM@xr8VMR@RluQLAdSOSZqQY*bQ=IHyX|&Sp%{@UTD0FFq z|NHVgx6Jtcl;Kh}LAq!}Jc_j5<1ZTfLRrVjBgUakK_ysDT zzKQ(knt1^x&aHw2U?`G2RWL%bcT~oG@MU%Lxf|lcxY4%T!~~ds|Faxljn1*-tuL0) z({93(wcU|W(IzD$I_|fpoARn#^`p(A`ReKK!3d>A(ih~ezjTJw?W&Q;xF4KTK7$R@ z*B>-h#F$~ez|vN!K~&NM`%&Ee=MUm|4<&b%jqfJSmfd@8{BwF-W-)_}N+xt&O}_r} zAON4%V9W6S66yT)(B-tp%Qh)8GA@xE|6rEww1O>hI*xl|JQdDXDM-h{kwUA& zZtvdU(HJ*Dk8FK;QK?t$3`2bsJ2;Ft9CtjOy+kmu%aluGyJk zrfFJvXjZ2<={><5H8+Lcs;gZvdzL^9O&D z=(%sZK55(K($-tm+EO!zH?2_(1(XbXGq`}Fu--A z0*k)y?}P!2hK~s>8l#J$Lt2UDu+9zZb!gQOg^1HgSnL2xlJO>iYvg?Sz7vbS3?KIU zDcOe^y{KnK4cuhOZjDG=9+~4Rt|LQ^-l|GKfvOnZ?zYdcjl0OD<;BrDy!9?**OnWnj_i87nIM%gp6(RE+!yd3NwMC@fC@cl3XMytq3J4@l>;0zTU0sJ^(||&jQjTHJy47>)hu>XA zEbGUVRT$vn19C(anDeRP5%LO|H!x*v%1k|Q!pCa#cHothqr3~Z6)3RK(mAz!d|Pw**|2jd9rP9=q0;3Y-ccpUR{b9SM7gBlG&9XL zw!1ce-D8D_)tQPi8-GEySnh&?vi#Qk^z><(`>imk2T^XI^;gy{j88u3hp;x(OB;1n1H4PmLIN0-Lh{89HIGvscWC;e9iX$T*3PTcv zuB}(n9>QjobY+~>ao7hW$@kO|+*lg2ca6IM9Do=Zq0`R5P7pzxbueHiRfEXW54j<~ zyW%}!b8QA|-<;(yq-H>Nwmx8_U9w%XHp%ta3`&@ef32%xyu%EqY(DG1S3j^x?_#ej z5eGp_8>*LS;B 
zTJ8`@(~g_LE7Rk#O2W$H+=(9R5cgH8{oG5qw{1B#%2fy<-PImt6Dk&Pf5{xL^Z4j3 zyE|?xI&Vxtw{Wy}quJXdH`o6}O6`qZ1$?tVSI$OAmr|WfiAY)ZMps6TYDs02&-(|b z)e%QR^JE9#-{a}iV#fz?aQf_*2*FK)MV(LG=UY6GZD<_bGwujsX_$X8qSV^c9Vb&1 z>m;uJc}kftp~v+cNKU8R z|H@-rel}YsO9@fZk_NF$_VXP91RGaYySfV4}hR=DXr`nPX zJ{$wu60lFC0mqE!U+~&n)b|PC25N%aK&{7XOs=4A_&?K_@Qf_|cY(HQS0m|IaHi4W zO2PNX!|`SRcdlEwwU)7NBf|%*W~9l)md-MYjs<^Peb2#KM=;K+Dt^X`R_wcg{BMO` zb%b%vSS1BX$tBj=|0~H6DmqGX%$qw(a@bxgAKp?rGmUe$WXfN#&wa7kS1*;2zP&^SJ>P&2~b%WC>q@d2dU& z$WbsG$7dPJVLnV00H`GdAEe{0#gY_ms{30u(ZuF07zlQ&>bploDL}Ei={HD2<>hf9+LG8jle^-Y6BX91{w@GVdxG^$bn#Rvj;O954 z2};Xc_gB%j+Z#?ymGFsNZhd#I*Yfs~t6GMChV7YQ==W8f-&*C(&yikNw~Ke(^OI zAh1?)eb_Yt2>~)GiC)Y)NMusV#Twt8(#;o%8fYZX1ed-DTu;Am|8T9NrG&$M!9NiqMzY3zIFidA)D z>AVLWz^_cdSTJ7}#0Hnu!gYsqIXE~(cK!0{VUj|z4MLgnir{q`&;xlMTYbxJ#8~qO zt38)OH=A)1Pf7g{6V`Yn8LO=teKIxe?z0MDhZokpHb^iC#Oa}o^GE%V3S$DWK>q^v zO(L%$mN(g&04tJ*JIAgr{JFehN>1m~#m6qz@Czsbo(cd)_w>ZuBMXo{{dhNaTHHE( z_lgy`mMmbc7~px!pkotmyg3H)i6`&NJhww`plM_CMV+p8$UlDZj(t9Z^^@2(eZ8fa z2je>bp@&FS?`?Zpuv8~m+n2L@?ET$87#wv=dWdJ90IaSGseJv4IB->&FJ#&a_jxZN z==(9-6|OBcNb4c3^~{lXu`;TH6g&9zRuvzy1EW*2pR zg!Si)T}Z5kkkrLHpgN>h?YznZ8H$hQhIdlO7E9ahuZP)Y6^@n9AM)M4!Ud~HMjuC2 zBMwsEE-TeysCKig0No;8ka}%m|D5EH@O2P63V!h<68!EyWHlE{BAIXWWGZ)cAOqaW zv#@C`4h3u*wjm>IjI;>wMNYpxOvrzj74q`vvvZA)=32VKMSdeBcoq`q6+GFVmfhvJIRhEXQ)J@i=*5%fkj&?ZQgOS?;b$-pv06MO z@^STAhr#n!E{6jd_jZM*#cL-O{W=Dm(!dppkWEjWhflc{)w%{WhLcpW~{q79&A-YA;b|)#O7w z`R?%oFWeZU$nJ}AQ5$*DR4|1%^v(XSaJRI5JY~T)5Q%F0c?5NXpUlDuFr*o|TyU1L zY+s!Pzn#n2k`vf)hHTxXMowP!U;}z(O$m*3P7rTfFHAx64gQFB=#pqF?xT*gRhJKk z7%SIk)xr;;aoDD*eNRF9cRPfEg^Z-!?{H*VM0=g7TWI1r*NZhcpmnIA4GM>d^3JwSr@~A;8&`3y+iGu zfjmn~?zWwN8a!Tma0rgUUygEPr;m9?@qjJ7Mca}y!;n9 zoHBAcv@h9v`N-YUSMx(Ka$6Zeslj~`yFrQt<~5kQ@@_b4a?!I#8i~c{&yW=!#*zO6 zv!I%v&GqK7Q(<=|iYfE21DC_Q@)GWU-e*~*tblkaZ_?JiA~?cR*5zz zh5I6eHJvQCZ+B_~P*2Nc?|~H5=zZVyEklBsb$K>PE?g>r#I3uy_IdS4cjPn%)L{YJ z1uehht`c{FysMDJ@ZSG=Tm9&R%(iQn!%G%w`>dFvgjp+po@|YF?D{F2fwt|IR>0@? 
zx1}>%8Je#(Zwv8Y#p5sITm;S_%y?&_@~P;%(e^K-mG*gUF-NhV`(^8)_B9!RP@ZDJ zFQ7P5L(;LUk@|-wWPb8m{8VxFW)A-5r6c{y^>$^VnHWu7j8^|8!f9FBRSyvyi{LaE z_4epjoOS&3T2!hXovT$>H`hWy5PQ~UCH7)*&CH$ficM}aik8NBG;nl$5|8;a@h9biDgP-^kc zD&YdxsTcf9j9J5HaZ3q+beV^o*&kfK99ZZjWVLql?wdnZP;7XbGRv0m`|^s1HUy^# zB(^>|F+Z72;gl)2w!<03i%r?#DzBz$M=san2k2u9&_hCB)!^8JUO$(I*m=!M2U|CE z+&x|%PYqjxKRcEJ9`M!MZP@G>s4gm~&9ZO!QBgIIF{?x3U9N~dCk%VOzuQCyi0rkM z0UK9Ljn7RynAaJmfblXnZIP))~#D9oZ7;bwv}~)NIqtV zT#Z5bY4}>vFkK^BZl%%Ew=46H*6dG3Mr&s{8;Tc!mBdp@#N=N!%RgmZd&6)jFx!z1 zbAfLk+|<()6c07tLT=U+&JLvD{2s@5>-05LwtC@#710WS0r}qa8*9)Fa|05MgKW$~ zMBooK(MlVn-mPm2P@U&L5}K7PTBy1@nghtVJmc4gkpSd7Sy^7pn)>3}Ftv7ty*2BVD|L)67e_jyrL zx1~j2OKT1o30st#yyM?P-%4e;H^Z$6!7u?YTm{hzMFrQyTDMYm3^mhBOK^yUuQmnaC>C?$Yyq^Ml)Z+;Dw%0JM*Pk%6G;x|69OOO{5Z{`Yjhz znVB+6S-`xRQaA5^zxN}p?<7*3mEO~0)}G%6I*+JFA%|DPi+h)QmI2(9NbNU=#eyB zv47zUx0X7`G4q@Hb>q&e6(n<~)(H;Aqz-HGZv1|B9GAME9P^?3z?KV(i z7SRr`eW?NiqVP_DI))FJxDGTs9j0f5#lsK?0RQt8t9=0~L|2rvD_+xX>10^K2@ zAGTv*>CqD_>0KHqP7l((Kx5?G%SZ1We7_zdfgFVK1mMzD`EDb1))^1tvm83!Z-#(5 zxXASb(2RIs_w+Fh?4PeoKtd;hdMO8$StDqQino`g4r;Hk7+b3DN^`9Qu5oZ%p@2-^ zzb0ex(VXQ_neY_z*lW=918?*$BMbT~>HV3_;l?e>06#6jCi^>o0NHp&ac0|4g>1J5 zK$`nE<`to-?W*v)JUD12b&fp*mXFz4R|Kvd$$r;4#O)d z)}HMA`1x&iheF_`jke|{sDLEUzN&P!YXW1Yz5ORu_f zr;Y+kn7*m+o!3Uf0h{~k4BlwpFvm)h_Mwzy=ztq!d34T_jbL*MQy#0e52^0m>10O2 zKysTYav#s45%sPgD2?Xgc#qi-Kb3~@vct*}Qq@NpeLJIN)=?$Tb3o;n&Rg%0H9#*I zc{ujPZQMTfaUA-0p#V8v_#1NEr7wfwi>G2gg&Q9&)tj)}V4o)l{;gMRpDt4RMQ0vh zZ67+5pQXays}O1MTpSK=hi{jzvaQ?aT@+=Vz6eR6nQpp?Q*g&IY;ShSL{>K731+e| zb5fcR2vn^Yimy5N1jlHEe38tD5W-blcYGM};5Z7OyW>@N>A2m!s7(SiLO-HZcRS<+ zw+(9ZP#c)PbEthDhymGruBeZbx(t6`HH(2eQ^eC zKxo|($bZ#9%l>BwsB5M5+ZWO|%nH_`TGts8u2Z=J_WQ|D?rq7N* z^JiO}(-fl4)h>&(Oeb9>*hoBaPQm=0vcYbjcP>A}(JwGcEpPeB2y8gFC;?bs{b zD_tH!+jsLVb32r|qevJ)#o+_+W~Kg8m5Tg7ptl^>OFi2qG$c>X|E(`4ycPkdz&>A(J~AGv|I*Lj zLMd7Vh3=mC#v|aLTwQU4xpMMtPO}<~t(8Z1ORUiR?L-?w0s}ujY;M6S4nzgKdbsEM zmZIBydWI@^G)6bq6d-!V7~34a^J;glrP84ZwvQ%EYdQh}uFswmfALeeowxWH>n;@L 
zt0|FZxKw0-#~5&eb&*)6#Mt}E=T4Vmd^_gmPZilnLoe6w2|+(-kS%$|iRR~=%I@;0b;5Gk z`1M`;06fqj)DJxNHM^9XA1Ol@(UrU5ja7o@ReLnpR^dU$yDjiix{E*L#i0I8d-&+B z(~nb$^YNrP_X`#Usb~kS@D$~0L7EeYY~K8gMef(zW$`d(8#tAE^S0Vopor4TsxYKo zptNsgDWZ=0S8mR#nzps^y_uW4+P7=o^@F-HqBjUyHA%9$TG&0%KG2(*ed0pYtJDEw z`T%A19r;N}M*!R9;6z$rj&nNqf+8w=akl~Wn>aI)Hgi04msc#EV}&1QoiPdlmlfc* zUfY?QCMP{c-eSfT@pnQvA?(?yu{*=UVu=_O?=L3QmkFGk(&6yWe`k|jsWlVX7MR#< zWfuZ~?s(?c%ZGg57SNl}m%CpY{ux{K*Tn3KOKVqC9Zta!?a~|R*fu_#sr2vc9*L!M z+^-6l4CVkyu(S5V@=DM88{EIB7#7bX-Ce#m0br{wD8Yzct>15jo1>dPmd$14b*UN7 z9!pGE1QeUaDmyU_r3mQQvr`MleqMf&5a2d+6cp3r25PW_wyNc3GIOD?*jajlX*1Uy ztZs7r3EEqb4h&8#i7f+w+o_aA)cLVi~uQ=bw-gCk=1;k+MD$7lW1 zxUoHTj3St-sBqiy_qlsM^S-eN?If zMTEenu`3N9k}uHH-xKCGedo7T_Z&NTk;az21(H;kL+~URRa*dTEo&4BWw18jc!_QZ z3$*-Y0tiw9*5k|+)QT)UdZQ03xP%({ZElX9tPtfrt>86D9l-Uw24LBKCqkxj^&Io$@z z%*uCbqGOc>u|BWws4|8o}!EMKvJW(~sM~dblq;zd2>DAdf`HA5p*~j@GWh(xCFx zPTgM#gWYY|d4V?laQt12VD(l(5N5K^+&1y`Tqy6wQ%+?r!xTCn;0c>YG_l#zc)B9> z2YFU*KktJoyE(R$p?&`XV7z9n!VpSqTzRGKn&v%-nQ~diWqX#~U1isIH%fQ!O0I4g z%0~DQ*@6JJ#6oT)e=)1+gFDtWHy?K9ierO53K1#=YIIXUG3A~O*z;l>x6vcfPgqe< z8}EvY>x|TuUOr`}b`GFSSAnWEirNchRJII`KzigaI}8r3yaI`ILB^9=o?*-GV#hKD z!E_lm@!p6iujV!PvHEmXF{nW zUHu$uR0vC$^4-kgm0ou@Bm@hb(vLq!D^rsYD?!?5oBpW(3uQhx@(F#!4q+!Z)jKW% zJnc|oJ$y9e`Bo>4xhM>9FcGFmN!xDYBX{akKB2G+0OGm&ewsAKlKT1$Udl;ln<)Z-urg_5gOf*4P}A^Ftf=B+2J`2!ls zA4-vp#4Hu8ugoyZXAGX3zeC{lI-NTK99F7Sz9kxbW3tge^+YPQ1oRf+sc&vT;r;xG zz;|S_qv#yK;sNhpz{EF=MnK9(FKl=(kZGIfH<9>A6ifhnH^-wHrRdZL6kburYZB+k z5As0UN#tN^MvEJ!R0=QE$Le&Y+%U`q&k6s$UvPp%B!1 zzfVU62aTK$r18!7`)F**Mr20eZyd1PuJiCLM2*b@+{P9iFJgVtuMPi*9>vNKV@oV( z(Oi9dFb_{P^C`D=lZE!jeYT0uW>Er@hq-WQi1V*EH%G}RTqVTm2}%9M7=RY{X|KF& zWdc06wfLXa0cD*-IE^a<@j<<}-t))`?ao-x@U}Q%Tl{9Kub9H#2au5wS&Mc9P{}J(og= zw<&x&_B2?US5yYMmh9A9sD0V9=2{%k;U6fPT7tHwoa#h^@RX1tW9h@W&EamJjX*{Q z5Z_FvbsdJfB!Cax1x8cpq4K2w;xF40#fkU8_C2+l+-nXWAUT1$olX| zfAN+Epf zg+6t*#Y+;9_l{$+yTj)bjz}1dEp_DsJc~HXmi!GcIu({xK?3b$BpQ%DZ*0(<#oo(n^I8l71sMg;*FL>F@J`1z$<9)9j+yy9wc0O`*V zs_ZoN`Tmx%8z~^6 
zr}n)@LtgqX97BZIY zbgoum^kmNGcz<(r_4&KJpz?=8lL2H&@yIs`U!m|p8F(NajD}9wH;sTW0?{_U4GC*J z>c1^qmWW)o2na>MBxCYpdU69~zTD`?QK@EGKXz$apNw`8fk_}W;}mJ+^1*abfmn!G z-GJ&U5aon8Jj{xy-12vLi1fP%C&)sIWfX&^EyB`YfPNf1N~IfyAeNT7Y*IWRcWrTlTffZeR(mxa;}$QO{p|R#1@odaOYD<63s7Lh80+)cl`PMH z6Y=5$D46(g4u8r2;2xyqckvL=a5Nzkw@4X!<^@j9gXK6PTwJ3c`V(#StsPO2#1r}% zDvy~Zsld##BeDDTL4|lJ22vqjI_Xygiti{oyB`<4=kqWW0WI={00en(7I#hzBj%*{LRnWE<;9@L~Q8o+2em+KvBPRxWURuRShpAoumy&>Fa zZN(~aKJ#m4_Vp8ggArUp`(r(*|evUm$Y*X%L zMJ(wC1ZL6Q8{>&CD$xQ(NaB(Pph03OTvZTM5RzuMW`!qTwS)Q-V8~te+&_O44Luom z13|fEm#G9oh}5*3r>z&QU%4gLXcVm=`ih|vCWK1qPhB}?ST4IRwCgd6bZn8HG#{El z!iBzHROt{SZD02hI*Mn3)HX_)LX!}C(H)$Zh1kbYsS-p3RUsMvjGTKG&&D)S}O*cAKZ36(*%_)uI!rvY{S zQj8f3MefbUo+*_|EF6ifIbKBoqt|y_gr5u#j z^W18Ml)6fYG>W)&Sl1+Sad^V0&FCiALN{F~le+B>EzVqqdhtbtFklVs`v(ry63@<1 zIdcJsYPlN|;S@Q;`XjXq+|)H@2dV+UaKR_BvP>BTw<5s@fxjE@MI{i7UG$6 z15@>P2zauzAHy*J9IYFBy&F`Ha0iT&AyMKmnsZp5!-5f|>3ssKo@wi~^?auYj?*7$ zafP?b7r4QNjvWfp66nTQ zc@&9l9Ug8YHex{7Zlh!?GgR4jQ+#VySOEA4fbL|On!+IO4w<$@#yu!bb0xRMQAxG^ z{KC1&y)DplmGX}Y%3qs5T!-S}E`c~`2W;V>CDxgPa3a|uE`@kYt$-+B6$G7ao-X>M z$>OCGqVXayS+>s5(vW2BDS3`SlvTF;oaZH+57bMU+gyMt5(y}i@}*mDBn0?DrL_&b zL<5}#7uLVA3XXC3o0_=(b2Zg?Rdn#q9qS*qcBfR)@(z@N{d%#VegZOB zIm~F?QVbn22W8>xn{NWr%ME!90m|jnts+?YdUi=Zq@NT@z1elYI4PV%(76)ZTOfeQ z(`8Q>E$_kuF6o0?wzqka-)RNp zjS7ies`Q2gx3IQc%aOSlkG8kgRycqRzK@7ce)$K7XpPjN+j~I zX0+CFS>L$P61^HI6JJY7+53+CvLmDO&RA5q*S}xEDx}wCiIJ{Tq#R#d>9?!7g?Cg} z8I`5DHyg38y|EvOD(A~9w|QCIe5_=c$;kKVqf`g|wK?D)Q2hV#Z|``FmYLS@-eoro zcAMB1k$4E8RMt)J+a{_bI>U_irQPi*uMTbBYB(F$DX{in{1*GCHH>?uMycDko>?@J zGtfzrg(awUYD)?KcSL)CoK{a!r$r3rri%2geX@d*I0-cDeR!!I&Lwa;eNV+tR{7yA zfxex~@EyNH+Ym6l`~Ddx%LOIo8i$dVFUckDK?u(Yhk5B~nTxan{FUOso_|NL#8HBg}CT5hJ|fWF+FlKWl3Clq#p3qiv}CG*w{d(^!-0PE$E2k}JBfHLM&juZgt(ndK{q4crs z+MZ{n(V2~!_hRoH1cq4M$d!{=7)wX*unR@Zh9`6`p?Op(jSolaeA0*!L@;^)jGRW6 zeIE4tb2A*6?#&_eh8@I*sS#^Ox0Q5L8^7l?HsJ8A5aoLfK_cCKl9}xS;AZxq6<6c~ zo&LxHVvIz}XchP=(<;!T#veo+!BJfwWIMzP6Ayd?957kg2W)JZ?nW(TLX$^t{pcG% 
z2k2`7^!Ng;2TDSlj}VqjF8{Xpk0Jtdh`=N;f)2a|zw(mTDY2^JW#pA&@aaW%-egQ- z;TQ5`3n8o&Cf#bcCaLA~L-e&nQxk zd`P-2fXD7slcFa~{u26o9Q1HVjQAn}k$H_fF>k~KZ=rRyrKI~8Nc%<*RTmsy8?KJA z57XQIW7;H^aA|4}SAT~JC%8jIPzu}M0LZuni2V!1q0+gqV1G!~jgY_YP^!gBb@yANEGC6^cE%MU84_JrL5xxdiEcbL zFM(_Eos(G}-B!SpG)RF7S2B-K&i8xmes?)JIGQ@Q8p%pSCXfI)ju4?@xRG&#-6;9> zGFsr^C;qT^8ow4KAxF*x)Kp9b$0z`W^)#vv^JDX@*C%7klM)F@elaHtl#!!b_AOK{U z5BoyC%c=THmO=;00XLrVnL;GTICCz`=TE+7@mA3Xe*q75et!HL$H~ZAQa=Fb8+v{M z*J5DWQc{YVlm~EuKU9^%>yhlne>?aV!HE}f;}owtV93UuZi6qif8AtZ;onNP=8Lw? zGSMe|r0pYKOzdHQ0~NRCo}9Y{WWEC%zknXAb<}0tf$I_l!)~9M5o&h{=0m>t%UWVX zbEt&^Xrw=C_mu&%T_wac*TDV?omD{#&Q?dZJP@YP7Va%=w~ycZcp5e2ksNj*QBg{X zhh~w4qnJ?9+@V59$1fHK3#q_-F7KS2G|Qu`a%Vlo*i11eB}U|q3rXkba&W8G3|?x z)he%ORC5qoqx_Ay!m?(bTm(xAWO(s=6`|7bZ+Wizsa{&|tV$y~sz$Vjo3NZCkP+O{ z?2#E3Qt}@t_;rd5G#2$A><^uz;K-(TGeX~JQ#?~|X5js`-nWz_s}&IRkpqIn97EWDMC3F0^*x9%_4k-1$cRbD zA;a$`&$9Q@veElOWV#6;5vP3za2 z6Y$7_UoxJSQkYA_NxTm6)dSqgvsw}9dQvT3zFR{D9992je=c<)VCow)+b7nta&CeL767wl+S zfW}~nUjQi}i7F;gl5wT)9R#q{@g>&9fU>;QF_^edY?k>njm_2Ui}WBkr(&S&yscO4 zcaSz>I0d&#q}IMk$qB)#i)Vmz&l^~HfFn4G(YZYG$$k>cMJ~D!-z45xs2UhUr4?ltvJtH|}7RouFWVG#c^CNBB zPxdsNK2=ZDjBHdlEBfEpB9^SV`})IJAm7K?B@#Vjsl9Rs8d>|n&WD;^_l!IKvlQCr zzEq{F4U<>l*2T(J1h+;PkbcY-wb4^xyY{`Fy3Na<2cjEE@AzR>4HMvO177YmPbBWx znOw1EVY$dTdN<3jZr&vB&ggI(Df3@PbQtN?^;II>sv4oL_oo8sY2ebB1$Nqcp1OzZ zRRVUyFpvB*hXm!?Ef*Q8y^&S4=lI&@L3*vG* z+P$A8Hjflb;RZ%cq^W(F$`@8)f-J`Mj*3psqY!GTCB;IXO{l`eX*@RarjHD3Jf1UN zacY;o$Px(kU`5vwShA&*H-WWX25s3wN<2vNZ;M#23>)aKey&w|HYyhB$W5|fqq-#n zPuS_qA6p&cIb5$b?56I#SaX33(p?Oaz{zfS0J({M>?iF17yCw_qb))X*GNJuUiB2N zT_!E1Eb>mb`?KMX%4zy3aDamU1a}M#!ePAsJ$3PPUjSpo3R!jMC*-xlT*geYIX7EzPZ$E!rr%< zddaB6G*{E&kOUB_olT1;jq=iv()kH(k3>mU#!8DOGs!bF%e@Q7P)kt|+lKlz$IP@| zc|5UjLoD3bTcbl~yvhE>K78+r?2rjI-PhADE$7&;;;%r!z)j#q1Ch`$OMCNZJ9uRU zg(|QChqfS-_JMvS)#AkW@onj09EVkEx!3HK7khl^RFE8cf^w%F6nb}wE27#%=U!?O z(xlv_Z6R6Z_%M41w7Kn@1}cK7B$xO<+Pn6@obSJ%LpGN*tECmnQBNoZH8kTQ)dIw(>UmBS{iQpqX07%6o>pC9-Axc4{QkK2#d 
zL%TlL`~7;oo~PFbsbGz3%Vdpse@3UOIJdz(?{?f!gK_eF^&ecn;^tAd{-wHi5TQbznNuc5GH4Lq*kY@aP$JO6eQAzr{QdfS0U)W-3 zi^1EQwZb&wWB-JnwjKj@eal|)FP17_sQ$+_7?V;2$LE0l+>v7Xr`lY(|Kp|6#)#LhV}F^W&!c7X=tO+Ji}JF{a4*r>E53$*jnO zmSb77|MF`ILqdA0w@Kv6l3E$n|2k+fbY-9{dUCSpx=RN81o&^c&#%W*Wmge z_ri8CE1MLRrSXSYfAi~pgq5gj-Y1%g_pH8gFYKTqws=)f8Z<3pF1&o-^&+9^ zD@?ebUBBlEtvLtEULaaX?{??$R&I0cQ*WFZoA_eq;!*Q?mhkW&9`{HIRhGfoqmpf$3_BU_Rbpyq~7 zy}x@#FA7T=%AHKReix{k%g`m&Sr1Q~X|M{o$0#vdG_Ij0QA){k@GVzW#Aznm($st! zv~7@gl;B1t4HK~%1RZ+eTud9GBV?5thyY>CubOiz;5)Q*atYQ3@+0j^>t9mdv$MA24$Ja^3J(bl$&`MIx38za7Iisl)-iqq8HAwL7I~gG z&{}suSe2E+M0m)2gzn5#RWCc+@s1WwiA~+Uv>`9)fP&!jG@?oQHHTl+vWbVyl3o}X z95?lP^GmD4a-=xCmr^jcxKdeJP@o@{+8QCs8@l;{H%$igIN;zK5-A-PD zuW91Lh|}kuf5EwB$K#`6trRUE%2b`>zaOJJh0#MH0|U0bZqyva79-d} zKIH0HoHy5T47O|ugHdSr{>~--cciz~5F8%7^-qAgTEy=vc(y868q&W0R(Idwjv7+= z1T$XN<%9I&0VJoGD+3g6P_#xc(!a27TZV#M;tFs+bBmKy1G(_v9FA@)90d{3;Jzbf zc5yrk6KB-!i{gov<$FL)bp*pXGs6?EmB49MdbP!kOJ}y5h{z4N* zF0>EhTr}Wp6^0XsGr{YG61bF*{&QR57pvRAtXlO0msC zy^(2o)5XWabooSg#kh=(;Z8KgzGKB3J+9n^Y&Prd5?QU^l69l1kPVu%!pqXP5yD98 zXVG_9?u=OO+!pHnoLb|aZ6any1DZ-lCxV}DN4u?~X6JrU0nX}&Mw@BTSh=ZX7K}n$ zSUzIhqd&fH_*&Dy*6^$lai>y}Kl~;%K#L4BM}LwY`R`azEO0s2gJtGYgl40uu3RdF z+ZMeU?(d1i{zBIYbkUQa;Wdh?U&^%MH?iE)+qRRPMjB(_k`{8ey@U5NB3PvQr6X>`zG1;4<`tI^;FSUA|Bu(l?NJahPqtrM@YB zzy3g$Nknez8j&GO9@Rz{^+`qR$PTulhU_Y=cgN*2u;xiWR>&S6PCBs>oC_hpLz{yKb$DSXbRBR`a2OP{D3d)@=O zE5-WP*7B)ralmA+w@>^Z%je%9D5Ns&zMnm_gM#G&Xp|QJsO|YQdmiYW)$)t7&&V4N z?|oMvOZTyI@xQ+vshT{6{h_p;*lKVOu?rZzSFkKx0Y{Q{N#-8IsxX*sx3(qFe{qFO znEuE>I85(~NyZz57GRB=Z&di1@qQRaDv`rD*cYq12&}5FeuTqczrCzW3GQ$Eif*bB zw1bDS)o$~{82JM4)ItHG`>@wE>50LMKKr6DV1U_k@X=i)N5Q7v z#AcQfvm&Wwk_O(G_p~G@!Fjbom-cm3WxHn<-{5m6yXNX$8ZmC`7_veGZvnl;3^a3j z#VcCHz^Q!2O$7aj8lQ}lZ2-BMsyrWCs0^8=bkmNdIgKUflW{*` z?&BJP2WCJtfm=Zv>sNj}KNE3&LaHY8rB)2J z{oguVI-M1m?A+>?e}WC{>9tGR+jPYbN`4GezuY_;W2%s|fmy9Mc5|4bW-Hg< z&=FCU2K2o%EeQ6Q-ns;?`!V@*LqcFb*cjh?;Z=;z2~lZ831b8xX?NII>na*O%i0=;+UR{qt=XE7svGr2^VmO1z|Y$+ 
zUedYW6|Z`VJ0W;=9vqM+D|hun?)CyleeK~GG%EGP3I)8dC6@FPE&QyK8Rpqz%1V4| zm6uM}Vz`gk#z#Nwf1cO#!7qG<$Q9Uj)x{&jZ_3vD7(?6Y5HOam8b6i|c~v*F`_Uhp z;-MmTbXHUys~SC7`c_RWuGBr29s180S-0=`+G(su3p*Sli-DvkR_jjp3*IHNsIMsp6ynSb9Bb5mbmB z>A?fNuWIy93A<%5imhXqI_z^%_vw{NlJ3KAw~Gv=cSvMJ%}22?r^-yexH*Wu&~1_E zRQKuHiKf+aknW##*18@kh5*7=Rz>QJ2&gKSfOgCT7q3w);D-c2vvFEpM*a zfjSnYV`vZ!ht(&eDKsM?{u8qCriBO;E+i4ucMq)QS2!vU!pVr4psP7CD$Z9H4C%zD zz1qIW48g+wZca5BNW}*(QoyOHjRCoL8IN0?O%g4p%J_;#!llu@H88>GO~lDmhSh? z?pb6L)^ooVGcvtP4v^Q|LKqE{h&Srb@}3K9vupnt`L43BuqUIF&1p(gXv>eAt2WK zU1$IXZG_;R(QN4HP?5Y5czP5$f`;w~K}f+az>)X87ZGW~QX1l*B+WbZ@?gr|-^K9b>$r!88FGZ8X&--`4r}{mGL=*Xkp7rFIMZ-*R@DhsU!#6< z)>tI3&a$pZD_g-c8noKz?1p)%b;Z5LW~1veV!zBXfm_52oMJ~!JU6~x-o7X_@-iBI z%7Klbr`$F}(&fGhA#F-63ua7>)t0mEp>?Ik;zj5?s3n>{Drm6M?-d@LIkU)I{6z|Z zz-u}tzBU@J2B9l?6A^&bOStsRJt&1!EIm?{*nqKr%?fxXn_>^8dI4LwR7P{tL|s{?V&$gvxy;hztR6=4)``#b#W znCjij8#8TSY!pn>AMAlDp1VZ82I&w>-I0tC9kvua2bS`NopYFEDj93%c#*py*x$ZA zQ!_fc|z(nSW>m+vb>z_-Ykm*W&(|z zv;Ll+R})8;iC#d49%U7`;9=Ax%Zn{Im$b^Bq{*aiko1WH2E(1)rGcQ?;PzqG&nMUwksfvWG8H;{m2hQ6b)b{RLOkFe zDjuyAs86%NIv4+NX>$E6 zF99EsD<5Nb0lK&C9p@R&2L+&N7fe`^Jd>BQm7>suc~}AwiS-qeXnbar$5WSeaRCCP zJpXD&^$9xQQ1&tLk`FRx?3B&6ThXgYmttS!$x{BbN+jlOTMf?JSpcvlklF7!C|Ey W#rZmcbvyW>7F#S`kUxKI=zjsF_z(F2 From 54a72a20c6e07fa317b968c5c01f6e90cef512f3 Mon Sep 17 00:00:00 2001 From: senseibelbi Date: Fri, 17 Apr 2026 14:41:35 +0200 Subject: [PATCH 2/4] Move manual helper scripts out of repo root --- scripts/manual/README.md | 7 +++++++ epa_tool_runner.py => scripts/manual/epa_tool_runner.py | 0 .../manual/extract_api_structure.py | 5 +++-- test_api.py => scripts/manual/test_api.py | 0 .../manual/test_chlorpyrifos_analysis.py | 0 5 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 scripts/manual/README.md rename epa_tool_runner.py => scripts/manual/epa_tool_runner.py (100%) rename extract_api_structure.py => scripts/manual/extract_api_structure.py (90%) rename test_api.py => scripts/manual/test_api.py (100%) rename 
test_chlorpyrifos_analysis.py => scripts/manual/test_chlorpyrifos_analysis.py (100%) diff --git a/scripts/manual/README.md b/scripts/manual/README.md new file mode 100644 index 0000000..6d5f2a3 --- /dev/null +++ b/scripts/manual/README.md @@ -0,0 +1,7 @@ +Manual helper scripts used for ad hoc local verification and debugging live outside the public package surface. + +- `start_epa_mcp.sh`: starts the local MCP server for manual testing. +- `test_epa_mcp_curl.sh`, `test_mcp_http.sh`, `test_legacy_uri.sh`: shell-based smoke checks for the transport layer. +- `epa_tool_runner.py`: JSON-RPC helper for direct `tools/call` execution against a local server. +- `test_api.py`, `test_chlorpyrifos_analysis.py`: one-off API probing scripts kept for manual diagnosis. +- `extract_api_structure.py`: captures a local CTX client method snapshot into ignored `artifacts/`. diff --git a/epa_tool_runner.py b/scripts/manual/epa_tool_runner.py similarity index 100% rename from epa_tool_runner.py rename to scripts/manual/epa_tool_runner.py diff --git a/extract_api_structure.py b/scripts/manual/extract_api_structure.py similarity index 90% rename from extract_api_structure.py rename to scripts/manual/extract_api_structure.py index e99fb79..6ce4f4a 100644 --- a/extract_api_structure.py +++ b/scripts/manual/extract_api_structure.py @@ -59,8 +59,9 @@ def extract_class_methods(cls, instance=None): 'search_toxprints': str(inspect.signature(ctx.search_toxprints)) } -# Save to an ignored local artifact path so ad hoc snapshots do not clutter the repo root. -output_path = Path(__file__).resolve().parent / "artifacts" / "epa_comptox_api_structure.json" +# Save to the repo-level ignored artifacts/ path so ad hoc snapshots stay out of the public tree. 
+repo_root = Path(__file__).resolve().parents[2] +output_path = repo_root / "artifacts" / "epa_comptox_api_structure.json" output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("w") as f: diff --git a/test_api.py b/scripts/manual/test_api.py similarity index 100% rename from test_api.py rename to scripts/manual/test_api.py diff --git a/test_chlorpyrifos_analysis.py b/scripts/manual/test_chlorpyrifos_analysis.py similarity index 100% rename from test_chlorpyrifos_analysis.py rename to scripts/manual/test_chlorpyrifos_analysis.py From 7572d95a0818632e4292fb1d67b75d9e33a25f5b Mon Sep 17 00:00:00 2001 From: senseibelbi Date: Fri, 17 Apr 2026 20:02:10 +0200 Subject: [PATCH 3/4] Document how to request a CompTox API key --- .env.example | 4 +++ README.md | 35 ++++++++++++++++++++-- docs/integration_guides/mcp_integration.md | 3 +- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/.env.example b/.env.example index 2199644..f995b61 100644 --- a/.env.example +++ b/.env.example @@ -6,6 +6,10 @@ CORS_ALLOW_ORIGINS="" # comma-separated list of # CTX (Comptox) API CTX_API_BASE_URL="https://comptox.epa.gov/ctx-api" # Default CTX API server +# Request a free CTX API key from ccte_api@epa.gov. +# Official docs: +# - https://www.epa.gov/comptox-tools/computational-toxicology-and-exposure-apis +# - https://www.epa.gov/comptox-tools/computational-toxicology-and-exposure-apis-about CTX_API_KEY="your_ctx_api_key_here" # Required for CTX APIs (do not commit real key) CTX_USE_LEGACY="0" # Set to "1" to use https://api-ccte.epa.gov until 2025-10-01 EPA_COMPTOX_API_KEY="" # Legacy env name also supported (fallback) diff --git a/README.md b/README.md index a4ac471..64b16d1 100644 --- a/README.md +++ b/README.md @@ -194,7 +194,7 @@ pip install -e . 
# 2) configure cp .env.example .env -# set CTX_API_KEY in .env +# request a free CTX API key from ccte_api@epa.gov, then set CTX_API_KEY in .env # 3) run uvicorn epacomp_tox.transport.websocket:app --host 0.0.0.0 --port 8000 --reload @@ -218,6 +218,37 @@ uvicorn epacomp_tox.transport.websocket:app --reload > **Important:** The server needs a valid EPA CompTox API key. Set `CTX_API_KEY` (preferred) or `EPA_COMPTOX_API_KEY` in `.env` before starting the transport. +### Get an EPA CompTox API key + +EPA's current CTX API documentation says API keys are free and must be requested from the API support team at `ccte_api@epa.gov`. + +- CTX APIs overview: [epa.gov/comptox-tools/computational-toxicology-and-exposure-apis](https://www.epa.gov/comptox-tools/computational-toxicology-and-exposure-apis) +- CTX API authentication/about page: [epa.gov/comptox-tools/computational-toxicology-and-exposure-apis-about](https://www.epa.gov/comptox-tools/computational-toxicology-and-exposure-apis-about) + +Suggested request email: + +```text +To: ccte_api@epa.gov +Subject: Request for EPA CompTox CTX API key + +Hello, + +I would like to request an API key for the EPA Computational Toxicology and Exposure APIs (CTX APIs). + +Name: +Organization: +Intended use: + +Thank you. +``` + +Once EPA sends your key: + +```bash +cp .env.example .env +# then paste the key into CTX_API_KEY in .env +``` + With the server running, MCP clients can connect to `http://localhost:8000/mcp` (HTTP) or `ws://localhost:8000/mcp/ws` (WebSocket). Once the server is running: @@ -264,7 +295,7 @@ Settings are resolved via [`pydantic-settings`](https://docs.pydantic.dev/latest | Variable | Required | Default | Description | | --- | --- | --- | --- | -| `CTX_API_KEY` | ✅ | – | CompTox API key used for all downstream requests. Fallbacks: `EPA_COMPTOX_API_KEY`, `ctx_x_api_key`. | +| `CTX_API_KEY` | ✅ | – | CompTox API key used for all downstream requests. EPA currently issues keys via `ccte_api@epa.gov`. 
Fallbacks: `EPA_COMPTOX_API_KEY`, `ctx_x_api_key`. | | `CTX_API_BASE_URL` | Optional | `https://comptox.epa.gov/ctx-api` | Base URL for CompTox API. | | `CTX_USE_LEGACY` | Optional | `0` | Set to `1` to use the legacy `https://api-ccte.epa.gov` endpoint. | | `CTX_RETRY_ATTEMPTS` | Optional | `3` | Number of retry attempts for transient errors. | diff --git a/docs/integration_guides/mcp_integration.md b/docs/integration_guides/mcp_integration.md index 2529e18..4989681 100644 --- a/docs/integration_guides/mcp_integration.md +++ b/docs/integration_guides/mcp_integration.md @@ -5,7 +5,7 @@ The EPA CompTox MCP server exposes JSON-RPC over HTTP (`/mcp`) and WebSocket (`/ > **Prerequisites** > > 1. Deploy the MCP server (local or remote) and expose the `/mcp` endpoint. -> 2. Set `CTX_API_KEY` (preferred) or `EPA_COMPTOX_API_KEY` so the server can reach the EPA CompTox API. +> 2. Set `CTX_API_KEY` (preferred) or `EPA_COMPTOX_API_KEY` so the server can reach the EPA CompTox API. EPA currently distributes free CTX API keys via `ccte_api@epa.gov`; see the [CTX APIs overview](https://www.epa.gov/comptox-tools/computational-toxicology-and-exposure-apis). > 3. If you front the MCP server with an auth layer, obtain the access token required by your MCP client. > > Replace `http://localhost:8000/mcp` with your deployment URL when following the snippets. @@ -105,4 +105,3 @@ For additional automation examples, consult: - [`tests/test_http_transport.py`](../../tests/test_http_transport.py) for pure HTTP flows. - [`tests/test_websocket_transport.py`](../../tests/test_websocket_transport.py) for WebSocket streaming and cancellation cases. - [`scripts/mcp_ws_client.py`](../../scripts/mcp_ws_client.py) for a minimal WebSocket client you can adapt. 
- From 55c9bc5299d884a72fa85093ebfe3cf9fd8b3a4e Mon Sep 17 00:00:00 2001 From: senseibelbi Date: Fri, 24 Apr 2026 00:09:49 +0200 Subject: [PATCH 4/4] Harden MCP release readiness --- .env.example | 8 + MANIFEST.in | 1 + README.md | 17 +- docs/deployment.md | 13 +- docs/mcp_transport.md | 6 +- docs/operations/metrics_integration.md | 18 +- pyproject.toml | 1 + src/epacomp_tox/assets.py | 45 ++ src/epacomp_tox/contracts/__init__.py | 18 +- src/epacomp_tox/data/__init__.py | 0 .../get_bioactivity_aop.response.schema.json | 53 ++ ...get_bioactivity_assay.response.schema.json | 62 ++ ...ity_summary_by_dtxsid.response.schema.json | 52 ++ ...rch_bioactivity_terms.response.schema.json | 39 ++ .../chemical/ghs_links.response.schema.json | 19 + .../indigo_convert.response.schema.json | 13 + .../opsin_convert.response.schema.json | 16 + ...e_chemical_identifier.response.schema.json | 72 ++ .../search_chemical.response.schema.json | 53 ++ .../structure_file.response.schema.json | 40 ++ .../toxprints.response.schema.json | 7 + .../common/list_generic.response.schema.json | 7 + .../mapping_list_generic.response.schema.json | 10 + .../common/object.response.schema.json | 7 + .../object_or_list.response.schema.json | 15 + .../get_exposure_httk.response.schema.json | 63 ++ .../search_cpdat.response.schema.json | 53 ++ .../exposure/search_httk.response.schema.json | 57 ++ .../batch_search_hazard.response.schema.json | 59 ++ .../hazard/search_hazard.response.schema.json | 63 ++ ...get_contract_manifest.response.schema.json | 153 ++++ .../applicability_detail.response.schema.json | 41 ++ .../applicability_list.response.schema.json | 14 + .../metadata/model_cards.response.schema.json | 43 ++ .../predictive/ad_check.response.schema.json | 12 + .../predictive/predict.response.schema.json | 32 + ...ioritize_risk_signals.response.schema.json | 135 ++++ .../aop_linkage_summary.response.schema.json | 125 ++++ ...comptox_evidence_pack.response.schema.json | 127 ++++ 
.../pbpk_context_bundle.response.schema.json | 126 ++++ .../metadata/applicability_domains/README.md | 8 + .../genra_read_across_ad.json | 29 + .../opera_property_ad.json | 24 + .../test_consensus_ad.json | 25 + .../model_cards/genra_read_across.json | 209 ++++++ .../metadata/model_cards/opera_property.json | 215 ++++++ .../metadata/model_cards/test_consensus.json | 199 ++++++ src/epacomp_tox/data/schemas/README.md | 28 + .../data/schemas/aopLinkageSummary.v1.json | 232 ++++++ .../bioactivityEvidenceSummary.v1.json | 252 +++++++ .../schemas/chemicalIdentityRecord.v1.json | 123 ++++ .../data/schemas/comptoxEvidencePack.v1.json | 251 +++++++ .../schemas/comptox_model_card.schema.json | 658 ++++++++++++++++++ .../examples/aopLinkageSummary.example.json | 50 ++ .../bioactivityEvidenceSummary.example.json | 77 ++ .../chemicalIdentityRecord.example.json | 29 + .../examples/comptoxEvidencePack.example.json | 366 ++++++++++ .../exposureEvidenceSummary.example.json | 82 +++ .../hazardEvidenceSummary.example.json | 91 +++ .../examples/pbpkContextBundle.example.json | 214 ++++++ .../schemas/exposureEvidenceSummary.v1.json | 191 +++++ .../schemas/hazardEvidenceSummary.v1.json | 248 +++++++ .../data/schemas/pbpkContextBundle.v1.json | 245 +++++++ src/epacomp_tox/metadata/applicability.py | 35 +- src/epacomp_tox/metadata/model_cards.py | 42 +- src/epacomp_tox/orchestrator/audit.py | 80 ++- src/epacomp_tox/resources/manifest.py | 43 +- src/epacomp_tox/server.py | 93 ++- src/epacomp_tox/settings.py | 68 +- src/epacomp_tox/tools/registry.py | 49 ++ src/epacomp_tox/transport/common.py | 1 + src/epacomp_tox/transport/http.py | 127 +++- src/epacomp_tox/transport/security.py | 318 +++++++++ src/epacomp_tox/transport/websocket.py | 127 +++- tests/test_audit_hardening.py | 33 + tests/test_http_transport.py | 3 + tests/test_package_assets.py | 114 +++ tests/test_security_hardening.py | 246 +++++++ tests/test_tool_registry.py | 17 +- 79 files changed, 6843 insertions(+), 94 deletions(-) 
create mode 100644 src/epacomp_tox/assets.py create mode 100644 src/epacomp_tox/data/__init__.py create mode 100644 src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_aop.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_assay.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_summary_by_dtxsid.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/bioactivity/search_bioactivity_terms.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/ghs_links.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/indigo_convert.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/opsin_convert.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/resolve_chemical_identifier.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/search_chemical.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/chemical/structure_file.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/cheminformatics/toxprints.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/common/list_generic.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/common/mapping_list_generic.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/common/object.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/common/object_or_list.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/exposure/get_exposure_httk.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/exposure/search_cpdat.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/exposure/search_httk.response.schema.json 
create mode 100644 src/epacomp_tox/data/contracts/schemas/hazard/batch_search_hazard.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/hazard/search_hazard.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/manifest/get_contract_manifest.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/metadata/applicability_detail.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/metadata/applicability_list.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/metadata/model_cards.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/predictive/ad_check.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/predictive/predict.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/risk/prioritize_risk_signals.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/workflow/aop_linkage_summary.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/workflow/comptox_evidence_pack.response.schema.json create mode 100644 src/epacomp_tox/data/contracts/schemas/workflow/pbpk_context_bundle.response.schema.json create mode 100644 src/epacomp_tox/data/metadata/applicability_domains/README.md create mode 100644 src/epacomp_tox/data/metadata/applicability_domains/genra_read_across_ad.json create mode 100644 src/epacomp_tox/data/metadata/applicability_domains/opera_property_ad.json create mode 100644 src/epacomp_tox/data/metadata/applicability_domains/test_consensus_ad.json create mode 100644 src/epacomp_tox/data/metadata/model_cards/genra_read_across.json create mode 100644 src/epacomp_tox/data/metadata/model_cards/opera_property.json create mode 100644 src/epacomp_tox/data/metadata/model_cards/test_consensus.json create mode 100644 src/epacomp_tox/data/schemas/README.md create mode 100644 
src/epacomp_tox/data/schemas/aopLinkageSummary.v1.json create mode 100644 src/epacomp_tox/data/schemas/bioactivityEvidenceSummary.v1.json create mode 100644 src/epacomp_tox/data/schemas/chemicalIdentityRecord.v1.json create mode 100644 src/epacomp_tox/data/schemas/comptoxEvidencePack.v1.json create mode 100644 src/epacomp_tox/data/schemas/comptox_model_card.schema.json create mode 100644 src/epacomp_tox/data/schemas/examples/aopLinkageSummary.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/bioactivityEvidenceSummary.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/chemicalIdentityRecord.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/comptoxEvidencePack.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/exposureEvidenceSummary.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/hazardEvidenceSummary.example.json create mode 100644 src/epacomp_tox/data/schemas/examples/pbpkContextBundle.example.json create mode 100644 src/epacomp_tox/data/schemas/exposureEvidenceSummary.v1.json create mode 100644 src/epacomp_tox/data/schemas/hazardEvidenceSummary.v1.json create mode 100644 src/epacomp_tox/data/schemas/pbpkContextBundle.v1.json create mode 100644 src/epacomp_tox/transport/security.py create mode 100644 tests/test_package_assets.py create mode 100644 tests/test_security_hardening.py diff --git a/.env.example b/.env.example index f995b61..a615219 100644 --- a/.env.example +++ b/.env.example @@ -3,6 +3,13 @@ ENVIRONMENT="development" # deployment environment LOG_LEVEL="INFO" # logging verbosity BYPASS_AUTH="0" # set to "1" to disable auth (development only) CORS_ALLOW_ORIGINS="" # comma-separated list of allowed origins +MCP_AUTH_ISSUER="" # OIDC issuer URL for MCP bearer-token validation +MCP_AUTH_AUDIENCE="" # Expected JWT audience (comma-separated values allowed) +MCP_AUTH_JWKS_URL="" # OIDC JWKS URL used to verify JWT signatures +MCP_AUTH_REQUIRED_SCOPES="tox:read" 
# Space/comma-separated scopes required for MCP calls +MCP_RESOURCE_URL="http://localhost:8000/mcp" # Canonical protected MCP resource URL +MCP_RATE_LIMIT_REQUESTS_PER_MINUTE="120" # Per-subject/IP tool-call limit; 0 disables local limiter +MCP_RATE_LIMIT_BURST="20" # Token-bucket burst size for MCP tool calls # CTX (Comptox) API CTX_API_BASE_URL="https://comptox.epa.gov/ctx-api" # Default CTX API server @@ -20,3 +27,4 @@ CTX_RETRY_BASE="0.5" # Base delay (seconds) fo EPACOMP_MCP_HEARTBEAT_TIMEOUT_SECONDS="120" # Minimum heartbeat timeout negotiated with clients EPACOMP_MCP_HANDSHAKE_TIMEOUT_SECONDS="30" # Minimum handshake timeout negotiated with clients EPACOMP_MCP_METRICS_ENABLED="1" # Expose /metrics endpoint +MCP_METRICS_BYPASS_AUTH="0" # Set to "1" only when a trusted gateway protects metrics diff --git a/MANIFEST.in b/MANIFEST.in index fe89000..ae7b4a1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,3 @@ include README.md LICENSE recursive-include metadata *.json *.md +recursive-include src/epacomp_tox/data *.json *.md diff --git a/README.md b/README.md index 64b16d1..c3232cf 100644 --- a/README.md +++ b/README.md @@ -303,10 +303,18 @@ Settings are resolved via [`pydantic-settings`](https://docs.pydantic.dev/latest | `ENVIRONMENT` | Optional | `development` | Controls defaults like permissive CORS. | | `LOG_LEVEL` | Optional | `INFO` | Application log level. | | `BYPASS_AUTH` | Optional | `0` | Set to `1` to disable auth (development only). | +| `MCP_AUTH_ISSUER` | Production | – | Expected OIDC issuer for MCP bearer JWTs. | +| `MCP_AUTH_AUDIENCE` | Production | – | Expected JWT audience for the canonical MCP resource. | +| `MCP_AUTH_JWKS_URL` | Production | – | JWKS URL used to verify bearer-token signatures. | +| `MCP_AUTH_REQUIRED_SCOPES` | Optional | – | Space/comma-separated scopes required for MCP tool calls. 
| +| `MCP_RESOURCE_URL` | Optional | `http://localhost:8000/mcp` | Canonical protected resource URL advertised in OAuth metadata and challenges. | +| `MCP_RATE_LIMIT_REQUESTS_PER_MINUTE` | Optional | `120` | In-memory per-subject/IP tool-call limit; set `0` to disable local limiting. | +| `MCP_RATE_LIMIT_BURST` | Optional | `20` | Token-bucket burst size for local tool-call limiting. | | `CORS_ALLOW_ORIGINS` | Optional | – | Comma-separated origins for HTTP transport. Defaults to `*` in development. | | `EPACOMP_MCP_HEARTBEAT_TIMEOUT_SECONDS` | Optional | `120` | Minimum heartbeat timeout negotiated with WebSocket clients. | | `EPACOMP_MCP_HANDSHAKE_TIMEOUT_SECONDS` | Optional | `30` | Minimum handshake timeout negotiated with WebSocket clients. | | `EPACOMP_MCP_METRICS_ENABLED` | Optional | `1` | Toggle `/metrics` endpoint exposure. | +| `MCP_METRICS_BYPASS_AUTH` | Optional | `0` | Allow unauthenticated metrics only when a trusted gateway already protects the endpoint. | See [`docs/deployment.md`](docs/deployment.md) for production hardening tips and expanded configuration. @@ -398,7 +406,8 @@ A scheduled GitHub Action (`.github/workflows/endpoint-check.yml`) runs `python - Run via Gunicorn: `gunicorn epacomp_tox.transport.websocket:app -c deploy/gunicorn_conf.py` - Container image: see [`deploy/Dockerfile`](deploy/Dockerfile) for a hardened, non-root runtime. - Probes: `/healthz` (liveness) and `/readyz` (performs CTX connectivity check). Non-200 responses should trigger restarts. -- Metrics: `/metrics` exposes Prometheus gauges derived from `MCPServer.get_transport_metrics()`. Sample scrape/OTEL configs live in `deploy/prometheus_scrape.yaml` and `deploy/otel_collector_metrics.yaml`. +- Auth: production deployments should configure `MCP_AUTH_ISSUER`, `MCP_AUTH_AUDIENCE`, and `MCP_AUTH_JWKS_URL`; unauthorized MCP requests receive OAuth protected-resource challenges. 
+- Metrics: `/metrics` exposes Prometheus gauges derived from `MCPServer.get_transport_metrics()` when `EPACOMP_MCP_METRICS_ENABLED=1`; it uses the same bearer auth unless `MCP_METRICS_BYPASS_AUTH=1` is explicitly set. Sample scrape/OTEL configs live in `deploy/prometheus_scrape.yaml` and `deploy/otel_collector_metrics.yaml`. - Additional rollout guidance (TLS, ingress, scaling) lives in [`docs/deployment.md`](docs/deployment.md). --- @@ -421,14 +430,15 @@ Every successful tool invocation returns structured payloads designed for agents - `content`: human-readable JSON wrapped as text for chat surfaces. - `structuredContent.data`: machine-readable results (lists, dicts, or arrays) for programmatic chaining. -- `structuredContent.metadata`: when available, includes rate-limit information, validation metadata, and session metadata. +- `structuredContent.metadata`: when available, includes rate-limit information, validation metadata, and scrubbed session metadata. Bearer tokens and raw client authentication payloads are never echoed. - Default registered tools are retrieval and federation oriented; experimental predictive/orchestrator modules in this repository are not part of the canonical public surface yet. --- ## Security checklist -- Disable `BYPASS_AUTH` and front the MCP server with OAuth/OIDC once deployed beyond local development. +- Disable `BYPASS_AUTH` and configure OAuth/OIDC bearer validation before deploying beyond local development. +- Enforce shared rate limits at the gateway for distributed deployments; the built-in limiter is process-local defense in depth. - Restrict `CORS_ALLOW_ORIGINS` to approved hosts when exposing the HTTP transport. - Rotate `CTX_API_KEY` regularly and store secrets outside the repository (e.g. cloud secret manager or OS keychain). - Monitor `/metrics` for negotiated capability changes and unexpected spikes in `tools/call` failures. 
@@ -466,6 +476,7 @@ Every successful tool invocation returns structured payloads designed for agents - `tests/test_mcp_conformance_suite.py` covers handshake, catalog discovery, and streaming behaviours. - `tests/test_tool_contracts.py` enforces output schema declarations for the registered resources. +- `black --check src tests` and `isort --check-only src tests` are the canonical repository hygiene checks. - `scripts/smoke_ctx.sh` runs integration smoke tests against the live CTX API. - `scripts/mcp_http_smoke.sh` performs a quick JSON-RPC handshake and tool listing against the HTTP transport. - `scripts/mcp_interop_smoke.py` validates the public interop tool path end-to-end over the HTTP transport. diff --git a/docs/deployment.md b/docs/deployment.md index 25b0dd5..4c2cee7 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -7,6 +7,7 @@ This guide explains how to run the EPA CompTox MCP transport service in producti - Python 3.11 (or newer) for bare-metal deployments. - Valid CompTox credentials exposed via `CTX_API_KEY` (preferred) or `EPA_COMPTOX_API_KEY`. - Network access to the CompTox CTX API endpoint configured with `CTX_API_BASE_URL` (defaults to `https://comptox.epa.gov/ctx-api`). +- Production MCP bearer-token validation configured with `MCP_AUTH_ISSUER`, `MCP_AUTH_AUDIENCE`, `MCP_AUTH_JWKS_URL`, and an externally reachable `MCP_RESOURCE_URL`. 
## Running with Gunicorn + Uvicorn Workers @@ -30,6 +31,13 @@ Key environment overrides: | `EPACOMP_MCP_GRACEFUL_TIMEOUT` | Graceful shutdown window | `30` | | `EPACOMP_MCP_KEEPALIVE` | HTTP keepalive (seconds) | `5` | | `EPACOMP_MCP_LOG_LEVEL` | Gunicorn log level | `info` | +| `MCP_AUTH_ISSUER` | Expected OIDC issuer for MCP JWTs | unset | +| `MCP_AUTH_AUDIENCE` | Expected MCP JWT audience | unset | +| `MCP_AUTH_JWKS_URL` | JWKS URL for JWT signature verification | unset | +| `MCP_AUTH_REQUIRED_SCOPES` | Required bearer scopes for MCP calls | unset | +| `MCP_RESOURCE_URL` | Canonical protected MCP resource URL | `http://localhost:8000/mcp` | +| `MCP_RATE_LIMIT_REQUESTS_PER_MINUTE` | Process-local per-subject/IP tool-call limit | `120` | +| `MCP_RATE_LIMIT_BURST` | Process-local token-bucket burst size | `20` | All workers use the `uvicorn.workers.UvicornWorker` class, so the WebSocket transport runs on ASGI-native workers. @@ -69,7 +77,8 @@ The image exposes port `8000` and ships with `/app/gunicorn_conf.py` plus `/app/ - `GET /healthz`: liveness signal, returns immediate 200 when the process is responsive. - `GET /readyz`: readiness probe. Performs a strict authenticated CTX probe via `MCPServer.check_health(probe_mode="readiness")` against stable upstream API routes. Bare reachability to `/ctx-api/health` is not enough. The endpoint returns HTTP 503 when CTX credentials are missing, rejected, or when no authenticated probe succeeds. If a prior successful probe exists it will be returned with `status: degraded`. -- `GET /metrics`: Prometheus-compatible transport metrics derived from `MCPServer.get_transport_metrics()`. Gauges report session counts (`status=active|closed`) and negotiated capability adoption (`capability=tools.streams`, `scope=all|active`, `state=enabled|disabled`). 
Integrate the scrape endpoint with your platform’s monitoring stack—see `deploy/prometheus_scrape.yaml` for a vanilla Prometheus job and `deploy/otel_collector_metrics.yaml` for an OpenTelemetry Collector pipeline. +- `GET /metrics`: Prometheus-compatible transport metrics derived from `MCPServer.get_transport_metrics()` when `EPACOMP_MCP_METRICS_ENABLED=1`. Gauges report session counts (`status=active|closed`) and negotiated capability adoption (`capability=tools.streams`, `scope=all|active`, `state=enabled|disabled`). The endpoint requires the configured bearer auth unless `MCP_METRICS_BYPASS_AUTH=1` is explicitly set for deployments where a trusted gateway already protects the scrape path. Integrate the scrape endpoint with your platform’s monitoring stack—see `deploy/prometheus_scrape.yaml` for a vanilla Prometheus job and `deploy/otel_collector_metrics.yaml` for an OpenTelemetry Collector pipeline. +- `GET /.well-known/oauth-protected-resource`: OAuth Protected Resource Metadata for MCP clients. Unauthorized `/mcp` HTTP and `/mcp/ws` WebSocket requests return `WWW-Authenticate` challenges that point clients at this metadata. Configure Kubernetes probes (example): @@ -110,6 +119,8 @@ docker run --rm -p 8443:8443 \ ``` 3. Use a network policy or firewall rule to restrict incoming traffic to trusted agent subnets and Platform load balancers. +4. Keep `BYPASS_AUTH=0` in production. Startup fails when production auth is enabled but issuer, audience, or JWKS settings are incomplete. +5. Use the built-in process-local rate limiter as defense in depth, and enforce shared limits at the ingress/gateway for horizontally scaled deployments. ## Logging and Observability diff --git a/docs/mcp_transport.md b/docs/mcp_transport.md index e570a25..6d50d49 100644 --- a/docs/mcp_transport.md +++ b/docs/mcp_transport.md @@ -10,7 +10,7 @@ This document captures the transport requirements for Model Context Protocol (MC 2. 
Client sends `initialize` request (`jsonrpc` 2.0) including: - `protocolVersion` (server must negotiate from supported set). - `capabilities` requested by client (per MCP spec §3.2). - - Optional session metadata (auth headers, agent info). + - Optional session metadata (agent info and capabilities). Bearer tokens are supplied via the transport `Authorization` header, not echoed through MCP payloads. 3. Server response must include: - Chosen `protocolVersion`. - Server `capabilities` object describing supported features. @@ -23,11 +23,11 @@ This document captures the transport requirements for Model Context Protocol (MC ### Current State -- Server advertises supported protocol versions (`2025-06-18`, `2025-03-26`, `2024-11-05`) and negotiates correctly. +- Server advertises supported protocol versions (`2025-11-25`, `2025-06-18`, `2025-03-26`, `2024-11-05`) and negotiates correctly. - `notifications/initialized` event emitted. - Client capability negotiation is persisted per session; `tools.streams`/`tools.cancel` features downgrade when the client opts out. - Ping/heartbeat logic responds to client `ping` frames and enforces configurable idle timeouts derived from transport settings or client overrides. -- Authentication metadata is included in tool responses so downstream orchestrators can forward bearer tokens and trace identifiers. +- Tool responses include only scrubbed auth summaries, such as hashed subject, issuer, scopes, expiry, and token hash. Raw bearer tokens and client `authentication` payloads are not returned. - Negotiated capability flags are exposed via `MCPServer.get_transport_metrics()` for transport telemetry dashboards. 
### Required Follow-up diff --git a/docs/operations/metrics_integration.md b/docs/operations/metrics_integration.md index aa743a4..b235ff8 100644 --- a/docs/operations/metrics_integration.md +++ b/docs/operations/metrics_integration.md @@ -6,7 +6,10 @@ derived from `MCPServer.get_transport_metrics()`, reporting session counts and capability-negotiation outcomes. ## 1. Prerequisites -- MCP transport running with `/metrics` enabled (FastAPI app exposed on HTTP). +- MCP transport running with `/metrics` enabled via + `EPACOMP_MCP_METRICS_ENABLED=1` (FastAPI app exposed on HTTP). +- A bearer token accepted by the MCP auth policy, unless + `MCP_METRICS_BYPASS_AUTH=1` is deliberately set behind a trusted gateway. - Network connectivity from Prometheus / the OTEL Collector to the transport. - Access to the target monitoring configuration repo (GitOps) or cluster. @@ -15,8 +18,10 @@ capability-negotiation outcomes. repository. 2. Replace the `targets` hostname with the service address for your environment (e.g., Kubernetes service DNS or load balancer). -3. Adjust labels (such as `env`, `service`) to match your dashboard naming. -4. Reload Prometheus or commit the change to your GitOps pipeline. +3. Add the required `Authorization: Bearer ` header or configure a + gateway-side scrape identity. +4. Adjust labels (such as `env`, `service`) to match your dashboard naming. +5. Reload Prometheus or commit the change to your GitOps pipeline. Verify: - Open the Prometheus UI (`/graph`) and query `mcp_sessions_total` to confirm @@ -48,8 +53,11 @@ Verify: validation step (already documented in this commit). ## 6. Troubleshooting -- If `/metrics` returns 404, ensure your deployment uses the refreshed - application module (`epacomp_tox.transport.websocket:app`). +- If `/metrics` returns 404, ensure `EPACOMP_MCP_METRICS_ENABLED=1` and that + your deployment uses the refreshed application module + (`epacomp_tox.transport.websocket:app`). 
+- If `/metrics` returns 401 or 403, check the scrape token issuer, audience, + JWKS, and scopes against `MCP_AUTH_*` settings. - Verify network policies allow the monitoring stack to reach port `8000` (or your chosen bind port). - Enable debug logging on the OTEL collector (`service.telemetry.metrics.level = detailed`) diff --git a/pyproject.toml b/pyproject.toml index adb4058..7d84766 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ dependencies = [ "pydantic>=2.7,<3.0", "pydantic-settings>=2.3,<3.0", "jsonschema>=4.21,<5.0", + "PyJWT[crypto]>=2.8,<3.0", ] [project.optional-dependencies] diff --git a/src/epacomp_tox/assets.py b/src/epacomp_tox/assets.py new file mode 100644 index 0000000..f437675 --- /dev/null +++ b/src/epacomp_tox/assets.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import json +from importlib import resources +from typing import Any, Dict, Iterable, Optional + +DATA_PACKAGE = "epacomp_tox.data" + + +def data_root() -> Any: + """Return the packaged runtime data root.""" + return resources.files(DATA_PACKAGE) + + +def data_file(*parts: str) -> Any: + """Return a Traversable for a packaged runtime data file or directory.""" + current = data_root() + for part in parts: + current = current.joinpath(part) + return current + + +def read_json(*parts: str) -> Dict[str, Any]: + """Read JSON from a packaged runtime data file.""" + return json.loads(data_file(*parts).read_text(encoding="utf-8")) + + +def iter_data_files( + *parts: str, suffix: Optional[str] = None, recursive: bool = False +) -> Iterable[Any]: + """Iterate packaged runtime data files in deterministic name order.""" + base = data_file(*parts) + if not base.is_dir(): + return + for entry in sorted(base.iterdir(), key=lambda item: item.name): + if entry.name.startswith("."): + continue + if entry.is_dir(): + if recursive: + yield from iter_data_files( + *parts, entry.name, suffix=suffix, recursive=True + ) + continue + if suffix is None or 
entry.name.endswith(suffix): + yield entry diff --git a/src/epacomp_tox/contracts/__init__.py b/src/epacomp_tox/contracts/__init__.py index ec3e866..c947a42 100644 --- a/src/epacomp_tox/contracts/__init__.py +++ b/src/epacomp_tox/contracts/__init__.py @@ -2,30 +2,30 @@ import json from functools import lru_cache -from pathlib import Path from typing import Any, Dict, Tuple from jsonschema import Draft202012Validator -SCHEMA_ROOT = Path(__file__).resolve().parents[3] / "docs" / "contracts" / "schemas" +from epacomp_tox.assets import data_file class SchemaValidationError(RuntimeError): """Raised when a payload fails JSON Schema validation.""" -def _schema_path(namespace: str, name: str) -> Path: - return SCHEMA_ROOT / namespace / f"{name}.json" +def _schema_resource(namespace: str, name: str) -> Any: + return data_file("contracts", "schemas", namespace, f"{name}.json") @lru_cache(maxsize=128) def load_schema(namespace: str, name: str) -> Dict[str, Any]: """Load and cache a JSON Schema by namespace/name.""" - path = _schema_path(namespace, name) - if not path.exists(): - raise FileNotFoundError(f"Schema '{namespace}/{name}' not found at {path}") - with path.open("r", encoding="utf-8") as handle: - return json.load(handle) + resource = _schema_resource(namespace, name) + if not resource.is_file(): + raise FileNotFoundError( + f"Schema '{namespace}/{name}' not found in package data" + ) + return json.loads(resource.read_text(encoding="utf-8")) def validate_payload(payload: Any, *, namespace: str, name: str) -> None: diff --git a/src/epacomp_tox/data/__init__.py b/src/epacomp_tox/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_aop.response.schema.json b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_aop.response.schema.json new file mode 100644 index 0000000..c384e2b --- /dev/null +++ 
b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_aop.response.schema.json @@ -0,0 +1,53 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "bioactivity.get_bioactivity_aop.response", + "description": "Adverse outcome pathway mapping records returned from CompTox bioactivity crosswalk queries.", + "type": "array", + "items": { + "type": "object", + "description": "A CompTox AOP mapping record.", + "additionalProperties": true, + "properties": { + "aopId": { + "type": [ + "string", + "number", + "null" + ] + }, + "eventNumber": { + "type": [ + "string", + "number", + "null" + ] + }, + "eventType": { + "type": [ + "string", + "null" + ] + }, + "eventLabel": { + "type": [ + "string", + "null" + ] + }, + "aeid": { + "type": [ + "string", + "number", + "null" + ] + }, + "entrezGeneId": { + "type": [ + "string", + "number", + "null" + ] + } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_assay.response.schema.json b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_assay.response.schema.json new file mode 100644 index 0000000..024c5aa --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_assay.response.schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "bioactivity.get_bioactivity_assay.response", + "description": "Assay annotation payload returned for all-assay, single AEID, single-concentration, or gene-scoped bioactivity assay queries.", + "anyOf": [ + { + "type": "object", + "description": "Structured assay payload returned by the upstream API.", + "additionalProperties": true, + "properties": { + "aeid": { + "type": [ + "string", + "number", + "null" + ] + }, + "assayName": { + "type": [ + "string", + "null" + ] + }, + "geneSymbol": { + "type": [ + "string", + "null" + ] + } + } + }, + { + "type": "array", + "items": { + "type": "object", + "description": "An assay annotation 
record.", + "additionalProperties": true, + "properties": { + "aeid": { + "type": [ + "string", + "number", + "null" + ] + }, + "assayName": { + "type": [ + "string", + "null" + ] + }, + "geneSymbol": { + "type": [ + "string", + "null" + ] + } + } + } + } + ] +} diff --git a/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_summary_by_dtxsid.response.schema.json b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_summary_by_dtxsid.response.schema.json new file mode 100644 index 0000000..40d14c9 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/bioactivity/get_bioactivity_summary_by_dtxsid.response.schema.json @@ -0,0 +1,52 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "bioactivity.get_bioactivity_summary_by_dtxsid.response", + "description": "Bioactivity summary records for a chemical queried by DTXSID.", + "type": "array", + "items": { + "type": "object", + "description": "A bioactivity summary record associated with the requested chemical.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ] + }, + "aeid": { + "type": [ + "string", + "number", + "null" + ] + }, + "assayName": { + "type": [ + "string", + "null" + ] + }, + "geneSymbol": { + "type": [ + "string", + "null" + ] + }, + "hitcall": { + "type": [ + "boolean", + "number", + "null" + ] + }, + "ac50": { + "type": [ + "number", + "string", + "null" + ] + } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/bioactivity/search_bioactivity_terms.response.schema.json b/src/epacomp_tox/data/contracts/schemas/bioactivity/search_bioactivity_terms.response.schema.json new file mode 100644 index 0000000..7b5b373 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/bioactivity/search_bioactivity_terms.response.schema.json @@ -0,0 +1,39 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "bioactivity.search_bioactivity_terms.response", + "description": 
"Bioactivity search-term results returned for exact, prefix, or substring lookups.", + "type": "array", + "items": { + "anyOf": [ + { + "type": "string", + "description": "A matched bioactivity term." + }, + { + "type": "object", + "description": "A matched bioactivity term record when the upstream API returns structured results.", + "additionalProperties": true, + "properties": { + "term": { + "type": [ + "string", + "null" + ] + }, + "displayName": { + "type": [ + "string", + "null" + ] + }, + "category": { + "type": [ + "string", + "null" + ] + } + } + } + ] + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/chemical/ghs_links.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/ghs_links.response.schema.json new file mode 100644 index 0000000..b4091c8 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/ghs_links.response.schema.json @@ -0,0 +1,19 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.check_chemical_ghs_links.response", + "type": "object", + "required": ["source", "results"], + "properties": { + "source": { + "type": "string" + }, + "results": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + } + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/chemical/indigo_convert.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/indigo_convert.response.schema.json new file mode 100644 index 0000000..e01ec5a --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/indigo_convert.response.schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.indigo_convert.response", + "type": "object", + "required": ["outputFormat", "value"], + "properties": { + "outputFormat": { + "type": "string" + }, + "value": {} + }, + "additionalProperties": false +} diff --git 
a/src/epacomp_tox/data/contracts/schemas/chemical/opsin_convert.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/opsin_convert.response.schema.json new file mode 100644 index 0000000..11b0000 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/opsin_convert.response.schema.json @@ -0,0 +1,16 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.opsin_convert.response", + "type": "object", + "required": ["name", "outputFormat", "value"], + "properties": { + "name": { + "type": "string" + }, + "outputFormat": { + "type": "string" + }, + "value": {} + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/chemical/resolve_chemical_identifier.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/resolve_chemical_identifier.response.schema.json new file mode 100644 index 0000000..f8d1498 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/resolve_chemical_identifier.response.schema.json @@ -0,0 +1,72 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.resolve_chemical_identifier.response", + "description": "Deterministic identifier-resolution result for a single chemical input.", + "type": "object", + "additionalProperties": false, + "required": [ + "status", + "inputIdentifier", + "inputType", + "canonicalDtxsid", + "preferredName", + "casrn", + "searchModeUsed", + "candidateCount", + "candidates", + "warnings" + ], + "properties": { + "status": { + "type": "string", + "enum": ["resolved", "ambiguous", "not_found"] + }, + "inputIdentifier": { + "type": "string" + }, + "inputType": { + "type": "string" + }, + "canonicalDtxsid": { + "type": ["string", "null"] + }, + "preferredName": { + "type": ["string", "null"] + }, + "casrn": { + "type": ["string", "null"] + }, + "searchModeUsed": { + "type": ["string", "null"] + }, + "candidateCount": { + "type": "integer", + "minimum": 0 + }, + "candidates": { 
+ "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "dtxsid": {"type": ["string", "null"]}, + "dtxcid": {"type": ["string", "null"]}, + "casrn": {"type": ["string", "null"]}, + "preferredName": {"type": ["string", "null"]}, + "smiles": {"type": ["string", "null"]}, + "searchName": {"type": ["string", "null"]}, + "searchValue": {"type": ["string", "null"]}, + "rank": {"type": ["integer", "null"]}, + "synonyms": { + "type": "array", + "items": {"type": "string"} + } + } + } + }, + "warnings": { + "type": "array", + "items": {"type": "string"} + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/chemical/search_chemical.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/search_chemical.response.schema.json new file mode 100644 index 0000000..c98988d --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/search_chemical.response.schema.json @@ -0,0 +1,53 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.search_chemical.response", + "description": "A list of chemicals matching the search criteria.", + "type": "array", + "items": { + "type": "object", + "description": "A single chemical record from the search results.", + "properties": { + "dtxsid": { + "type": ["string", "null"], + "description": "DSSTox Substance Identifier - unique identifier for the chemical substance" + }, + "dtxcid": { + "type": ["string", "null"], + "description": "DSSTox Chemical Identifier - unique identifier for the chemical structure" + }, + "casrn": { + "type": ["string", "null"], + "description": "CAS Registry Number - Chemical Abstracts Service registry number" + }, + "preferredName": { + "type": ["string", "null"], + "description": "Preferred chemical name in the database" + }, + "smiles": { + "type": ["string", "null"], + "description": "SMILES (Simplified Molecular Input Line Entry System) representation of the chemical structure" + }, + 
"hasStructureImage": { + "type": "integer", + "description": "Flag indicating if a structure image is available (1 = yes, 0 = no)" + }, + "isMarkush": { + "type": "boolean", + "description": "Indicates if this is a Markush structure (generic chemical structure)" + }, + "searchName": { + "type": "string", + "description": "The field name that was searched" + }, + "searchValue": { + "type": "string", + "description": "The value that was searched for" + }, + "rank": { + "type": "integer", + "description": "Search result ranking score" + } + }, + "required": ["dtxsid", "preferredName", "rank"] + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/chemical/structure_file.response.schema.json b/src/epacomp_tox/data/contracts/schemas/chemical/structure_file.response.schema.json new file mode 100644 index 0000000..360ae55 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/chemical/structure_file.response.schema.json @@ -0,0 +1,40 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "chemical.structure_file.response", + "type": "object", + "required": [ + "encoding", + "data", + "identifier", + "identifierType", + "fileFormat" + ], + "properties": { + "encoding": { + "type": "string", + "enum": ["base64", "utf-8"] + }, + "data": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "identifier": { + "type": "string" + }, + "identifierType": { + "type": "string" + }, + "fileFormat": { + "type": "string" + }, + "imageFormat": { + "type": "string" + }, + "length": { + "type": ["integer", "null"] + } + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/cheminformatics/toxprints.response.schema.json b/src/epacomp_tox/data/contracts/schemas/cheminformatics/toxprints.response.schema.json new file mode 100644 index 0000000..8f60ec1 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/cheminformatics/toxprints.response.schema.json @@ -0,0 +1,7 @@ +{ + "$schema": 
"https://json-schema.org/draft/2020-12/schema", + "title": "cheminformatics.toxprints.response", + "type": "object", + "description": "Generic mapping returned by ToxPrint searches; keys and shapes depend on upstream service.", + "additionalProperties": true +} diff --git a/src/epacomp_tox/data/contracts/schemas/common/list_generic.response.schema.json b/src/epacomp_tox/data/contracts/schemas/common/list_generic.response.schema.json new file mode 100644 index 0000000..2a351d9 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/common/list_generic.response.schema.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "common.list_generic.response", + "description": "Generic array schema used for MCP tools that return lists of records or scalar values.", + "type": "array", + "items": {} +} diff --git a/src/epacomp_tox/data/contracts/schemas/common/mapping_list_generic.response.schema.json b/src/epacomp_tox/data/contracts/schemas/common/mapping_list_generic.response.schema.json new file mode 100644 index 0000000..930f5f8 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/common/mapping_list_generic.response.schema.json @@ -0,0 +1,10 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "common.mapping_list_generic.response", + "description": "Schema for responses keyed by identifier with array payloads.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": {} + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/common/object.response.schema.json b/src/epacomp_tox/data/contracts/schemas/common/object.response.schema.json new file mode 100644 index 0000000..f40726e --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/common/object.response.schema.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "common.object.response", + "description": "Generic object schema for MCP tools returning structured mappings.", + 
"type": "object", + "additionalProperties": true +} diff --git a/src/epacomp_tox/data/contracts/schemas/common/object_or_list.response.schema.json b/src/epacomp_tox/data/contracts/schemas/common/object_or_list.response.schema.json new file mode 100644 index 0000000..312aa61 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/common/object_or_list.response.schema.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "common.object_or_list.response", + "description": "Schema allowing tools to return either an object or an array of records.", + "anyOf": [ + { + "type": "object", + "additionalProperties": true + }, + { + "type": "array", + "items": {} + } + ] +} diff --git a/src/epacomp_tox/data/contracts/schemas/exposure/get_exposure_httk.response.schema.json b/src/epacomp_tox/data/contracts/schemas/exposure/get_exposure_httk.response.schema.json new file mode 100644 index 0000000..4283a9d --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/exposure/get_exposure_httk.response.schema.json @@ -0,0 +1,63 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "exposure.get_exposure_httk.response", + "description": "Detailed HTTK records for a single chemical retrieved from the dedicated exposure HTTK endpoint.", + "type": "array", + "items": { + "type": "object", + "description": "A detailed HTTK record.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ] + }, + "casrn": { + "type": [ + "string", + "null" + ] + }, + "preferredName": { + "type": [ + "string", + "null" + ] + }, + "species": { + "type": [ + "string", + "null" + ] + }, + "parameter": { + "type": [ + "string", + "null" + ] + }, + "value": { + "type": [ + "number", + "string", + "null" + ] + }, + "unit": { + "type": [ + "string", + "null" + ] + }, + "model": { + "type": [ + "string", + "null" + ], + "description": "HTTK model or parameter family when present." 
+ } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/exposure/search_cpdat.response.schema.json b/src/epacomp_tox/data/contracts/schemas/exposure/search_cpdat.response.schema.json new file mode 100644 index 0000000..df348ec --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/exposure/search_cpdat.response.schema.json @@ -0,0 +1,53 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "exposure.search_cpdat.response", + "description": "CPDat records returned for functional use, product use category, or list-presence searches.", + "type": "array", + "items": { + "type": "object", + "description": "A CPDat exposure record.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ], + "description": "DSSTox substance identifier when present." + }, + "casrn": { + "type": [ + "string", + "null" + ] + }, + "preferredName": { + "type": [ + "string", + "null" + ] + }, + "functionalUse": { + "type": [ + "string", + "null" + ], + "description": "Functional use term when returned by the chosen vocabulary." + }, + "productUseCategory": { + "type": [ + "string", + "null" + ], + "description": "Product use category when returned by the chosen vocabulary." + }, + "listPresence": { + "type": [ + "string", + "null" + ], + "description": "List-presence tag or keyword when returned by the chosen vocabulary." 
+ } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/exposure/search_httk.response.schema.json b/src/epacomp_tox/data/contracts/schemas/exposure/search_httk.response.schema.json new file mode 100644 index 0000000..993284b --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/exposure/search_httk.response.schema.json @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "exposure.search_httk.response", + "description": "HTTK records returned by the CompTox exposure search surface.", + "type": "array", + "items": { + "type": "object", + "description": "A high-throughput toxicokinetic record.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ] + }, + "casrn": { + "type": [ + "string", + "null" + ] + }, + "preferredName": { + "type": [ + "string", + "null" + ] + }, + "species": { + "type": [ + "string", + "null" + ] + }, + "parameter": { + "type": [ + "string", + "null" + ], + "description": "HTTK parameter name when the payload is parameterized." 
+ }, + "value": { + "type": [ + "number", + "string", + "null" + ] + }, + "unit": { + "type": [ + "string", + "null" + ] + } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/hazard/batch_search_hazard.response.schema.json b/src/epacomp_tox/data/contracts/schemas/hazard/batch_search_hazard.response.schema.json new file mode 100644 index 0000000..70ac005 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/hazard/batch_search_hazard.response.schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "hazard.batch_search_hazard.response", + "description": "Mapping from DTXSID to hazard dataset records for a batch hazard lookup.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "object", + "description": "A hazard record returned for a specific DTXSID in the batch request.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ] + }, + "casrn": { + "type": [ + "string", + "null" + ] + }, + "preferredName": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": [ + "string", + "null" + ] + }, + "effect": { + "type": [ + "string", + "null" + ] + }, + "value": { + "type": [ + "number", + "string", + "null" + ] + }, + "unit": { + "type": [ + "string", + "null" + ] + } + } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/hazard/search_hazard.response.schema.json b/src/epacomp_tox/data/contracts/schemas/hazard/search_hazard.response.schema.json new file mode 100644 index 0000000..6cd8b57 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/hazard/search_hazard.response.schema.json @@ -0,0 +1,63 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "hazard.search_hazard.response", + "description": "Hazard dataset records returned for a single chemical lookup across ToxValDB, ToxRefDB, cancer, genetox, ADME/IVIVE, IRIS, PPRTV, or HAWC selectors.", + "type": "array", + 
"items": { + "type": "object", + "description": "A hazard record from the selected CompTox hazard dataset.", + "additionalProperties": true, + "properties": { + "dtxsid": { + "type": [ + "string", + "null" + ], + "description": "DSSTox substance identifier when present in the upstream payload." + }, + "casrn": { + "type": [ + "string", + "null" + ], + "description": "CAS Registry Number when present." + }, + "preferredName": { + "type": [ + "string", + "null" + ], + "description": "Preferred chemical name when present." + }, + "source": { + "type": [ + "string", + "null" + ], + "description": "Source dataset, agency, or citation label." + }, + "effect": { + "type": [ + "string", + "null" + ], + "description": "Reported effect or endpoint label when applicable." + }, + "value": { + "type": [ + "number", + "string", + "null" + ], + "description": "Reported hazard value when applicable." + }, + "unit": { + "type": [ + "string", + "null" + ], + "description": "Unit associated with the reported value." 
+ } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/manifest/get_contract_manifest.response.schema.json b/src/epacomp_tox/data/contracts/schemas/manifest/get_contract_manifest.response.schema.json new file mode 100644 index 0000000..2ce17b4 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/manifest/get_contract_manifest.response.schema.json @@ -0,0 +1,153 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://toxmcp.org/schemas/contracts/manifest/get_contract_manifest.response.schema.json", + "title": "get_contract_manifest response", + "type": "object", + "properties": { + "server": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "title": { "type": "string" }, + "version": { "type": "string" }, + "resourceCount": { "type": "integer", "minimum": 0 }, + "toolCount": { "type": "integer", "minimum": 0 }, + "transportEndpoints": { + "type": "array", + "items": { "type": "string" } + } + }, + "required": ["name", "title", "version", "resourceCount", "toolCount", "transportEndpoints"], + "additionalProperties": true + }, + "publicBoundary": { + "type": "object", + "properties": { + "primaryRole": { "type": "string" }, + "screeningRole": { "type": "string" }, + "experimentalModules": { + "type": "array", + "items": { "type": "string" } + }, + "notOwnedByCompToxMcp": { + "type": "array", + "items": { "type": "string" } + } + }, + "required": ["primaryRole", "screeningRole", "experimentalModules", "notOwnedByCompToxMcp"], + "additionalProperties": true + }, + "resources": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "description": { "type": "string" }, + "url": { "type": "string" }, + "toolNames": { + "type": "array", + "items": { "type": "string" } + } + }, + "required": ["name", "description", "url", "toolNames"], + "additionalProperties": true + } + }, + "tools": { + "type": "array", + "items": { + "type": "object", + "properties": 
{ + "name": { "type": "string" }, + "resource": { "type": "string" }, + "hasOutputSchema": { "type": "boolean" }, + "responseSchemaRef": { + "type": ["object", "null"], + "properties": { + "namespace": { "type": "string" }, + "name": { "type": "string" } + }, + "required": ["namespace", "name"], + "additionalProperties": false + } + }, + "required": ["name", "resource", "hasOutputSchema"], + "additionalProperties": true + } + }, + "portableObjectSchemas": { + "type": "array", + "items": { + "type": "object", + "properties": { + "file": { "type": "string" }, + "title": { "type": "string" }, + "schemaId": { "type": "string" }, + "exampleFile": { "type": "string" } + }, + "required": ["file"], + "additionalProperties": true + } + }, + "responseSchemas": { + "type": "array", + "items": { + "type": "object", + "properties": { + "namespace": { "type": "string" }, + "file": { "type": "string" }, + "path": { "type": "string" } + }, + "required": ["namespace", "file", "path"], + "additionalProperties": false + } + }, + "publicContractReferences": { + "type": "object", + "properties": { + "interop": { + "type": "array", + "items": { "$ref": "#/$defs/contractReference" } + }, + "screeningPrioritization": { + "type": "array", + "items": { "$ref": "#/$defs/contractReference" } + } + }, + "required": ["interop", "screeningPrioritization"], + "additionalProperties": false + } + }, + "required": [ + "server", + "publicBoundary", + "resources", + "tools", + "portableObjectSchemas", + "responseSchemas", + "publicContractReferences" + ], + "additionalProperties": true, + "$defs": { + "contractReference": { + "type": "object", + "properties": { + "toolName": { "type": "string" }, + "responseSchemaRef": { + "type": "object", + "properties": { + "namespace": { "type": "string" }, + "name": { "type": "string" } + }, + "required": ["namespace", "name"], + "additionalProperties": false + }, + "portableSchema": { "type": "string" }, + "exampleFile": { "type": "string" } + }, + "required": 
["toolName", "responseSchemaRef"], + "additionalProperties": true + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/metadata/applicability_detail.response.schema.json b/src/epacomp_tox/data/contracts/schemas/metadata/applicability_detail.response.schema.json new file mode 100644 index 0000000..5195e64 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/metadata/applicability_detail.response.schema.json @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "metadata.get_applicability_domain.response", + "type": "object", + "required": ["model", "version", "criteria", "policy"], + "properties": { + "model": {"type": "string"}, + "version": {"type": "string"}, + "criteria": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "policy": {"type": "string"}, + "errorCode": {"type": ["string", "null"]}, + "documentedCriteria": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "delegatedCriteria": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "locallyEnforcedCriteria": { + "type": "array", + "items": {"type": "string"} + }, + "enforcementLocation": { + "type": "string", + "enum": ["delegated-service", "local-engine"] + }, + "guardrailStatus": { + "type": "object", + "additionalProperties": true + }, + "references": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + } + }, + "additionalProperties": true +} diff --git a/src/epacomp_tox/data/contracts/schemas/metadata/applicability_list.response.schema.json b/src/epacomp_tox/data/contracts/schemas/metadata/applicability_list.response.schema.json new file mode 100644 index 0000000..2934c9f --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/metadata/applicability_list.response.schema.json @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": 
"metadata.list_applicability_domain.response", + "type": "object", + "required": ["applicabilityDomains", "nextCursor"], + "properties": { + "applicabilityDomains": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "nextCursor": {"type": ["string", "null"]} + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/metadata/model_cards.response.schema.json b/src/epacomp_tox/data/contracts/schemas/metadata/model_cards.response.schema.json new file mode 100644 index 0000000..e46dfe7 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/metadata/model_cards.response.schema.json @@ -0,0 +1,43 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "metadata.get_model_card.response", + "type": "object", + "required": ["modelCards", "nextCursor"], + "properties": { + "modelCards": { + "type": "array", + "items": { + "type": "object", + "required": ["card", "checksum", "lastModified"], + "properties": { + "card": {"type": "object"}, + "checksum": {"type": "string"}, + "lastModified": {"type": "string"}, + "documentedCriteria": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "delegatedCriteria": { + "type": "array", + "items": {"type": "object", "additionalProperties": true} + }, + "locallyEnforcedCriteria": { + "type": "array", + "items": {"type": "string"} + }, + "enforcementLocation": { + "type": "string", + "enum": ["delegated-service", "local-engine"] + }, + "guardrailStatus": { + "type": "object", + "additionalProperties": true + } + }, + "additionalProperties": true + } + }, + "nextCursor": {"type": ["string", "null"]} + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/predictive/ad_check.response.schema.json b/src/epacomp_tox/data/contracts/schemas/predictive/ad_check.response.schema.json new file mode 100644 index 0000000..618a875 --- /dev/null +++ 
b/src/epacomp_tox/data/contracts/schemas/predictive/ad_check.response.schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "predictive.check_applicability_domain.response", + "type": "object", + "required": ["in_domain", "confidence", "details"], + "properties": { + "in_domain": {"type": "boolean"}, + "confidence": {"type": "number"}, + "details": {"type": "object", "additionalProperties": true} + }, + "additionalProperties": true +} diff --git a/src/epacomp_tox/data/contracts/schemas/predictive/predict.response.schema.json b/src/epacomp_tox/data/contracts/schemas/predictive/predict.response.schema.json new file mode 100644 index 0000000..c4df305 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/predictive/predict.response.schema.json @@ -0,0 +1,32 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "predictive.predict.response", + "type": "object", + "required": ["prediction", "applicability_domain", "metadata"], + "properties": { + "prediction": { + "type": "object", + "additionalProperties": true + }, + "applicability_domain": { + "$ref": "#/$defs/adCheck" + }, + "metadata": { + "type": "object", + "additionalProperties": true + } + }, + "$defs": { + "adCheck": { + "type": "object", + "required": ["in_domain", "confidence", "details"], + "properties": { + "in_domain": {"type": "boolean"}, + "confidence": {"type": "number"}, + "details": {"type": "object", "additionalProperties": true} + }, + "additionalProperties": true + } + }, + "additionalProperties": false +} diff --git a/src/epacomp_tox/data/contracts/schemas/risk/prioritize_risk_signals.response.schema.json b/src/epacomp_tox/data/contracts/schemas/risk/prioritize_risk_signals.response.schema.json new file mode 100644 index 0000000..2221254 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/risk/prioritize_risk_signals.response.schema.json @@ -0,0 +1,135 @@ +{ + "$schema": 
"https://json-schema.org/draft/2020-12/schema", + "$id": "https://toxmcp.org/schemas/contracts/risk/prioritize_risk_signals.response.schema.json", + "title": "prioritize_risk_signals response", + "type": "object", + "properties": { + "chemicalRef": { + "type": "object", + "properties": { + "dtxsid": { "type": "string" }, + "preferredName": { "type": "string" }, + "casrn": { "type": "string" } + }, + "required": ["dtxsid", "preferredName"], + "additionalProperties": true + }, + "identityResolution": { + "type": "object", + "additionalProperties": true + }, + "hazardSignal": { + "type": "object", + "properties": { + "recordCount": { "type": "integer", "minimum": 0 }, + "sourceTool": { "type": "string" }, + "selectedMetric": { + "type": ["object", "null"], + "additionalProperties": true + } + }, + "required": ["recordCount", "sourceTool"], + "additionalProperties": true + }, + "exposureSignal": { + "type": "object", + "properties": { + "seem": { "$ref": "#/$defs/evidenceSlice" }, + "httk": { "$ref": "#/$defs/evidenceSlice" }, + "mmdb": { "$ref": "#/$defs/evidenceSlice" }, + "cpdat": { "$ref": "#/$defs/evidenceSlice" } + }, + "required": ["seem", "httk", "mmdb", "cpdat"], + "additionalProperties": false + }, + "prioritization": { + "type": "object", + "properties": { + "priorityBand": { + "type": "string", + "enum": ["higher", "moderate", "lower", "inconclusive"] + }, + "marginOfExposure": { "type": ["number", "null"] }, + "hazardPointOfDeparture": { "type": ["number", "null"] }, + "hazardUnit": { "type": ["string", "null"] }, + "exposureEstimate": { "type": ["number", "null"] }, + "exposureUnit": { "type": ["string", "null"] }, + "signalDirection": { "type": "string" }, + "priorityHeuristic": { + "type": "object", + "additionalProperties": { "type": "string" } + }, + "basis": { "type": "string" }, + "supportingSignals": { + "type": "array", + "items": { "type": "string" } + }, + "caveats": { + "type": "array", + "items": { "type": "string" } + } + }, + "required": [ + 
"priorityBand", + "marginOfExposure", + "signalDirection", + "basis", + "caveats" + ], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": { "type": "string" } + }, + "limitations": { + "type": "array", + "items": { "type": "string" } + }, + "generatedFromTools": { + "type": "array", + "items": { "type": "string" } + }, + "provenanceSummary": { + "type": "object", + "properties": { + "generatedBy": { "type": "string" }, + "generatedAt": { "type": "string" }, + "sourceCount": { "type": "integer", "minimum": 0 }, + "sourceTools": { + "type": "array", + "items": { "type": "string" } + }, + "identityMode": { "type": "string" } + }, + "required": ["generatedBy", "generatedAt", "sourceCount", "sourceTools", "identityMode"], + "additionalProperties": true + } + }, + "required": [ + "chemicalRef", + "hazardSignal", + "exposureSignal", + "prioritization", + "knownDataGaps", + "limitations", + "generatedFromTools", + "provenanceSummary" + ], + "additionalProperties": true, + "$defs": { + "evidenceSlice": { + "type": "object", + "properties": { + "recordCount": { "type": "integer", "minimum": 0 }, + "sourceTool": { "type": "string" }, + "selectedMetrics": { + "type": ["object", "null"], + "additionalProperties": true + } + }, + "required": ["recordCount", "sourceTool"], + "additionalProperties": true + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/workflow/aop_linkage_summary.response.schema.json b/src/epacomp_tox/data/contracts/schemas/workflow/aop_linkage_summary.response.schema.json new file mode 100644 index 0000000..24797d8 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/workflow/aop_linkage_summary.response.schema.json @@ -0,0 +1,125 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "workflow.aop_linkage_summary.response", + "description": "CompTox-side AOP linkage summary prepared for downstream mechanistic consumers.", + "type": "object", + "additionalProperties": false, + 
"required": [ + "chemicalRef", + "lookupMode", + "mappings", + "supportingAssays", + "confidence", + "provenance" + ], + "properties": { + "chemicalRef": { + "type": "object", + "additionalProperties": true, + "required": [ + "dtxsid", + "preferredName" + ], + "properties": { + "dtxsid": { + "type": "string" + }, + "preferredName": { + "type": "string" + }, + "casrn": { + "type": [ + "string", + "null" + ] + } + } + }, + "lookupMode": { + "type": "string" + }, + "mappings": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "supportingAssays": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "confidence": { + "type": "object", + "additionalProperties": true, + "required": [ + "score" + ], + "properties": { + "score": { + "type": "number" + } + } + }, + "identityResolution": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "data": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "metadata": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "provenance": { + "type": "object", + "additionalProperties": true, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string" + }, + "sources": { + "type": "array" + } + } + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/workflow/comptox_evidence_pack.response.schema.json b/src/epacomp_tox/data/contracts/schemas/workflow/comptox_evidence_pack.response.schema.json new file mode 100644 index 0000000..7aaa3ed --- /dev/null +++ 
b/src/epacomp_tox/data/contracts/schemas/workflow/comptox_evidence_pack.response.schema.json @@ -0,0 +1,127 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "workflow.comptox_evidence_pack.response", + "description": "Portable CompTox evidence pack assembled from identity, hazard, exposure, bioactivity, AOP, and PBPK-context slices.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalIdentity", + "metadata", + "audit", + "semanticCoverage" + ], + "properties": { + "chemicalIdentity": { + "type": "object", + "additionalProperties": true, + "required": [ + "dtxsid", + "preferredName", + "provenance" + ] + }, + "hazardEvidenceSummary": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "exposureEvidenceSummary": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "bioactivityEvidenceSummary": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "aopLinkageSummary": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "pbpkContextBundle": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "identityResolution": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "data": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "metadata": { + "type": "object", + "additionalProperties": true, + "required": [ + "packId", + "sourceMcp", + "createdAt", + "suiteRole" + ] + }, + "mcpMetadata": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "audit": { + "type": "object", + "additionalProperties": true, + 
"required": [ + "generatedAt", + "generatedBy", + "sourceTools" + ] + }, + "semanticCoverage": { + "type": "object", + "additionalProperties": true, + "required": [ + "identity", + "hazard", + "exposure", + "bioactivity", + "aopLinkage", + "pbpkContext" + ] + } + } +} diff --git a/src/epacomp_tox/data/contracts/schemas/workflow/pbpk_context_bundle.response.schema.json b/src/epacomp_tox/data/contracts/schemas/workflow/pbpk_context_bundle.response.schema.json new file mode 100644 index 0000000..3e748d3 --- /dev/null +++ b/src/epacomp_tox/data/contracts/schemas/workflow/pbpk_context_bundle.response.schema.json @@ -0,0 +1,126 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "workflow.pbpk_context_bundle.response", + "description": "CompTox-side PBPK context package prepared for downstream PBPK workflows.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalIdentityRef", + "httkSlice", + "hazardAdmeIviveSlice", + "exposureHints", + "modelCardRefs", + "provenance", + "handoffTarget" + ], + "properties": { + "chemicalIdentityRef": { + "type": "object", + "additionalProperties": true, + "required": [ + "dtxsid", + "preferredName", + "provenance" + ], + "properties": { + "dtxsid": { + "type": "string" + }, + "preferredName": { + "type": "string" + }, + "provenance": { + "type": "object" + } + } + }, + "httkSlice": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "hazardAdmeIviveSlice": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "exposureHints": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "modelCardRefs": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "identityResolution": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "data": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "knownDataGaps": { + 
"type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "metadata": { + "type": [ + "object", + "null" + ], + "additionalProperties": true + }, + "provenance": { + "type": "object", + "additionalProperties": true, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string" + }, + "sources": { + "type": "array" + } + } + }, + "handoffTarget": { + "type": "string" + } + } +} diff --git a/src/epacomp_tox/data/metadata/applicability_domains/README.md b/src/epacomp_tox/data/metadata/applicability_domains/README.md new file mode 100644 index 0000000..9a9eff9 --- /dev/null +++ b/src/epacomp_tox/data/metadata/applicability_domains/README.md @@ -0,0 +1,8 @@ +# Applicability Domain Reference Data + +Machine-readable definitions for applicability domain (AD) guardrails. Each JSON file aligns with a predictive model card and provides detailed parameters used by `PredictiveServiceBase` implementations during AD enforcement. + +Conventions: +- One JSON per model (e.g., `test_consensus_ad.json`, `opera_property_ad.json`, `genra_read_across_ad.json`). +- Each file contains descriptors, thresholds, similarity parameters, and references. +- Files should be versioned alongside model cards and validated in CI (Task 2.5). 
diff --git a/src/epacomp_tox/data/metadata/applicability_domains/genra_read_across_ad.json b/src/epacomp_tox/data/metadata/applicability_domains/genra_read_across_ad.json new file mode 100644 index 0000000..1d8be0b --- /dev/null +++ b/src/epacomp_tox/data/metadata/applicability_domains/genra_read_across_ad.json @@ -0,0 +1,29 @@ +{ + "model": "GenRA Read-Across Workflow", + "version": "2.1.0", + "criteria": [ + { + "type": "similarity", + "metric": "tanimoto", + "threshold": 0.7, + "minAnalogues": 3 + }, + { + "type": "coverage", + "requirements": ["in vivo", "in vitro"], + "minimumDomains": 2 + }, + { + "type": "expert_rule", + "rule": "Mode of action tags must align", + "allowableMismatch": 1 + } + ], + "policy": "block", + "errorCode": "GENRA_AD_FAIL", + "references": [ + { + "citation": "GenRA Technical Manual 2025" + } + ] +} diff --git a/src/epacomp_tox/data/metadata/applicability_domains/opera_property_ad.json b/src/epacomp_tox/data/metadata/applicability_domains/opera_property_ad.json new file mode 100644 index 0000000..cd5ad98 --- /dev/null +++ b/src/epacomp_tox/data/metadata/applicability_domains/opera_property_ad.json @@ -0,0 +1,24 @@ +{ + "model": "OPERA Property Predictions", + "version": "3.6.1", + "criteria": [ + { + "type": "descriptor_range", + "descriptors": ["atomCount", "bondCount", "polarSurfaceArea"], + "range": {"mode": "min_max"} + }, + { + "type": "similarity", + "metric": "tanimoto", + "threshold": 0.6, + "neighbors": 5 + } + ], + "policy": "warn", + "errorCode": "OPERA_AD_WARN", + "references": [ + { + "citation": "OPERA Manual 2025" + } + ] +} diff --git a/src/epacomp_tox/data/metadata/applicability_domains/test_consensus_ad.json b/src/epacomp_tox/data/metadata/applicability_domains/test_consensus_ad.json new file mode 100644 index 0000000..21b9efe --- /dev/null +++ b/src/epacomp_tox/data/metadata/applicability_domains/test_consensus_ad.json @@ -0,0 +1,25 @@ +{ + "model": "TEST Consensus Acute Toxicity", + "version": "5.2.0", + 
"criteria": [ + { + "type": "descriptor_range", + "descriptors": ["logS", "logP", "LUMO", "polarSurfaceArea"], + "range": {"lowerPercentile": 0.05, "upperPercentile": 0.95} + }, + { + "type": "similarity", + "metric": "tanimoto", + "threshold": 0.65, + "fingerprint": "pubchem" + } + ], + "policy": "block", + "errorCode": "TEST_AD_FAIL", + "references": [ + { + "citation": "Mansouri et al. 2018", + "doi": "10.1021/acs.jcim.7b00524" + } + ] +} diff --git a/src/epacomp_tox/data/metadata/model_cards/genra_read_across.json b/src/epacomp_tox/data/metadata/model_cards/genra_read_across.json new file mode 100644 index 0000000..8a88c9a --- /dev/null +++ b/src/epacomp_tox/data/metadata/model_cards/genra_read_across.json @@ -0,0 +1,209 @@ +{ + "schemaVersion": "1.0", + "modelDetails": { + "name": "GenRA Read-Across Workflow", + "version": "2.1.0", + "modelType": "Read-Across", + "description": "Generalized read-across workflow combining analogue search, evidence weighting, and prediction synthesis.", + "developers": [ + { + "name": "EPA Computational Toxicology" + } + ], + "organizations": [ + "US EPA" + ], + "releaseDate": "2025-03-05", + "license": "EPA Terms of Use" + }, + "intendedUse": { + "summary": "Supports regulatory read-across decisions for data gap filling and hazard assessment.", + "inScope": [ + "Organic chemicals with available ToxCast/ToxVal analogues" + ], + "outOfScope": [ + "Chemicals lacking sufficient analogue coverage", + "Mixtures" + ], + "limitations": [ + "Requires SME review when analogue similarity < 0.7." + ], + "warnings": [ + "Document evidence narrative before external submission." 
+ ], + "regulatoryPrograms": [ + "TSCA New Chemicals", + "OECD Cooperative Chemicals Assessment" + ] + }, + "oecdValidationPrinciples": { + "definedEndpoint": { + "description": "Endpoints inherited from analogue dataset (e.g., repeat-dose toxicity LOAEL)", + "unit": "varies by endpoint" + }, + "unambiguousAlgorithm": { + "summary": "Analogue search using structural fingerprints, evidence scoring across data streams, Bayesian-weighted prediction aggregation.", + "methodClass": "Read-Across", + "implementation": "GenRA Service 2.1", + "references": [ + { + "citation": "Patlewicz et al. 2015", + "doi": "10.1093/toxsci/kfv169" + } + ] + }, + "definedApplicabilityDomain": { + "summary": "Assess analogue availability, structural similarity, and metadata completeness before generating predictions.", + "relatedTools": [ + "genra.check_applicability_domain" + ], + "references": [ + { + "citation": "GenRA Technical Manual 2025" + } + ] + }, + "goodnessOfFitMetrics": { + "internalValidation": [ + { + "name": "Coverage", + "value": 0.78, + "dataset": "historical read-across cases" + } + ], + "externalValidation": [ + { + "name": "Accuracy", + "value": 0.72, + "dataset": "case studies" + }, + { + "name": "Precision", + "value": 0.69, + "dataset": "case studies" + } + ] + }, + "mechanisticInterpretation": { + "summary": "Evidence weighting prioritizes analogues sharing mode-of-action descriptors and toxicity pathways.", + "confidence": "moderate" + } + }, + "trainingData": { + "dataset": { + "name": "GenRA Analogue Library 2025", + "source": "EPA CompTox", + "description": "Curated analogue relationships with experimental endpoints" + }, + "records": 1200, + "chemicalCount": 850, + "descriptorCount": 60, + "preprocessing": "Harmonized identifiers, removal of conflicting analogue evidence, assignment of mode-of-action tags." 
+ }, + "evaluationData": { + "datasets": [ + { + "name": "GenRA Case Studies", + "source": "EPA Internal", + "description": "Historical regulatory read-across decisions" + } + ], + "validationApproach": "Leave-one-target-out analogue removal", + "metrics": [ + { + "name": "Balanced Accuracy", + "value": 0.71, + "dataset": "case studies" + }, + { + "name": "Coverage", + "value": 0.76, + "dataset": "case studies" + } + ], + "applicabilityDomainCoverage": 0.82 + }, + "applicabilityDomain": { + "summary": "Composite checks for analogue similarity, data completeness, and evidence diversity.", + "criteria": [ + { + "type": "similarity", + "description": "At least three structural analogues with Tanimoto similarity >= 0.7.", + "parameters": { + "threshold": 0.7, + "minAnalogues": 3 + } + }, + { + "type": "coverage", + "description": "Analogues must span at least two evidence domains (in vivo, in vitro, in silico).", + "parameters": { + "minDomains": 2 + } + }, + { + "type": "expert_rule", + "description": "Mode-of-action tags must align across selected analogues.", + "parameters": { + "allowableMismatch": 1 + } + } + ], + "enforcement": { + "mcpTools": [ + "genra.check_applicability_domain" + ], + "policy": "block", + "errorCodes": [ + "GENRA_AD_FAIL" + ] + }, + "confidenceBands": [ + { + "label": "Robust", + "minConfidence": 0.8, + "actions": [ + "Eligible for automated dossier generation" + ] + }, + { + "label": "Limited", + "minConfidence": 0.5, + "actions": [ + "Requires SME justification and documentation" + ] + } + ] + }, + "ethicalConsiderations": { + "risks": [ + "Analogues may introduce hidden biases when evidence base is uneven." + ], + "mitigations": [ + "Require documentation of analogue selection rationale and SME oversight." + ], + "humanOversight": "SME approval mandated for final predictions and evidence narratives." 
+ }, + "provenance": { + "sourceRepositories": [ + "https://github.com/epa/genra" + ], + "build": { + "id": "genra-build-2025-03-05", + "timestamp": "2025-03-05T09:15:00Z", + "environment": "EPA CICD" + }, + "checksum": { + "algorithm": "SHA256", + "value": "3ce4ec4983d3e7c6b2089b967679f5fc293096750293eb98d2b211f780a1f95e" + }, + "reviewStatus": { + "approvedBy": [ + { + "name": "Regulatory Affairs Read-Across Committee" + } + ], + "approvalDate": "2025-03-10" + } + } +} \ No newline at end of file diff --git a/src/epacomp_tox/data/metadata/model_cards/opera_property.json b/src/epacomp_tox/data/metadata/model_cards/opera_property.json new file mode 100644 index 0000000..edc6439 --- /dev/null +++ b/src/epacomp_tox/data/metadata/model_cards/opera_property.json @@ -0,0 +1,215 @@ +{ + "schemaVersion": "1.0", + "modelDetails": { + "name": "OPERA Property Predictions", + "version": "3.6.1", + "modelType": "QSAR", + "description": "Predicts physicochemical properties (LogP, water solubility, vapor pressure) using OPERA ensemble models.", + "developers": [ + { + "name": "NIEHS NICEATM" + }, + { + "name": "EPA Computational Toxicology" + } + ], + "organizations": [ + "US EPA", + "NIEHS" + ], + "releaseDate": "2025-02-20", + "license": "OPERA EULA" + }, + "intendedUse": { + "summary": "Supports exposure assessment workflows requiring physicochemical property estimates for organic chemicals.", + "inScope": [ + "Neutral organic chemicals", + "Screening-level exposure modelling" + ], + "outOfScope": [ + "Inorganic substances", + "Highly ionized species" + ], + "limitations": [ + "Predictions outside training descriptor ranges may be unreliable." + ], + "warnings": [ + "Verify units when integrating with downstream PBPK models." 
+ ], + "regulatoryPrograms": [ + "TSCA Existing Chemicals", + "REACH dossier support" + ] + }, + "oecdValidationPrinciples": { + "definedEndpoint": { + "description": "LogP, water solubility (log mol/L), vapor pressure (log Pa)", + "unit": "log scale" + }, + "unambiguousAlgorithm": { + "summary": "Random forest and support vector regression ensembles with descriptor selection.", + "methodClass": "Ensemble", + "implementation": "OPERA CLI 3.6", + "references": [ + { + "citation": "Mansouri et al. 2018", + "doi": "10.1021/acs.jcim.7b00524" + } + ] + }, + "definedApplicabilityDomain": { + "summary": "Combines leverage statistics with similarity to nearest neighbors in descriptor space.", + "relatedTools": [ + "opera.check_applicability_domain" + ], + "references": [ + { + "citation": "OPERA Technical Documentation 2024" + } + ] + }, + "goodnessOfFitMetrics": { + "internalValidation": [ + { + "name": "R2", + "value": 0.92, + "dataset": "training", + "description": "LogP" + } + ], + "externalValidation": [ + { + "name": "RMSE", + "value": 0.31, + "dataset": "external", + "description": "LogP", + "units": "log" + }, + { + "name": "RMSE", + "value": 0.45, + "dataset": "external", + "description": "Water Solubility", + "units": "log mol/L" + } + ] + }, + "mechanisticInterpretation": { + "summary": "Descriptors capture polar surface area, hydrogen bonding, and fragment counts aligned with property trends.", + "confidence": "moderate" + } + }, + "trainingData": { + "dataset": { + "name": "OPERA Training Library 2024", + "source": "EPA CompTox", + "description": "Consolidated experimental property measurements" + }, + "records": 2500, + "chemicalCount": 2200, + "descriptorCount": 45, + "preprocessing": "Standardized structures (neutralized), removal of salts, descriptor scaling.", + "classBalance": "Continuous endpoints" + }, + "evaluationData": { + "datasets": [ + { + "name": "OPERA External Validation", + "source": "EPA CompTox", + "description": "Hold-out dataset of 
curated property measurements" + } + ], + "validationApproach": "80/20 train-test split with 5-fold cross-validation", + "metrics": [ + { + "name": "MAE", + "value": 0.28, + "dataset": "external", + "description": "LogP" + }, + { + "name": "R2", + "value": 0.85, + "dataset": "external", + "description": "Vapor Pressure" + } + ], + "applicabilityDomainCoverage": 0.9 + }, + "applicabilityDomain": { + "summary": "Descriptor range checks plus nearest-neighbor similarity enforced before prediction delivery.", + "criteria": [ + { + "type": "descriptor_range", + "description": "Each descriptor must fall within training min/max after scaling.", + "parameters": { + "mode": "min_max" + } + }, + { + "type": "similarity", + "description": "Average Tanimoto similarity to top 5 training neighbors >= 0.6.", + "parameters": { + "threshold": 0.6, + "neighbors": 5 + } + } + ], + "enforcement": { + "mcpTools": [ + "opera.check_applicability_domain" + ], + "policy": "warn", + "errorCodes": [ + "OPERA_AD_WARN" + ] + }, + "confidenceBands": [ + { + "label": "High", + "minConfidence": 0.75, + "actions": [ + "Auto-approve" + ] + }, + { + "label": "Low", + "minConfidence": 0.5, + "actions": [ + "Escalate to SME" + ] + } + ], + "references": [ + { + "citation": "OPERA Manual 2025" + } + ] + }, + "ethicalConsiderations": { + "risks": [ + "Limited coverage for highly polar or reactive chemicals." + ], + "mitigations": [ + "Flag low-confidence predictions for manual review." 
+ ] + }, + "provenance": { + "sourceRepositories": [ + "https://github.com/kmansouri/OPERA" + ], + "build": { + "id": "opera-build-2025-02-20", + "timestamp": "2025-02-20T14:30:00Z", + "environment": "GitHub Actions" + }, + "checksum": { + "algorithm": "SHA256", + "value": "79af18b3515e9a1d69037e2a154c7c6088cf3fae8c388ff901abdadf5a304a52" + }, + "reviewStatus": { + "approvedBy": [], + "notes": "Pending SME review" + } + } +} \ No newline at end of file diff --git a/src/epacomp_tox/data/metadata/model_cards/test_consensus.json b/src/epacomp_tox/data/metadata/model_cards/test_consensus.json new file mode 100644 index 0000000..c92aed0 --- /dev/null +++ b/src/epacomp_tox/data/metadata/model_cards/test_consensus.json @@ -0,0 +1,199 @@ +{ + "schemaVersion": "1.0", + "modelDetails": { + "name": "TEST Consensus Acute Toxicity", + "version": "5.2.0", + "modelType": "QSAR", + "description": "Predicts acute aquatic toxicity using consensus of TEST models.", + "developers": [ + { + "name": "EPA Computational Toxicology" + } + ], + "organizations": [ + "US EPA" + ], + "releaseDate": "2025-01-15", + "license": "EPA Terms of Use" + }, + "intendedUse": { + "summary": "Supports screening-level acute aquatic toxicity assessments for organic chemicals.", + "inScope": [ + "Non-ionic organic chemicals", + "Screening-level prioritization" + ], + "outOfScope": [ + "Ionic species", + "Metals" + ], + "limitations": [ + "Do not apply to mixtures without expert review." + ], + "warnings": [ + "Use applicability domain checks prior to decision-making." 
+ ], + "regulatoryPrograms": [ + "TSCA New Chemicals" + ] + }, + "oecdValidationPrinciples": { + "definedEndpoint": { + "description": "96-hour fathead minnow LC50", + "unit": "mg/L", + "speciesOrSystem": "Pimephales promelas" + }, + "unambiguousAlgorithm": { + "summary": "Consensus of multiple QSAR models combining regression and classification outputs.", + "methodClass": "Ensemble", + "implementation": "TEST v5.2", + "references": [ + { + "citation": "TEST user manual 2024" + } + ] + }, + "definedApplicabilityDomain": { + "summary": "Leverage and descriptor range checks against training set.", + "references": [ + { + "citation": "Mansouri et al. 2018", + "doi": "10.1021/acs.jcim.7b00524" + } + ], + "relatedTools": [ + "test.check_applicability_domain" + ] + }, + "goodnessOfFitMetrics": { + "internalValidation": [ + { + "name": "R2", + "value": 0.81, + "dataset": "training" + } + ], + "externalValidation": [ + { + "name": "Q2", + "value": 0.74, + "dataset": "external" + }, + { + "name": "RMSE", + "value": 0.45, + "units": "log10" + } + ] + }, + "mechanisticInterpretation": { + "summary": "Descriptors capture hydrophobicity and molecular size consistent with narcosis mode of action.", + "confidence": "moderate" + } + }, + "trainingData": { + "dataset": { + "name": "TEST Training Set 2024", + "source": "EPA CompTox", + "description": "Curated LC50 dataset for freshwater species" + }, + "records": 580, + "chemicalCount": 560, + "descriptorCount": 35, + "preprocessing": "Standardization of chemical identifiers and removal of salts." 
+ }, + "evaluationData": { + "datasets": [ + { + "name": "Fathead Minnow External", + "source": "EPA AQUIRE", + "description": "Independent validation dataset" + } + ], + "validationApproach": "Hold-out external validation", + "metrics": [ + { + "name": "RMSE", + "value": 0.52, + "dataset": "external", + "units": "log10" + } + ], + "applicabilityDomainCoverage": 0.88 + }, + "applicabilityDomain": { + "summary": "Combines leverage thresholds with descriptor range checks.", + "criteria": [ + { + "type": "descriptor_range", + "description": "All descriptors must fall within 5th-95th percentile of training set.", + "parameters": { + "percentileLower": 0.05, + "percentileUpper": 0.95 + } + }, + { + "type": "similarity", + "description": "Tanimoto similarity to nearest neighbor must exceed 0.65.", + "parameters": { + "threshold": 0.65 + } + } + ], + "enforcement": { + "mcpTools": [ + "test.check_applicability_domain" + ], + "policy": "block", + "errorCodes": [ + "TEST_AD_FAIL" + ] + }, + "confidenceBands": [ + { + "label": "High", + "minConfidence": 0.8, + "actions": [ + "Eligible for automated workflow" + ] + }, + { + "label": "Moderate", + "minConfidence": 0.6, + "actions": [ + "Requires SME review" + ] + } + ] + }, + "ethicalConsiderations": { + "risks": [ + "Model is biased toward narcosis-class chemicals." + ], + "mitigations": [ + "Flag predictions with low similarity for SME review." + ], + "humanOversight": "Regulatory reviewer must approve high-impact predictions." 
+ }, + "provenance": { + "sourceRepositories": [ + "https://github.com/epa/test" + ], + "build": { + "id": "build-2025-01-15", + "timestamp": "2025-01-15T10:00:00Z", + "environment": "GitHub Actions" + }, + "checksum": { + "algorithm": "SHA256", + "value": "4a2a288f4f9b15727ea63a2c70a786844bab608d75d0d70fd0d0d7e0dad32f90" + }, + "reviewStatus": { + "approvedBy": [ + { + "name": "Regulatory Affairs" + } + ], + "approvalDate": "2025-02-01" + } + } +} \ No newline at end of file diff --git a/src/epacomp_tox/data/schemas/README.md b/src/epacomp_tox/data/schemas/README.md new file mode 100644 index 0000000..aa375cc --- /dev/null +++ b/src/epacomp_tox/data/schemas/README.md @@ -0,0 +1,28 @@ +# Portable CompTox Schemas + +The `schemas/` directory publishes portable evidence objects for cross-suite handoff. + +- `docs/contracts/schemas/`: MCP response wrappers for live tool responses. +- `schemas/`: portable objects that downstream MCPs and orchestrators can consume without depending on a specific transport call. + +Portable schema versions are intentionally independent from package patch releases. +For example, a package cleanup release may tighten docs, tests, or release tooling +without changing the `*.v1.json` portable object family. + +Current portable objects: + +- `chemicalIdentityRecord.v1.json` +- `hazardEvidenceSummary.v1.json` +- `exposureEvidenceSummary.v1.json` +- `bioactivityEvidenceSummary.v1.json` +- `aopLinkageSummary.v1.json` +- `pbpkContextBundle.v1.json` +- `comptoxEvidencePack.v1.json` +- `comptox_model_card.schema.json` + +Design rules: + +- Objects are lean and composable. +- CompTox owns evidence ingress and handoff packaging, not downstream AOP semantics or PBPK execution outputs. +- Model-card semantics are reused from `comptox_model_card.schema.json` instead of cloned into a second schema family. +- Example instances live under `schemas/examples/` and are validated in tests. 
diff --git a/src/epacomp_tox/data/schemas/aopLinkageSummary.v1.json b/src/epacomp_tox/data/schemas/aopLinkageSummary.v1.json new file mode 100644 index 0000000..55c1c43 --- /dev/null +++ b/src/epacomp_tox/data/schemas/aopLinkageSummary.v1.json @@ -0,0 +1,232 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/aopLinkageSummary.v1.json", + "title": "CompTox AOP Linkage Summary v1", + "description": "Portable CompTox-side AOP linkage object for downstream mechanistic consumers.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalRef", + "lookupMode", + "mappings", + "supportingAssays", + "confidence", + "provenance" + ], + "properties": { + "chemicalRef": { + "$ref": "#/$defs/chemicalRef" + }, + "lookupMode": { + "type": "string", + "enum": [ + "chemical", + "dtxsid", + "assay", + "target" + ] + }, + "mappings": { + "type": "array", + "items": { + "$ref": "#/$defs/mapping" + } + }, + "supportingAssays": { + "type": "array", + "items": { + "$ref": "#/$defs/assayReference" + } + }, + "confidence": { + "$ref": "#/$defs/confidence" + }, + "identityResolution": { + "type": ["object", "null"], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "provenance": { + "$ref": "#/$defs/provenance" + } + }, + "$defs": { + "chemicalRef": { + "type": "object", + "additionalProperties": false, + "required": [ + "dtxsid", + "preferredName" + ], + "properties": { + "dtxsid": { + "type": "string", + "pattern": "^DTXSID[0-9A-Z]+$" + }, + "preferredName": { + "type": "string" + }, + "casrn": { + "type": [ + "string", + "null" + ] + } + } + }, + "mapping": { + "type": "object", + "additionalProperties": false, + 
"required": [ + "aopId", + "eventType", + "eventLabel" + ], + "properties": { + "aopId": { + "type": "string" + }, + "aopTitle": { + "type": "string" + }, + "keyEventId": { + "type": "string" + }, + "eventType": { + "type": "string" + }, + "eventLabel": { + "type": "string" + }, + "relationship": { + "type": "string" + }, + "evidenceDirection": { + "type": "string" + }, + "confidence": { + "type": "number", + "minimum": 0, + "maximum": 1 + } + } + }, + "assayReference": { + "type": "object", + "additionalProperties": false, + "required": [ + "aeid", + "assayName" + ], + "properties": { + "aeid": { + "type": "string" + }, + "assayName": { + "type": "string" + }, + "targetName": { + "type": "string" + } + } + }, + "confidence": { + "type": "object", + "additionalProperties": false, + "required": [ + "score" + ], + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "band": { + "type": "string" + }, + "basis": { + "type": "string" + } + } + }, + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } +} diff --git a/src/epacomp_tox/data/schemas/bioactivityEvidenceSummary.v1.json 
b/src/epacomp_tox/data/schemas/bioactivityEvidenceSummary.v1.json new file mode 100644 index 0000000..60b3ca8 --- /dev/null +++ b/src/epacomp_tox/data/schemas/bioactivityEvidenceSummary.v1.json @@ -0,0 +1,252 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/bioactivityEvidenceSummary.v1.json", + "title": "CompTox Bioactivity Evidence Summary v1", + "description": "Portable summary of CompTox bioactivity, assay, target, and AOP crosswalk information.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalRef", + "summary", + "assays", + "targets", + "aopMappings", + "provenance" + ], + "properties": { + "chemicalRef": { + "$ref": "#/$defs/chemicalRef" + }, + "summary": { + "$ref": "#/$defs/summary" + }, + "assays": { + "type": "array", + "items": { + "$ref": "#/$defs/assaySummary" + } + }, + "targets": { + "type": "array", + "items": { + "$ref": "#/$defs/targetSummary" + } + }, + "aopMappings": { + "type": "array", + "items": { + "$ref": "#/$defs/aopMapping" + } + }, + "provenance": { + "$ref": "#/$defs/provenance" + } + }, + "$defs": { + "chemicalRef": { + "type": "object", + "additionalProperties": false, + "required": [ + "dtxsid", + "preferredName" + ], + "properties": { + "dtxsid": { + "type": "string", + "pattern": "^DTXSID[0-9A-Z]+$" + }, + "preferredName": { + "type": "string" + }, + "casrn": { + "type": [ + "string", + "null" + ] + } + } + }, + "summary": { + "type": "object", + "additionalProperties": false, + "required": [ + "assayCount", + "targetCount" + ], + "properties": { + "assayCount": { + "type": "integer", + "minimum": 0 + }, + "activeAssayCount": { + "type": "integer", + "minimum": 0 + }, + "targetCount": { + "type": "integer", + "minimum": 0 + }, + "referenceAssaySet": { + "type": "string" + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "assaySummary": { + "type": "object", + "additionalProperties": false, + "required": 
[ + "aeid", + "assayName" + ], + "properties": { + "aeid": { + "type": "string" + }, + "assayName": { + "type": "string" + }, + "assayComponent": { + "type": "string" + }, + "activityDirection": { + "type": "string" + }, + "activityValue": { + "type": [ + "number", + "string", + "null" + ] + }, + "unit": { + "type": "string" + }, + "hitCall": { + "type": [ + "boolean", + "null" + ] + } + } + }, + "targetSummary": { + "type": "object", + "additionalProperties": false, + "required": [ + "targetName" + ], + "properties": { + "targetName": { + "type": "string" + }, + "geneSymbol": { + "type": "string" + }, + "targetFamily": { + "type": "string" + }, + "assayCount": { + "type": "integer", + "minimum": 0 + } + } + }, + "aopMapping": { + "type": "object", + "additionalProperties": false, + "required": [ + "aopId", + "eventType", + "eventLabel" + ], + "properties": { + "aopId": { + "type": "string" + }, + "aopTitle": { + "type": "string" + }, + "eventType": { + "type": "string" + }, + "eventLabel": { + "type": "string" + }, + "confidence": { + "type": "number", + "minimum": 0, + "maximum": 1 + } + } + }, + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } 
+ } + } + } + } +} diff --git a/src/epacomp_tox/data/schemas/chemicalIdentityRecord.v1.json b/src/epacomp_tox/data/schemas/chemicalIdentityRecord.v1.json new file mode 100644 index 0000000..a3d13c5 --- /dev/null +++ b/src/epacomp_tox/data/schemas/chemicalIdentityRecord.v1.json @@ -0,0 +1,123 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/chemicalIdentityRecord.v1.json", + "title": "CompTox Chemical Identity Record v1", + "description": "Portable chemical identity object for cross-suite evidence handoff.", + "type": "object", + "additionalProperties": false, + "required": [ + "dtxsid", + "preferredName", + "provenance" + ], + "properties": { + "dtxsid": { + "type": "string", + "pattern": "^DTXSID[0-9A-Z]+$", + "description": "DSSTox substance identifier." + }, + "preferredName": { + "type": "string", + "description": "Preferred CompTox chemical name." + }, + "casrn": { + "type": [ + "string", + "null" + ], + "description": "CAS Registry Number when available." + }, + "inchikey": { + "type": [ + "string", + "null" + ], + "description": "Standard InChIKey when available." + }, + "smiles": { + "type": [ + "string", + "null" + ], + "description": "Canonical or source-provided SMILES when available." + }, + "synonyms": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Useful alternate names for downstream matching." 
+ }, + "provenance": { + "$ref": "#/$defs/provenance" + } + }, + "$defs": { + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + }, + "license": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } +} diff --git a/src/epacomp_tox/data/schemas/comptoxEvidencePack.v1.json b/src/epacomp_tox/data/schemas/comptoxEvidencePack.v1.json new file mode 100644 index 0000000..f1cf001 --- /dev/null +++ b/src/epacomp_tox/data/schemas/comptoxEvidencePack.v1.json @@ -0,0 +1,251 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/comptoxEvidencePack.v1.json", + "title": "CompTox Evidence Pack v1", + "description": "Portable cross-domain evidence package assembled from CompTox retrieval outputs.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalIdentity", + "metadata", + "audit", + "semanticCoverage" + ], + "properties": { + "chemicalIdentity": { + "$ref": "https://epa.gov/comptox/schemas/chemicalIdentityRecord.v1.json" + }, + "hazardEvidenceSummary": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/hazardEvidenceSummary.v1.json" + }, + { + "type": "null" + } + ] + }, + 
"exposureEvidenceSummary": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/exposureEvidenceSummary.v1.json" + }, + { + "type": "null" + } + ] + }, + "bioactivityEvidenceSummary": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/bioactivityEvidenceSummary.v1.json" + }, + { + "type": "null" + } + ] + }, + "aopLinkageSummary": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/aopLinkageSummary.v1.json" + }, + { + "type": "null" + } + ] + }, + "pbpkContextBundle": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/pbpkContextBundle.v1.json" + }, + { + "type": "null" + } + ] + }, + "identityResolution": { + "type": ["object", "null"], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "metadata": { + "$ref": "#/$defs/metadata" + }, + "audit": { + "$ref": "#/$defs/audit" + }, + "semanticCoverage": { + "$ref": "#/$defs/semanticCoverage" + } + }, + "$defs": { + "metadata": { + "type": "object", + "additionalProperties": false, + "required": [ + "packId", + "sourceMcp", + "createdAt", + "suiteRole" + ], + "properties": { + "packId": { + "type": "string" + }, + "sourceMcp": { + "type": "string" + }, + "createdAt": { + "type": "string", + "format": "date-time" + }, + "suiteRole": { + "type": "string", + "const": "evidence-federation" + }, + "downstreamConsumers": { + "type": "array", + "items": { + "type": "string" + } + }, + "modelCardRefs": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/comptox-model-card.schema.json" + }, + { + "$ref": "#/$defs/modelCardReference" + } + ] + } + } + } + }, + "modelCardReference": { + "type": "object", + "additionalProperties": false, + "required": [ + 
"modelName", + "modelVersion" + ], + "properties": { + "modelName": { + "type": "string" + }, + "modelVersion": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "cardUri": { + "type": "string", + "format": "uri" + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "warnings": { + "type": "array", + "items": {"type": "string"} + } + } + }, + "audit": { + "type": "object", + "additionalProperties": false, + "required": [ + "generatedAt", + "generatedBy", + "sourceTools" + ], + "properties": { + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "requestId": { + "type": "string" + }, + "sourceTools": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "semanticCoverage": { + "type": "object", + "additionalProperties": false, + "required": [ + "identity", + "hazard", + "exposure", + "bioactivity", + "aopLinkage", + "pbpkContext" + ], + "properties": { + "identity": { + "$ref": "#/$defs/coverageLevel" + }, + "hazard": { + "$ref": "#/$defs/coverageLevel" + }, + "exposure": { + "$ref": "#/$defs/coverageLevel" + }, + "bioactivity": { + "$ref": "#/$defs/coverageLevel" + }, + "aopLinkage": { + "$ref": "#/$defs/coverageLevel" + }, + "pbpkContext": { + "$ref": "#/$defs/coverageLevel" + } + } + }, + "coverageLevel": { + "type": "string", + "enum": [ + "none", + "linked", + "summary", + "detailed" + ] + } + } +} diff --git a/src/epacomp_tox/data/schemas/comptox_model_card.schema.json b/src/epacomp_tox/data/schemas/comptox_model_card.schema.json new file mode 100644 index 0000000..7d988fd --- /dev/null +++ b/src/epacomp_tox/data/schemas/comptox_model_card.schema.json @@ -0,0 +1,658 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/comptox-model-card.schema.json", + "title": "CompTox ModelCard", + 
"description": "Machine-readable CompTox model card aligned with OECD QSAR validation principles and MCP requirements.", + "type": "object", + "required": [ + "schemaVersion", + "modelDetails", + "intendedUse", + "oecdValidationPrinciples", + "trainingData", + "evaluationData", + "applicabilityDomain", + "ethicalConsiderations", + "provenance" + ], + "additionalProperties": true, + "properties": { + "schemaVersion": { + "type": "string", + "pattern": "^v?\\d+\\.\\d+(\\.\\d+)?$", + "description": "Semantic version of the CompTox model card schema used to validate this document." + }, + "modelDetails": { + "type": "object", + "description": "Core identity and lifecycle information for the model.", + "required": [ + "name", + "version", + "description", + "modelType", + "developers", + "organizations", + "releaseDate" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Canonical model name (e.g., TEST Consensus Acute Toxicity)." + }, + "version": { + "type": "string", + "description": "Model semantic version or tagged release identifier." + }, + "modelType": { + "type": "string", + "description": "High-level classification for the model (e.g., QSAR, Read-Across, PBPK, Analogue-Search)." + }, + "description": { + "type": "string", + "description": "Short overview of the model purpose and capabilities." + }, + "developers": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#/$defs/person" }, + "description": "Primary model developers or maintainers." + }, + "organizations": { + "type": "array", + "minItems": 1, + "items": { "type": "string" }, + "description": "Affiliated organizations sponsoring or maintaining the model." + }, + "releaseDate": { + "type": "string", + "format": "date", + "description": "Date when this model version was released to production." 
+ }, + "lastUpdated": { + "type": "string", + "format": "date", + "description": "Date of the latest material update to this model card or implementation." + }, + "license": { + "type": "string", + "description": "License governing model distribution or use." + }, + "tags": { + "type": "array", + "items": { "type": "string" }, + "description": "Keywords describing the domain, endpoints, or regulatory programs." + }, + "documentation": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Links to human-readable technical documentation, publications, or SOPs." + } + } + }, + "intendedUse": { + "type": "object", + "description": "Specific use cases, audiences, and limitations for the model.", + "required": ["summary", "inScope", "outOfScope"], + "additionalProperties": false, + "properties": { + "summary": { + "type": "string", + "description": "Narrative summary of intended purpose and regulatory context." + }, + "inScope": { + "type": "array", + "items": { "type": "string" }, + "description": "Explicitly supported use cases, decision contexts, or chemical classes." + }, + "outOfScope": { + "type": "array", + "items": { "type": "string" }, + "description": "Use cases, populations, or chemicals for which the model should not be applied." + }, + "limitations": { + "type": "array", + "items": { "type": "string" }, + "description": "Known scientific or technical limitations relevant to agents or reviewers." + }, + "warnings": { + "type": "array", + "items": { "type": "string" }, + "description": "Cautionary statements that must be surfaced alongside predictions." + }, + "regulatoryPrograms": { + "type": "array", + "items": { "type": "string" }, + "description": "Regulatory programs or guidance documents under which this model is accepted." 
+ } + } + }, + "oecdValidationPrinciples": { + "type": "object", + "description": "Compliance details for the OECD five principles of QSAR validation.", + "required": [ + "definedEndpoint", + "unambiguousAlgorithm", + "definedApplicabilityDomain", + "goodnessOfFitMetrics", + "mechanisticInterpretation" + ], + "additionalProperties": false, + "properties": { + "definedEndpoint": { + "type": "object", + "required": ["description", "unit"], + "additionalProperties": false, + "properties": { + "description": { + "type": "string", + "description": "Precise description of the biological or physicochemical endpoint predicted by the model." + }, + "unit": { + "type": "string", + "description": "Measurement unit associated with the endpoint, if applicable." + }, + "speciesOrSystem": { + "type": "string", + "description": "Species, biological system, or experimental context used to measure the endpoint." + }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Supporting references defining the endpoint." + } + } + }, + "unambiguousAlgorithm": { + "type": "object", + "required": ["summary"], + "additionalProperties": false, + "properties": { + "summary": { + "type": "string", + "description": "Concise description of the algorithm, statistical method, or machine learning architecture." + }, + "methodClass": { + "type": "string", + "description": "High-level classification (e.g., linear regression, random forest, k-NN, read-across)." + }, + "implementation": { + "type": "string", + "description": "Primary implementation reference (e.g., software package, repository URL)." + }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Peer-reviewed publications or technical reports detailing the algorithm." 
+ } + } + }, + "definedApplicabilityDomain": { + "type": "object", + "description": "Summary of AD approach; detailed machine-readable definition is in the top-level applicabilityDomain section.", + "required": ["summary"], + "additionalProperties": false, + "properties": { + "summary": { + "type": "string", + "description": "Narrative summary of the applicability domain technique(s)." + }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Citations supporting the AD methodology." + }, + "relatedTools": { + "type": "array", + "items": { "type": "string" }, + "description": "Names of MCP tools that enforce this AD (e.g., test.check_applicability_domain)." + } + } + }, + "goodnessOfFitMetrics": { + "type": "object", + "description": "Quantitative performance metrics for internal and external validation.", + "additionalProperties": false, + "properties": { + "internalValidation": { + "type": "array", + "items": { "$ref": "#/$defs/metric" }, + "description": "Metrics derived from training or cross-validation." + }, + "externalValidation": { + "type": "array", + "items": { "$ref": "#/$defs/metric" }, + "description": "Metrics derived from external or hold-out datasets." + }, + "applicabilityDomainMetrics": { + "type": "array", + "items": { "$ref": "#/$defs/metric" }, + "description": "Metrics specific to AD performance (coverage, false positive rate, etc.)." + } + } + }, + "mechanisticInterpretation": { + "type": "object", + "required": ["summary"], + "additionalProperties": false, + "properties": { + "summary": { + "type": "string", + "description": "Explanation of mechanistic plausibility or descriptor relevance." + }, + "confidence": { + "type": "string", + "description": "Assessment of strength of mechanistic evidence (e.g., strong, moderate, speculative)." + }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Supporting publications or expert assessments." 
+ } + } + } + } + }, + "trainingData": { + "type": "object", + "description": "Provenance and characteristics of the training dataset.", + "required": ["dataset", "records", "chemicalCount"], + "additionalProperties": false, + "properties": { + "dataset": { "$ref": "#/$defs/dataset" }, + "records": { + "type": "integer", + "minimum": 0, + "description": "Total number of records (rows) used for training." + }, + "chemicalCount": { + "type": "integer", + "minimum": 0, + "description": "Number of unique chemicals represented." + }, + "descriptorCount": { + "type": "integer", + "minimum": 0, + "description": "Number of descriptors/features used by the model." + }, + "preprocessing": { + "type": "string", + "description": "Data curation and preprocessing steps applied prior to model training." + }, + "classBalance": { + "type": "string", + "description": "Summary of class balance or distribution characteristics, if applicable." + } + } + }, + "evaluationData": { + "type": "object", + "description": "Details for external validation datasets and methodologies.", + "required": ["datasets", "validationApproach", "metrics"], + "additionalProperties": false, + "properties": { + "datasets": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#/$defs/dataset" }, + "description": "External datasets used for validation." + }, + "validationApproach": { + "type": "string", + "description": "Description of the validation methodology (e.g., k-fold CV, external test set)." + }, + "metrics": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#/$defs/metric" }, + "description": "Quantitative performance metrics derived from evaluation datasets." + }, + "applicabilityDomainCoverage": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "Fraction of evaluation dataset predictions that fell within the applicability domain." 
+ } + } + }, + "applicabilityDomain": { + "type": "object", + "description": "Machine-readable applicability domain definition used to enforce guardrails.", + "required": ["summary", "criteria", "enforcement"], + "additionalProperties": false, + "properties": { + "summary": { + "type": "string", + "description": "Overall description of the AD methodology and its role in the workflow." + }, + "criteria": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#/$defs/applicabilityCriterion" }, + "description": "List of criteria that define the applicability domain boundaries." + }, + "enforcement": { + "type": "object", + "required": ["mcpTools"], + "additionalProperties": false, + "properties": { + "mcpTools": { + "type": "array", + "items": { "type": "string" }, + "description": "Names of MCP tools responsible for enforcing the AD checks." + }, + "policy": { + "type": "string", + "description": "Policy guidance controlling how out-of-domain results are handled (e.g., block, warn)." + }, + "errorCodes": { + "type": "array", + "items": { "type": "string" }, + "description": "Expected error codes or statuses emitted when AD checks fail." + } + } + }, + "confidenceBands": { + "type": "array", + "items": { "$ref": "#/$defs/confidenceBand" }, + "description": "Optional mapping of AD confidence to downstream decision guidance." + }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "Citations describing the AD algorithm or validation." + } + } + }, + "ethicalConsiderations": { + "type": "object", + "description": "Bias, fairness, or ethical implications of the model.", + "required": ["risks"], + "additionalProperties": false, + "properties": { + "risks": { + "type": "array", + "items": { "type": "string" }, + "description": "Known ethical or bias-related risks to surface to users." 
+ }, + "mitigations": { + "type": "array", + "items": { "type": "string" }, + "description": "Mitigation strategies or guidance for interpreting model outputs." + }, + "humanOversight": { + "type": "string", + "description": "Description of required human oversight or SME review." + } + } + }, + "provenance": { + "type": "object", + "description": "Provenance, version control, and audit metadata.", + "required": ["sourceRepositories", "build", "checksum"], + "additionalProperties": false, + "properties": { + "sourceRepositories": { + "type": "array", + "items": { "type": "string", "format": "uri" }, + "description": "Repositories, data stores, or registries containing canonical model assets." + }, + "build": { + "type": "object", + "required": ["id", "timestamp"], + "additionalProperties": false, + "properties": { + "id": { + "type": "string", + "description": "Build identifier or commit hash used to produce the deployable artifact." + }, + "timestamp": { + "type": "string", + "format": "date-time", + "description": "Timestamp when the build was produced." + }, + "environment": { + "type": "string", + "description": "Description of build environment (e.g., GitHub Actions workflow, container digest)." + } + } + }, + "checksum": { + "type": "object", + "required": ["algorithm", "value"], + "additionalProperties": false, + "properties": { + "algorithm": { + "type": "string", + "description": "Hash algorithm used (e.g., SHA256)." + }, + "value": { + "type": "string", + "description": "Checksum value for the model artifact or dataset bundle." + } + } + }, + "versionHistory": { + "type": "array", + "items": { + "type": "object", + "required": ["version", "changes"], + "additionalProperties": false, + "properties": { + "version": { "type": "string" }, + "changes": { + "type": "array", + "items": { "type": "string" } + }, + "date": { + "type": "string", + "format": "date" + } + } + }, + "description": "Chronological record of major updates to the model or card." 
+ }, + "reviewStatus": { + "type": "object", + "properties": { + "approvedBy": { + "type": "array", + "items": { "$ref": "#/$defs/person" }, + "description": "Stakeholders who have reviewed/approved the model card." + }, + "approvalDate": { + "type": "string", + "format": "date" + }, + "notes": { + "type": "string" + } + } + } + } + }, + "notes": { + "type": "string", + "description": "Free-form notes or additional context not captured elsewhere." + } + }, + "$defs": { + "person": { + "type": "object", + "required": ["name"], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Full name of the person." + }, + "affiliation": { + "type": "string", + "description": "Organization or institution." + }, + "email": { + "type": "string", + "format": "email", + "description": "Contact email for follow-up questions." + }, + "orcid": { + "type": "string", + "pattern": "^\\d{4}-\\d{4}-\\d{4}-\\d{3}[\\dX]$", + "description": "ORCID identifier, if available." + } + } + }, + "reference": { + "type": "object", + "required": ["citation"], + "additionalProperties": false, + "properties": { + "citation": { + "type": "string", + "description": "Human-readable citation." + }, + "doi": { + "type": "string", + "description": "Digital object identifier." + }, + "url": { + "type": "string", + "format": "uri", + "description": "Resolvable URL for the reference." + }, + "notes": { + "type": "string", + "description": "Context or relevance of the reference." + } + } + }, + "metric": { + "type": "object", + "required": ["name", "value"], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Metric name (e.g., R2, RMSE, MAE)." + }, + "value": { + "type": "number", + "description": "Numeric value for the metric." + }, + "dataset": { + "type": "string", + "description": "Dataset name or split associated with the metric." 
+ }, + "units": { + "type": "string", + "description": "Measurement units, if applicable." + }, + "description": { + "type": "string", + "description": "Additional context for the metric calculation." + } + } + }, + "dataset": { + "type": "object", + "required": ["name", "source"], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Dataset name or identifier." + }, + "source": { + "type": "string", + "description": "Data source or repository from which the dataset was obtained." + }, + "description": { + "type": "string", + "description": "Narrative description of the dataset contents." + }, + "accessUrl": { + "type": "string", + "format": "uri", + "description": "URL where the dataset can be accessed or requested." + }, + "license": { + "type": "string", + "description": "License governing dataset usage." + }, + "recordCount": { + "type": "integer", + "minimum": 0, + "description": "Number of records in the dataset." + }, + "timeRange": { + "type": "object", + "properties": { + "start": { + "type": "string", + "format": "date" + }, + "end": { + "type": "string", + "format": "date" + } + } + } + } + }, + "applicabilityCriterion": { + "type": "object", + "required": ["type", "description"], + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": ["descriptor_range", "similarity", "distance", "coverage", "expert_rule", "other"], + "description": "Category of applicability domain criterion." + }, + "description": { + "type": "string", + "description": "Human-readable explanation of the criterion." + }, + "parameters": { + "type": "object", + "description": "Machine-readable parameters defining the criterion (thresholds, descriptor names, etc.)." + }, + "threshold": { + "type": "number", + "description": "Primary numeric threshold, when applicable." + }, + "unit": { + "type": "string", + "description": "Unit associated with the threshold." 
+ }, + "references": { + "type": "array", + "items": { "$ref": "#/$defs/reference" }, + "description": "References backing the criterion rationale." + } + } + }, + "confidenceBand": { + "type": "object", + "required": ["label", "minConfidence", "actions"], + "additionalProperties": false, + "properties": { + "label": { + "type": "string", + "description": "Human-readable label for the confidence band (e.g., High, Moderate, Low)." + }, + "minConfidence": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "Minimum confidence score (0-1) included in the band." + }, + "actions": { + "type": "array", + "items": { "type": "string" }, + "description": "Recommended actions or policy guidance when a prediction falls in this band." + } + } + } + } +} diff --git a/src/epacomp_tox/data/schemas/examples/aopLinkageSummary.example.json b/src/epacomp_tox/data/schemas/examples/aopLinkageSummary.example.json new file mode 100644 index 0000000..6a3e171 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/aopLinkageSummary.example.json @@ -0,0 +1,50 @@ +{ + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "lookupMode": "dtxsid", + "mappings": [ + { + "aopId": "AOP:42", + "aopTitle": "Estrogen receptor activation leading to reproductive effects", + "keyEventId": "KE:1735", + "eventType": "molecular_initiating_event", + "eventLabel": "Activation of estrogen receptor", + "relationship": "linked_via_assay_mapping", + "evidenceDirection": "supports", + "confidence": 0.81 + } + ], + "supportingAssays": [ + { + "aeid": "AEID:1856", + "assayName": "ATG_ERa_TRANS_up", + "targetName": "Estrogen receptor alpha" + } + ], + "confidence": { + "score": 0.79, + "band": "moderate", + "basis": "Single-target concordance across linked ToxCast assays and AOP crosswalk coverage." 
+ }, + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_aop_linkage_summary", + "traceId": "ctx-example-aop-001", + "sources": [ + { + "name": "CompTox Bioactivity AOP mapping", + "toolName": "get_bioactivity_aop", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:10Z", + "citation": "EPA CompTox AOP crosswalk endpoint" + } + ], + "notes": [ + "CompTox-side linkage object only; downstream AOP normalization belongs in aop-mcp." + ] + } +} diff --git a/src/epacomp_tox/data/schemas/examples/bioactivityEvidenceSummary.example.json b/src/epacomp_tox/data/schemas/examples/bioactivityEvidenceSummary.example.json new file mode 100644 index 0000000..3d872c4 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/bioactivityEvidenceSummary.example.json @@ -0,0 +1,77 @@ +{ + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "summary": { + "assayCount": 3, + "activeAssayCount": 2, + "targetCount": 2, + "referenceAssaySet": "toxcast-phase-iii", + "notes": [ + "Example summary highlighting endocrine-relevant activity." 
+ ] + }, + "assays": [ + { + "aeid": "AEID:1856", + "assayName": "ATG_ERa_TRANS_up", + "assayComponent": "estrogen receptor alpha transcriptional activation", + "activityDirection": "up", + "activityValue": 0.48, + "unit": "uM", + "hitCall": true + }, + { + "aeid": "AEID:1234", + "assayName": "TOX21_AR_BLA_Antagonist_ratio", + "assayComponent": "androgen receptor antagonist", + "activityDirection": "down", + "activityValue": 5.1, + "unit": "uM", + "hitCall": false + } + ], + "targets": [ + { + "targetName": "Estrogen receptor alpha", + "geneSymbol": "ESR1", + "targetFamily": "nuclear receptor", + "assayCount": 2 + }, + { + "targetName": "Estrogen receptor beta", + "geneSymbol": "ESR2", + "targetFamily": "nuclear receptor", + "assayCount": 1 + } + ], + "aopMappings": [ + { + "aopId": "AOP:42", + "aopTitle": "Estrogen receptor activation leading to reproductive effects", + "eventType": "molecular_initiating_event", + "eventLabel": "Activation of estrogen receptor", + "confidence": 0.81 + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-bioactivity-001", + "sources": [ + { + "name": "CompTox Bioactivity APIs", + "toolName": "get_bioactivity_summary_by_dtxsid", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:00Z", + "citation": "EPA CompTox bioactivity endpoints" + } + ], + "notes": [ + "Illustrative bioactivity summary object." 
+ ] + } +} diff --git a/src/epacomp_tox/data/schemas/examples/chemicalIdentityRecord.example.json b/src/epacomp_tox/data/schemas/examples/chemicalIdentityRecord.example.json new file mode 100644 index 0000000..bc7e01f --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/chemicalIdentityRecord.example.json @@ -0,0 +1,29 @@ +{ + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7", + "inchikey": "IHQYDGMOQILFNV-UHFFFAOYSA-N", + "smiles": "CC(C)(C1=CC=C(C=C1)O)C2=CC=C(C=C2)O", + "synonyms": [ + "BPA", + "4,4'-Isopropylidenediphenol" + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-chemical-001", + "sources": [ + { + "name": "CompTox Chemicals Dashboard API", + "toolName": "search_chemical", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox Chemicals Dashboard" + } + ], + "notes": [ + "Example identity record for portable handoff validation." 
+ ] + } +} diff --git a/src/epacomp_tox/data/schemas/examples/comptoxEvidencePack.example.json b/src/epacomp_tox/data/schemas/examples/comptoxEvidencePack.example.json new file mode 100644 index 0000000..cfb3eb2 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/comptoxEvidencePack.example.json @@ -0,0 +1,366 @@ +{ + "chemicalIdentity": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7", + "inchikey": "IHQYDGMOQILFNV-UHFFFAOYSA-N", + "smiles": "CC(C)(C1=CC=C(C=C1)O)C2=CC=C(C=C2)O", + "synonyms": [ + "BPA" + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-pack-chemical-001", + "sources": [ + { + "name": "CompTox Chemicals Dashboard API", + "toolName": "search_chemical", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox Chemicals Dashboard" + } + ], + "notes": [ + "Embedded identity record." 
+ ] + } + }, + "hazardEvidenceSummary": { + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "datasets": [ + { + "dataset": "toxval", + "summaryLevel": "summary", + "recordCount": 1, + "records": [ + { + "effect": "NOEL", + "value": 40, + "unit": "mg/kg-day" + } + ], + "sourceTool": "search_hazard", + "retrievedAt": "2026-03-21T09:58:00Z" + } + ], + "keyFindings": [ + { + "statement": "A repeated-dose oral NOEL is available in ToxValDB.", + "sourceDataset": "toxval", + "endpoint": "NOEL", + "value": 40, + "unit": "mg/kg-day" + } + ], + "references": [ + { + "citation": "EPA CompTox hazard datasets for Bisphenol A.", + "url": "https://comptox.epa.gov/dashboard" + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-pack-hazard-001", + "sources": [ + { + "name": "CompTox Hazard APIs", + "toolName": "search_hazard", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox hazard endpoints" + } + ] + }, + "requestMetadata": { + "sourceTools": [ + "search_hazard" + ], + "requestedAt": "2026-03-21T09:58:00Z", + "summaryOnly": true + } + }, + "exposureEvidenceSummary": { + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "cpdat": { + "recordCount": 1, + "records": [ + { + "productUseCategory": "Food contact material" + } + ], + "sourceTool": "search_cpdat", + "retrievedAt": "2026-03-21T09:58:30Z" + }, + "seem": null, + "httk": { + "recordCount": 1, + "records": [ + { + "fractionUnboundPlasma": 0.06 + } + ], + "sourceTool": "search_httk", + "retrievedAt": "2026-03-21T09:59:00Z" + }, + "mmdb": null, + "qsurs": { + "recordCount": 1, + "records": [ + { + "useDescriptor": "plasticizer", + "probability": 0.62 + } + ], + "sourceTool": "search_qsurs", + "retrievedAt": 
"2026-03-21T09:59:30Z" + }, + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-pack-exposure-001", + "sources": [ + { + "name": "CompTox Exposure APIs", + "toolName": "search_cpdat", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:30Z", + "citation": "EPA CompTox exposure endpoints" + } + ] + } + }, + "bioactivityEvidenceSummary": { + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "summary": { + "assayCount": 2, + "activeAssayCount": 1, + "targetCount": 1 + }, + "assays": [ + { + "aeid": "AEID:1856", + "assayName": "ATG_ERa_TRANS_up", + "activityDirection": "up", + "activityValue": 0.48, + "unit": "uM", + "hitCall": true + } + ], + "targets": [ + { + "targetName": "Estrogen receptor alpha", + "geneSymbol": "ESR1", + "targetFamily": "nuclear receptor", + "assayCount": 1 + } + ], + "aopMappings": [ + { + "aopId": "AOP:42", + "eventType": "molecular_initiating_event", + "eventLabel": "Activation of estrogen receptor", + "confidence": 0.81 + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-pack-bioactivity-001", + "sources": [ + { + "name": "CompTox Bioactivity APIs", + "toolName": "get_bioactivity_summary_by_dtxsid", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:00Z", + "citation": "EPA CompTox bioactivity endpoints" + } + ] + } + }, + "aopLinkageSummary": { + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "lookupMode": "dtxsid", + "mappings": [ + { + "aopId": "AOP:42", + "eventType": "molecular_initiating_event", + "eventLabel": "Activation of estrogen receptor", + "confidence": 0.81 + } + ], + "supportingAssays": [ + { + "aeid": 
"AEID:1856", + "assayName": "ATG_ERa_TRANS_up" + } + ], + "confidence": { + "score": 0.79, + "basis": "Example CompTox linkage confidence." + }, + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_aop_linkage_summary", + "traceId": "ctx-example-pack-aop-001", + "sources": [ + { + "name": "CompTox AOP mapping", + "toolName": "get_bioactivity_aop", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:10Z", + "citation": "EPA CompTox AOP crosswalk endpoint" + } + ] + } + }, + "pbpkContextBundle": { + "chemicalIdentityRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7", + "inchikey": "IHQYDGMOQILFNV-UHFFFAOYSA-N", + "smiles": "CC(C)(C1=CC=C(C=C1)O)C2=CC=C(C=C2)O", + "synonyms": [ + "BPA" + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_pbpk_context_bundle", + "traceId": "ctx-example-pack-pbpk-chemical-001", + "sources": [ + { + "name": "CompTox Chemicals Dashboard API", + "toolName": "search_chemical", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox Chemicals Dashboard" + } + ] + } + }, + "httkSlice": { + "recordCount": 1, + "records": [ + { + "fractionUnboundPlasma": 0.06 + } + ], + "sourceTool": "get_exposure_httk", + "retrievedAt": "2026-03-21T09:59:00Z", + "selectedMetrics": { + "fractionUnboundPlasma": 0.06 + } + }, + "hazardAdmeIviveSlice": { + "recordCount": 1, + "records": [ + { + "intrinsicClearance": 12.4 + } + ], + "sourceTool": "get_hazard_adme_ivive", + "retrievedAt": "2026-03-21T09:59:20Z", + "selectedMetrics": { + "intrinsicClearance": 12.4 + } + }, + "exposureHints": [ + { + "hintType": "population_exposure", + "value": 0.02, + "unit": "mg/kg-day", + "source": "SEEM" + } + ], + "modelCardRefs": [ + { + "modelName": "CompTox HTTK 3-Compartment Model", + "modelVersion": "1.0.0", + 
"endpoint": "screening-level internal concentration", + "cardUri": "https://github.com/ToxMCP/comptox-mcp/blob/main/schemas/comptox_model_card.schema.json" + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_pbpk_context_bundle", + "traceId": "ctx-example-pack-pbpk-001", + "sources": [ + { + "name": "CompTox Exposure and Hazard APIs", + "toolName": "get_exposure_httk", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:00Z", + "citation": "EPA CompTox HTTK and ADME endpoints" + } + ] + }, + "handoffTarget": "pbpk-mcp" + }, + "metadata": { + "packId": "comptox-pack-bpa-001", + "sourceMcp": "epacomp-tox-mcp", + "createdAt": "2026-03-21T10:00:00Z", + "suiteRole": "evidence-federation", + "downstreamConsumers": [ + "aop-mcp", + "pbpk-mcp" + ], + "modelCardRefs": [ + { + "modelName": "CompTox HTTK 3-Compartment Model", + "modelVersion": "1.0.0", + "endpoint": "screening-level internal concentration", + "cardUri": "https://github.com/ToxMCP/comptox-mcp/blob/main/schemas/comptox_model_card.schema.json" + } + ] + }, + "audit": { + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "requestId": "comptox-pack-request-001", + "sourceTools": [ + "search_chemical", + "search_hazard", + "search_cpdat", + "get_bioactivity_summary_by_dtxsid", + "get_bioactivity_aop", + "get_exposure_httk" + ], + "notes": [ + "Illustrative pack showing the intended portable handoff shape." 
+ ] + }, + "semanticCoverage": { + "identity": "detailed", + "hazard": "summary", + "exposure": "summary", + "bioactivity": "summary", + "aopLinkage": "linked", + "pbpkContext": "summary" + } +} diff --git a/src/epacomp_tox/data/schemas/examples/exposureEvidenceSummary.example.json b/src/epacomp_tox/data/schemas/examples/exposureEvidenceSummary.example.json new file mode 100644 index 0000000..57d66e5 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/exposureEvidenceSummary.example.json @@ -0,0 +1,82 @@ +{ + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "cpdat": { + "recordCount": 1, + "records": [ + { + "productUseCategory": "Food contact material", + "presence": "reported" + } + ], + "sourceTool": "search_cpdat", + "retrievedAt": "2026-03-21T09:58:30Z" + }, + "seem": { + "recordCount": 1, + "records": [ + { + "population": "adult", + "medianExposure": 0.02, + "unit": "mg/kg-day" + } + ], + "sourceTool": "get_seem_general", + "retrievedAt": "2026-03-21T09:58:45Z" + }, + "httk": { + "recordCount": 1, + "records": [ + { + "model": "httk-3compartment", + "clint": 12.4, + "clintUnit": "uL/min/10^6 hepatocytes" + } + ], + "sourceTool": "search_httk", + "retrievedAt": "2026-03-21T09:59:00Z" + }, + "mmdb": { + "recordCount": 1, + "records": [ + { + "medium": "drinking_water", + "detectionFrequency": 0.18 + } + ], + "sourceTool": "get_exposure_mmdb_aggregate_by_dtxsid", + "retrievedAt": "2026-03-21T09:59:15Z" + }, + "qsurs": { + "recordCount": 1, + "records": [ + { + "useDescriptor": "plasticizer", + "probability": 0.62 + } + ], + "sourceTool": "search_qsurs", + "retrievedAt": "2026-03-21T09:59:30Z" + }, + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-exposure-001", + "sources": [ + { + "name": "CompTox Exposure APIs", + "toolName": "search_cpdat", + "url": 
"https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:30Z", + "citation": "EPA CompTox exposure endpoints" + } + ], + "notes": [ + "Illustrative exposure summary object." + ] + } +} diff --git a/src/epacomp_tox/data/schemas/examples/hazardEvidenceSummary.example.json b/src/epacomp_tox/data/schemas/examples/hazardEvidenceSummary.example.json new file mode 100644 index 0000000..84799d1 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/hazardEvidenceSummary.example.json @@ -0,0 +1,91 @@ +{ + "chemicalRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7" + }, + "datasets": [ + { + "dataset": "toxval", + "summaryLevel": "summary", + "recordCount": 2, + "records": [ + { + "effect": "NOEL", + "value": 40, + "unit": "mg/kg-day", + "species": "Rat" + }, + { + "effect": "LOAEL", + "value": 120, + "unit": "mg/kg-day", + "species": "Rat" + } + ], + "sourceTool": "search_hazard", + "retrievedAt": "2026-03-21T09:58:00Z" + }, + { + "dataset": "cancer", + "summaryLevel": "summary", + "recordCount": 1, + "records": [ + { + "agency": "IARC", + "classification": "Not classifiable as to carcinogenicity to humans" + } + ], + "sourceTool": "search_hazard", + "retrievedAt": "2026-03-21T09:58:00Z" + } + ], + "keyFindings": [ + { + "statement": "ToxValDB includes repeated-dose oral values for Bisphenol A.", + "sourceDataset": "toxval", + "endpoint": "NOEL", + "value": 40, + "unit": "mg/kg-day", + "confidence": 0.77 + }, + { + "statement": "Cancer summary records are sparse relative to general hazard coverage.", + "sourceDataset": "cancer", + "context": "Use as a supporting line of evidence rather than a sole hazard basis." 
+ } + ], + "references": [ + { + "citation": "EPA CompTox Chemicals Dashboard hazard datasets for Bisphenol A.", + "url": "https://comptox.epa.gov/dashboard" + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "assemble_comptox_evidence_pack", + "traceId": "ctx-example-hazard-001", + "sources": [ + { + "name": "CompTox Hazard APIs", + "toolName": "search_hazard", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox hazard endpoints" + } + ], + "notes": [ + "Illustrative hazard summary object." + ] + }, + "requestMetadata": { + "sourceTools": [ + "search_hazard", + "get_hazard_toxval" + ], + "requestedAt": "2026-03-21T09:58:00Z", + "summaryOnly": true, + "requestId": "hazard-example-request-001" + } +} diff --git a/src/epacomp_tox/data/schemas/examples/pbpkContextBundle.example.json b/src/epacomp_tox/data/schemas/examples/pbpkContextBundle.example.json new file mode 100644 index 0000000..c71dcd4 --- /dev/null +++ b/src/epacomp_tox/data/schemas/examples/pbpkContextBundle.example.json @@ -0,0 +1,214 @@ +{ + "chemicalIdentityRef": { + "dtxsid": "DTXSID7020182", + "preferredName": "Bisphenol A", + "casrn": "80-05-7", + "inchikey": "IHQYDGMOQILFNV-UHFFFAOYSA-N", + "smiles": "CC(C)(C1=CC=C(C=C1)O)C2=CC=C(C=C2)O", + "synonyms": [ + "BPA", + "4,4'-Isopropylidenediphenol" + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_pbpk_context_bundle", + "traceId": "ctx-example-pbpk-chemical-001", + "sources": [ + { + "name": "CompTox Chemicals Dashboard API", + "toolName": "search_chemical", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:58:00Z", + "citation": "EPA CompTox Chemicals Dashboard" + } + ], + "notes": [ + "Embedded identity record for PBPK handoff." 
+ ] + } + }, + "httkSlice": { + "recordCount": 1, + "records": [ + { + "model": "httk-3compartment", + "species": "human", + "fractionUnboundPlasma": 0.06, + "intrinsicClearance": 12.4 + } + ], + "sourceTool": "get_exposure_httk", + "retrievedAt": "2026-03-21T09:59:00Z", + "selectedMetrics": { + "fractionUnboundPlasma": 0.06, + "intrinsicClearance": 12.4, + "clearanceUnit": "uL/min/10^6 hepatocytes" + } + }, + "hazardAdmeIviveSlice": { + "recordCount": 1, + "records": [ + { + "assay": "hepatocyte_clearance", + "value": 12.4, + "unit": "uL/min/10^6 hepatocytes" + } + ], + "sourceTool": "get_hazard_adme_ivive", + "retrievedAt": "2026-03-21T09:59:20Z", + "selectedMetrics": { + "primaryAdmeSignal": "hepatocyte_clearance" + } + }, + "exposureHints": [ + { + "hintType": "population_exposure", + "value": 0.02, + "unit": "mg/kg-day", + "source": "SEEM", + "context": "Adult central tendency estimate." + }, + { + "hintType": "product_presence", + "value": "food_contact_material", + "source": "CPDat" + } + ], + "modelCardRefs": [ + { + "schemaVersion": "1.0", + "modelDetails": { + "name": "CompTox HTTK 3-Compartment Model", + "version": "1.0.0", + "modelType": "PBPK", + "description": "Illustrative model card for an HTTK-derived PBPK handoff example.", + "developers": [ + { + "name": "EPA CompTox Team" + } + ], + "organizations": [ + "U.S. EPA" + ], + "releaseDate": "2025-01-01" + }, + "intendedUse": { + "summary": "Provide a transportable model description for downstream PBPK qualification review.", + "inScope": [ + "Screening-level internal exposure context" + ], + "outOfScope": [ + "Final PBPK qualification decisions" + ], + "limitations": [ + "Example model card embedded for schema validation only." + ], + "warnings": [ + "Not a substitute for pbpk-mcp qualification outputs." 
+ ], + "regulatoryPrograms": [ + "NGRA workflow support" + ] + }, + "oecdValidationPrinciples": { + "definedEndpoint": { + "description": "Internal concentration time-course surrogates", + "unit": "mg/L" + }, + "unambiguousAlgorithm": { + "summary": "Three-compartment HTTK parameterization." + }, + "definedApplicabilityDomain": { + "summary": "Applies when chemical-specific HTTK parameters are available." + }, + "goodnessOfFitMetrics": {}, + "mechanisticInterpretation": { + "summary": "Mechanistic interpretation derives from toxicokinetic parameterization." + } + }, + "trainingData": { + "dataset": { + "name": "HTTK parameter tables", + "source": "CompTox" + }, + "records": 1, + "chemicalCount": 1 + }, + "evaluationData": { + "datasets": [ + { + "name": "Illustrative evaluation set", + "source": "CompTox" + } + ], + "validationApproach": "Reference example", + "metrics": [ + { + "name": "coverage", + "value": 1.0 + } + ] + }, + "applicabilityDomain": { + "summary": "Requires a resolvable chemical identity and available HTTK parameter slice.", + "criteria": [ + { + "type": "coverage", + "description": "HTTK parameters must be available." + } + ], + "enforcement": { + "mcpTools": [ + "build_pbpk_context_bundle" + ] + } + }, + "ethicalConsiderations": { + "risks": [ + "Users may overinterpret screening-level context as a final PBPK decision." 
+ ] + }, + "provenance": { + "sourceRepositories": [ + "https://github.com/ToxMCP/comptox-mcp" + ], + "build": { + "id": "example-build", + "timestamp": "2026-03-21T10:00:00Z" + }, + "checksum": { + "algorithm": "SHA256", + "value": "examplechecksum" + }, + "reviewStatus": { + "approvedBy": [ + { + "name": "QA Reviewer" + } + ], + "approvalDate": "2026-03-21" + } + } + } + ], + "provenance": { + "sourceMcp": "epacomp-tox-mcp", + "generatedAt": "2026-03-21T10:00:00Z", + "generatedBy": "build_pbpk_context_bundle", + "traceId": "ctx-example-pbpk-001", + "sources": [ + { + "name": "CompTox Exposure and Hazard APIs", + "toolName": "get_exposure_httk", + "url": "https://comptox.epa.gov/dashboard", + "retrievedAt": "2026-03-21T09:59:00Z", + "citation": "EPA CompTox HTTK and ADME endpoints" + } + ], + "notes": [ + "This bundle provides context only; it does not emit internal exposure estimates." + ] + }, + "handoffTarget": "pbpk-mcp" +} diff --git a/src/epacomp_tox/data/schemas/exposureEvidenceSummary.v1.json b/src/epacomp_tox/data/schemas/exposureEvidenceSummary.v1.json new file mode 100644 index 0000000..e08bcf3 --- /dev/null +++ b/src/epacomp_tox/data/schemas/exposureEvidenceSummary.v1.json @@ -0,0 +1,191 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/exposureEvidenceSummary.v1.json", + "title": "CompTox Exposure Evidence Summary v1", + "description": "Portable summary of exposure and HTTK evidence retrieved from CompTox.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalRef", + "cpdat", + "seem", + "httk", + "mmdb", + "qsurs", + "provenance" + ], + "properties": { + "chemicalRef": { + "$ref": "#/$defs/chemicalRef" + }, + "cpdat": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "seem": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "httk": { + "oneOf": [ + { + "$ref": 
"#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "mmdb": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "qsurs": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "provenance": { + "$ref": "#/$defs/provenance" + } + }, + "$defs": { + "chemicalRef": { + "type": "object", + "additionalProperties": false, + "required": [ + "dtxsid", + "preferredName" + ], + "properties": { + "dtxsid": { + "type": "string", + "pattern": "^DTXSID[0-9A-Z]+$" + }, + "preferredName": { + "type": "string" + }, + "casrn": { + "type": [ + "string", + "null" + ] + } + } + }, + "evidenceSlice": { + "type": "object", + "additionalProperties": false, + "required": [ + "recordCount", + "records" + ], + "properties": { + "recordCount": { + "type": "integer", + "minimum": 0 + }, + "records": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "sourceTool": { + "type": "string" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + } + } + }, + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } +} diff 
--git a/src/epacomp_tox/data/schemas/hazardEvidenceSummary.v1.json b/src/epacomp_tox/data/schemas/hazardEvidenceSummary.v1.json new file mode 100644 index 0000000..8998fff --- /dev/null +++ b/src/epacomp_tox/data/schemas/hazardEvidenceSummary.v1.json @@ -0,0 +1,248 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/hazardEvidenceSummary.v1.json", + "title": "CompTox Hazard Evidence Summary v1", + "description": "Portable summary of hazard evidence retrieved from CompTox hazard datasets.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalRef", + "datasets", + "keyFindings", + "references", + "provenance", + "requestMetadata" + ], + "properties": { + "chemicalRef": { + "$ref": "#/$defs/chemicalRef" + }, + "datasets": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/datasetSlice" + } + }, + "keyFindings": { + "type": "array", + "items": { + "$ref": "#/$defs/keyFinding" + } + }, + "references": { + "type": "array", + "items": { + "$ref": "https://epa.gov/comptox/schemas/comptox-model-card.schema.json#/$defs/reference" + } + }, + "provenance": { + "$ref": "#/$defs/provenance" + }, + "requestMetadata": { + "$ref": "#/$defs/requestMetadata" + } + }, + "$defs": { + "chemicalRef": { + "type": "object", + "additionalProperties": false, + "required": [ + "dtxsid", + "preferredName" + ], + "properties": { + "dtxsid": { + "type": "string", + "pattern": "^DTXSID[0-9A-Z]+$" + }, + "preferredName": { + "type": "string" + }, + "casrn": { + "type": [ + "string", + "null" + ] + } + } + }, + "datasetSlice": { + "type": "object", + "additionalProperties": false, + "required": [ + "dataset", + "summaryLevel", + "recordCount" + ], + "properties": { + "dataset": { + "type": "string", + "enum": [ + "toxval", + "toxref", + "cancer", + "genetox", + "adme_ivive", + "iris", + "pprtv", + "hawc" + ] + }, + "summaryLevel": { + "type": "string", + "enum": [ + "summary", + "detail", + 
"mixed" + ] + }, + "recordCount": { + "type": "integer", + "minimum": 0 + }, + "records": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "sourceTool": { + "type": "string" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + } + } + }, + "keyFinding": { + "type": "object", + "additionalProperties": false, + "required": [ + "statement", + "sourceDataset" + ], + "properties": { + "statement": { + "type": "string" + }, + "sourceDataset": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "value": { + "type": [ + "number", + "string", + "null" + ] + }, + "unit": { + "type": "string" + }, + "confidence": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "context": { + "type": "string" + } + } + }, + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + "type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "requestMetadata": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceTools", + "requestedAt" + ], + "properties": { + "sourceTools": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, + "requestedAt": { + "type": "string", + "format": 
"date-time" + }, + "summaryOnly": { + "type": "boolean" + }, + "requestId": { + "type": "string" + } + } + } + } +} diff --git a/src/epacomp_tox/data/schemas/pbpkContextBundle.v1.json b/src/epacomp_tox/data/schemas/pbpkContextBundle.v1.json new file mode 100644 index 0000000..520e691 --- /dev/null +++ b/src/epacomp_tox/data/schemas/pbpkContextBundle.v1.json @@ -0,0 +1,245 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://epa.gov/comptox/schemas/pbpkContextBundle.v1.json", + "title": "CompTox PBPK Context Bundle v1", + "description": "Portable CompTox-side context package for downstream PBPK workflows.", + "type": "object", + "additionalProperties": false, + "required": [ + "chemicalIdentityRef", + "httkSlice", + "hazardAdmeIviveSlice", + "exposureHints", + "modelCardRefs", + "provenance", + "handoffTarget" + ], + "properties": { + "chemicalIdentityRef": { + "$ref": "https://epa.gov/comptox/schemas/chemicalIdentityRecord.v1.json" + }, + "httkSlice": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "hazardAdmeIviveSlice": { + "oneOf": [ + { + "$ref": "#/$defs/evidenceSlice" + }, + { + "type": "null" + } + ] + }, + "exposureHints": { + "type": "array", + "items": { + "$ref": "#/$defs/exposureHint" + } + }, + "modelCardRefs": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "https://epa.gov/comptox/schemas/comptox-model-card.schema.json" + }, + { + "$ref": "#/$defs/modelCardReference" + } + ] + } + }, + "identityResolution": { + "type": ["object", "null"], + "additionalProperties": true + }, + "knownDataGaps": { + "type": "array", + "items": {"type": "string"} + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "generatedFromTools": { + "type": "array", + "items": {"type": "string"} + }, + "provenanceSummary": { + "type": "object", + "additionalProperties": true + }, + "provenance": { + "$ref": "#/$defs/provenance" + }, + "handoffTarget": { + "type": 
"string", + "const": "pbpk-mcp" + } + }, + "$defs": { + "evidenceSlice": { + "type": "object", + "additionalProperties": false, + "required": [ + "recordCount", + "records" + ], + "properties": { + "recordCount": { + "type": "integer", + "minimum": 0 + }, + "records": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + }, + "sourceTool": { + "type": "string" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "selectedMetrics": { + "type": "object", + "additionalProperties": true + } + } + }, + "exposureHint": { + "type": "object", + "additionalProperties": false, + "required": [ + "hintType", + "value", + "source" + ], + "properties": { + "hintType": { + "type": "string" + }, + "value": { + "type": [ + "number", + "string" + ] + }, + "unit": { + "type": "string" + }, + "source": { + "type": "string" + }, + "context": { + "type": "string" + } + } + }, + "modelCardReference": { + "type": "object", + "additionalProperties": false, + "required": [ + "modelName", + "modelVersion" + ], + "properties": { + "modelName": { + "type": "string" + }, + "modelVersion": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "cardUri": { + "type": "string", + "format": "uri" + }, + "limitations": { + "type": "array", + "items": {"type": "string"} + }, + "warnings": { + "type": "array", + "items": {"type": "string"} + } + } + }, + "sourceRecord": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "retrievedAt": { + "type": "string", + "format": "date-time" + }, + "citation": { + "type": "string" + } + } + }, + "provenance": { + "type": "object", + "additionalProperties": false, + "required": [ + "sourceMcp", + "generatedAt", + "sources" + ], + "properties": { + "sourceMcp": { + "type": "string" + }, + "generatedAt": { + 
"type": "string", + "format": "date-time" + }, + "generatedBy": { + "type": "string" + }, + "traceId": { + "type": "string" + }, + "sources": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/sourceRecord" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } +} diff --git a/src/epacomp_tox/metadata/applicability.py b/src/epacomp_tox/metadata/applicability.py index 39034bb..9dc1706 100644 --- a/src/epacomp_tox/metadata/applicability.py +++ b/src/epacomp_tox/metadata/applicability.py @@ -4,15 +4,22 @@ from pathlib import Path from typing import Any, Dict, Iterable, List, Optional, Tuple -DEFAULT_AD_DIR = Path(Path.cwd(), "metadata", "applicability_domains") +from epacomp_tox.assets import data_file + +DEFAULT_AD_DIR = data_file("metadata", "applicability_domains") class ApplicabilityDomainStore: """File-backed access to applicability domain reference data.""" def __init__(self, directory: Optional[Path] = None): - self.directory = Path(directory or DEFAULT_AD_DIR) - self.directory.mkdir(parents=True, exist_ok=True) + if directory is None: + self.directory = DEFAULT_AD_DIR + self._filesystem_backed = False + else: + self.directory = Path(directory) + self.directory.mkdir(parents=True, exist_ok=True) + self._filesystem_backed = True def list_definitions( self, @@ -37,7 +44,19 @@ def get_definition(self, model_name: str) -> Optional[Dict[str, Any]]: return None def _iter_defs(self) -> Iterable[Dict[str, Any]]: - for path in sorted(self.directory.glob("*.json")): + paths = ( + sorted(self.directory.glob("*.json")) + if self._filesystem_backed + else sorted( + ( + entry + for entry in self.directory.iterdir() + if entry.is_file() and entry.name.endswith(".json") + ), + key=lambda entry: entry.name, + ) + ) + for path in paths: try: payload = json.loads(path.read_text(encoding="utf-8")) except ( @@ -45,5 +64,11 @@ def _iter_defs(self) -> Iterable[Dict[str, Any]]: json.JSONDecodeError, ): # pragma: no cover - 
logged upstream continue - payload["path"] = str(path) + if self._filesystem_backed: + payload["path"] = str(path) + else: + payload["path"] = ( + "package://epacomp_tox.data/metadata/" + f"applicability_domains/{path.name}" + ) yield payload diff --git a/src/epacomp_tox/metadata/model_cards.py b/src/epacomp_tox/metadata/model_cards.py index 73162c8..75122c9 100644 --- a/src/epacomp_tox/metadata/model_cards.py +++ b/src/epacomp_tox/metadata/model_cards.py @@ -7,7 +7,10 @@ from pathlib import Path from typing import Any, Dict, Iterable, List, Optional, Tuple -DEFAULT_MODEL_CARD_DIR = Path(Path.cwd(), "metadata", "model_cards") +from epacomp_tox.assets import data_file + +DEFAULT_MODEL_CARD_DIR = data_file("metadata", "model_cards") +PACKAGED_LAST_MODIFIED = "1970-01-01T00:00:00+00:00" @dataclass @@ -21,8 +24,13 @@ class ModelCardStore: """Simple file-backed store for CompTox model cards.""" def __init__(self, directory: Optional[Path] = None): - self.directory = Path(directory or DEFAULT_MODEL_CARD_DIR) - self.directory.mkdir(parents=True, exist_ok=True) + if directory is None: + self.directory = DEFAULT_MODEL_CARD_DIR + self._filesystem_backed = False + else: + self.directory = Path(directory) + self.directory.mkdir(parents=True, exist_ok=True) + self._filesystem_backed = True def list_cards( self, @@ -42,7 +50,19 @@ def list_cards( return page, next_cursor def _iter_cards(self) -> Iterable[Dict[str, Any]]: - for path in sorted(self.directory.glob("*.json")): + paths = ( + sorted(self.directory.glob("*.json")) + if self._filesystem_backed + else sorted( + ( + entry + for entry in self.directory.iterdir() + if entry.is_file() and entry.name.endswith(".json") + ), + key=lambda entry: entry.name, + ) + ) + for path in paths: try: raw = path.read_text(encoding="utf-8") payload = json.loads(raw) @@ -52,12 +72,20 @@ def _iter_cards(self) -> Iterable[Dict[str, Any]]: ): # pragma: no cover - logged upstream continue checksum = 
hashlib.sha256(raw.encode("utf-8")).hexdigest() - stat = path.stat() + if self._filesystem_backed: + stat = path.stat() + last_modified = datetime.fromtimestamp(stat.st_mtime).isoformat() + path_value = str(path) + else: + last_modified = PACKAGED_LAST_MODIFIED + path_value = ( + f"package://epacomp_tox.data/metadata/model_cards/{path.name}" + ) yield { "card": payload, "checksum": checksum, - "path": str(path), - "lastModified": datetime.fromtimestamp(stat.st_mtime).isoformat(), + "path": path_value, + "lastModified": last_modified, } @staticmethod diff --git a/src/epacomp_tox/orchestrator/audit.py b/src/epacomp_tox/orchestrator/audit.py index 113026d..aeb8709 100644 --- a/src/epacomp_tox/orchestrator/audit.py +++ b/src/epacomp_tox/orchestrator/audit.py @@ -2,10 +2,14 @@ import hashlib import json +import re +import time from datetime import datetime, timezone -from pathlib import Path +from pathlib import Path, PurePosixPath from typing import Dict, Iterable, List, Optional, Tuple, Union +SAFE_PATH_COMPONENT = re.compile(r"^[A-Za-z0-9][A-Za-z0-9_.-]{0,127}$") + class AuditBundleStore: """Durable storage for orchestrator audit bundles and attachments.""" @@ -13,7 +17,7 @@ class AuditBundleStore: def __init__( self, base_dir: Union[str, Path], *, retention_days: Optional[int] = None ) -> None: - self.base_dir = Path(base_dir) + self.base_dir = Path(base_dir).resolve() self.base_dir.mkdir(parents=True, exist_ok=True) self.retention_days = retention_days @@ -26,8 +30,9 @@ def save( run_id = bundle.get("workflowRunId") if not run_id: raise ValueError("Bundle must include 'workflowRunId'.") + run_id = self._safe_component(str(run_id), "workflowRunId") - run_dir = self.base_dir / run_id + run_dir = self._resolve_under_base(self.base_dir / run_id) run_dir.mkdir(parents=True, exist_ok=True) created_at = datetime.now(timezone.utc).isoformat() @@ -35,7 +40,7 @@ def save( bundle, ensure_ascii=False, indent=2, sort_keys=True ).encode("utf-8") bundle_path = run_dir / 
"bundle.json" - bundle_path.write_bytes(payload) + self._atomic_write(bundle_path, payload) bundle_checksum = hashlib.sha256(payload).hexdigest() attachments_meta: List[Dict[str, any]] = [] @@ -43,13 +48,13 @@ def save( attachments_dir = run_dir / "attachments" attachments_dir.mkdir(parents=True, exist_ok=True) for name, content in attachments.items(): - target = attachments_dir / name + safe_name, target = self._safe_attachment_path(attachments_dir, name) target.parent.mkdir(parents=True, exist_ok=True) data = content.encode("utf-8") if isinstance(content, str) else content - target.write_bytes(data) + self._atomic_write(target, data) attachments_meta.append( { - "name": name, + "name": safe_name, "path": str(target.relative_to(self.base_dir)), "size": len(data), "checksum": hashlib.sha256(data).hexdigest(), @@ -71,9 +76,9 @@ def save( } metadata_path = run_dir / "metadata.json" - metadata_path.write_text( - json.dumps(metadata, indent=2, sort_keys=True), - encoding="utf-8", + self._atomic_write( + metadata_path, + json.dumps(metadata, indent=2, sort_keys=True).encode("utf-8"), ) # Update chain manifest with latest hash @@ -100,9 +105,9 @@ def _update_chain_manifest( "updatedAt": created_at, } try: - chain_manifest_path.write_text( - json.dumps(manifest, indent=2, sort_keys=True), - encoding="utf-8", + self._atomic_write( + chain_manifest_path, + json.dumps(manifest, indent=2, sort_keys=True).encode("utf-8"), ) except OSError: # pragma: no cover - defensive pass @@ -122,7 +127,9 @@ def verify_chain(self) -> Tuple[bool, List[str]]: errors.append(f"Run {run_id}: previous hash mismatch") # Recompute bundle hash from file - bundle_path = self.base_dir / meta.get("bundlePath", "") + bundle_path = self._resolve_under_base( + self.base_dir / meta.get("bundlePath", "") + ) if bundle_path.exists(): computed = hashlib.sha256(bundle_path.read_bytes()).hexdigest() if computed != meta.get("bundleChecksum"): @@ -135,13 +142,19 @@ def verify_chain(self) -> Tuple[bool, 
List[str]]: return (not errors, errors) def load_bundle(self, run_id: str) -> Dict[str, any]: - bundle_path = self.base_dir / run_id / "bundle.json" + safe_run_id = self._safe_component(str(run_id), "workflowRunId") + bundle_path = self._resolve_under_base( + self.base_dir / safe_run_id / "bundle.json" + ) if not bundle_path.exists(): raise FileNotFoundError(f"No bundle found for run {run_id}") return json.loads(bundle_path.read_text(encoding="utf-8")) def load_metadata(self, run_id: str) -> Dict[str, any]: - metadata_path = self.base_dir / run_id / "metadata.json" + safe_run_id = self._safe_component(str(run_id), "workflowRunId") + metadata_path = self._resolve_under_base( + self.base_dir / safe_run_id / "metadata.json" + ) if not metadata_path.exists(): raise FileNotFoundError(f"No metadata found for run {run_id}") return json.loads(metadata_path.read_text(encoding="utf-8")) @@ -159,3 +172,38 @@ def list_runs(self) -> List[Dict[str, any]]: except json.JSONDecodeError: continue return runs + + @staticmethod + def _safe_component(value: str, label: str) -> str: + if not SAFE_PATH_COMPONENT.match(value) or ".." 
in value: + raise ValueError(f"Unsafe {label}: {value!r}") + return value + + def _resolve_under_base(self, path: Path) -> Path: + resolved = path.resolve() + try: + resolved.relative_to(self.base_dir) + except ValueError as exc: + raise ValueError("Resolved audit path escapes store root.") from exc + return resolved + + def _safe_attachment_path( + self, attachments_dir: Path, name: Union[str, Path] + ) -> Tuple[str, Path]: + raw_name = str(name).replace("\\", "/") + relative = PurePosixPath(raw_name) + if relative.is_absolute() or not relative.parts: + raise ValueError(f"Unsafe attachment name: {raw_name!r}") + safe_parts = [ + self._safe_component(part, "attachment path component") + for part in relative.parts + ] + safe_name = "/".join(safe_parts) + target = self._resolve_under_base(attachments_dir.joinpath(*safe_parts)) + return safe_name, target + + @staticmethod + def _atomic_write(path: Path, payload: bytes) -> None: + tmp_path = path.with_name(f".{path.name}.{time.time_ns()}.tmp") + tmp_path.write_bytes(payload) + tmp_path.replace(path) diff --git a/src/epacomp_tox/resources/manifest.py b/src/epacomp_tox/resources/manifest.py index f481fef..010ce4e 100644 --- a/src/epacomp_tox/resources/manifest.py +++ b/src/epacomp_tox/resources/manifest.py @@ -3,6 +3,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from epacomp_tox.assets import data_file, iter_data_files from epacomp_tox.contracts import schema_ref from .base import BaseResource @@ -23,7 +24,7 @@ def __init__( ) -> None: super().__init__(api_key) self._server_getter = server_getter - self._repo_root = repo_root or Path(__file__).resolve().parents[3] + self._repo_root = Path(repo_root) if repo_root is not None else None @property def name(self) -> str: @@ -167,8 +168,12 @@ def _tool_entries(self, server: "MCPServer") -> List[Dict[str, Any]]: def _portable_schema_entries(self) -> List[Dict[str, Any]]: entries: List[Dict[str, Any]] = [] - schemas_dir = 
self._repo_root / "schemas" - for path in sorted(schemas_dir.glob("*.json")): + paths = ( + sorted((self._repo_root / "schemas").glob("*.json")) + if self._repo_root is not None + else list(iter_data_files("schemas", suffix=".json")) + ) + for path in paths: if path.name.startswith("."): continue data = self._load_json(path) @@ -187,13 +192,26 @@ def _portable_schema_entries(self) -> List[Dict[str, Any]]: def _response_schema_entries(self) -> List[Dict[str, Any]]: entries: List[Dict[str, Any]] = [] - schemas_root = self._repo_root / "docs" / "contracts" / "schemas" - for path in sorted(schemas_root.glob("*/*.json")): + paths = ( + sorted( + (self._repo_root / "docs" / "contracts" / "schemas").glob("*/*.json") + ) + if self._repo_root is not None + else list( + iter_data_files("contracts", "schemas", suffix=".json", recursive=True) + ) + ) + for path in paths: + relative_path = ( + str(path.relative_to(self._repo_root)) + if self._repo_root is not None + else f"docs/contracts/schemas/{path.parent.name}/{path.name}" + ) entries.append( { "namespace": path.parent.name, "file": path.name, - "path": str(path.relative_to(self._repo_root)), + "path": relative_path, } ) return entries @@ -216,9 +234,16 @@ def _contract_reference( def _portable_example_for(self, schema_file: str) -> Optional[str]: stem = schema_file.replace(".v1.json", "") - candidate = self._repo_root / "schemas" / "examples" / f"{stem}.example.json" - if candidate.exists(): - return str(candidate.relative_to(self._repo_root)) + if self._repo_root is not None: + candidate = ( + self._repo_root / "schemas" / "examples" / f"{stem}.example.json" + ) + if candidate.exists(): + return str(candidate.relative_to(self._repo_root)) + return None + candidate = data_file("schemas", "examples", f"{stem}.example.json") + if candidate.is_file(): + return f"schemas/examples/{stem}.example.json" return None @staticmethod diff --git a/src/epacomp_tox/server.py b/src/epacomp_tox/server.py index a5c8a01..44c303f 100644 --- 
a/src/epacomp_tox/server.py +++ b/src/epacomp_tox/server.py @@ -7,6 +7,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Tuple +from jsonschema.exceptions import ValidationError as JsonSchemaValidationError from pydantic import ValidationError from ctxpy import CtxApiError, RateLimitInfo @@ -19,6 +20,14 @@ from epacomp_tox.validators import to_serializable +class ToolInputValidationError(ValueError): + """Raised when tool input fails advertised JSON Schema validation.""" + + +class ToolOutputValidationError(RuntimeError): + """Raised when structuredContent fails advertised JSON Schema validation.""" + + class MCPServer: """ Model Context Protocol (MCP) server for EPA CompTox data. @@ -149,8 +158,9 @@ def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: """ for resource in self.resources.values(): if resource.has_tool(tool_name): - result = resource.execute_tool(tool_name, parameters) registration = self.tool_registry.get_registration(tool_name) + self._validate_tool_input(registration, parameters or {}) + result = resource.execute_tool(tool_name, parameters) if registration.response_schema_ref: namespace, name = registration.response_schema_ref try: @@ -273,6 +283,7 @@ def call_tool( resource = registration.resource try: + self._validate_tool_input(registration, parameters or {}) validated_params = registration.parameters_model.model_validate( parameters or {} ) @@ -324,6 +335,10 @@ def call_tool( "data": existing_sc, "metadata": combined_metadata, } + if not result.get("isError") and "structuredContent" in result: + self._validate_structured_content( + registration, result["structuredContent"] + ) self._emit_audit_event( tool_name=tool_name, status="success", @@ -336,7 +351,7 @@ def call_tool( params=validated_params.model_dump(exclude_none=True), ) return result - except ValidationError as exc: + except (ValidationError, ToolInputValidationError) as exc: self._emit_audit_event( tool_name=tool_name, 
status="invalid_params", @@ -356,9 +371,8 @@ def call_tool( metadata = self._format_metadata(resource.get_last_metadata()) session_metadata = self._format_session_context(context) error_payload = { - "message": str(exc), + "message": "Upstream CTX request failed.", "status": exc.status, - "detail": exc.detail, "requestId": exc.request_id, "retryAfter": exc.retry_after, } @@ -387,7 +401,7 @@ def call_tool( "content": [ { "type": "text", - "text": f"Tool call failed: {exc}", + "text": "Tool call failed: upstream CTX request failed.", "annotations": {"audience": ["assistant"]}, } ], @@ -407,7 +421,22 @@ def call_tool( params=parameters, error=str(exc), ) - raise + return { + "content": [ + { + "type": "text", + "text": "Tool call failed: internal server error.", + "annotations": {"audience": ["assistant"]}, + } + ], + "structuredContent": self._drop_none_values( + { + "message": "Tool execution failed.", + "correlationId": correlation_id, + } + ), + "isError": True, + } def _emit_audit_event( self, @@ -471,6 +500,43 @@ def _invoke_resource( pass return execute_tool(tool_name, parameters) + @staticmethod + def _format_json_schema_error(error: JsonSchemaValidationError) -> str: + location = ".".join(str(item) for item in error.path) + if location: + return f"{location}: {error.message}" + return error.message + + def _validate_tool_input( + self, registration: Any, parameters: Dict[str, Any] + ) -> None: + errors = sorted( + registration.input_validator.iter_errors(parameters or {}), + key=lambda error: list(error.path), + ) + if errors: + message = "; ".join( + self._format_json_schema_error(error) for error in errors[:5] + ) + raise ToolInputValidationError(message) + + def _validate_structured_content( + self, registration: Any, structured_content: Dict[str, Any] + ) -> None: + if registration.output_validator is None: + return + errors = sorted( + registration.output_validator.iter_errors(structured_content), + key=lambda error: list(error.path), + ) + if errors: + 
message = "; ".join( + self._format_json_schema_error(error) for error in errors[:5] + ) + raise ToolOutputValidationError( + f"Tool '{registration.name}' structuredContent failed schema validation: {message}" + ) + def _find_resource(self, tool_name: str): for resource in self.resources.values(): if resource.has_tool(tool_name): @@ -543,6 +609,7 @@ def _normalise_tool_definition( "description": tool.get("description", ""), "inputSchema": input_schema, "annotations": { + **(tool.get("annotations") or {}), "resource": resource_name, }, } @@ -566,7 +633,7 @@ def register_session( *, client_capabilities: Dict[str, Any], client_info: Optional[Dict[str, Any]] = None, - authentication: Optional[Dict[str, Any]] = None, + auth: Optional[Dict[str, Any]] = None, negotiated_capabilities: Optional[Dict[str, Any]] = None, ) -> None: """Track active session metadata for observability and governance.""" @@ -575,7 +642,7 @@ def register_session( "clientCapabilities": client_capabilities, "negotiatedCapabilities": negotiated_capabilities or {}, "clientInfo": client_info or {}, - "authentication": authentication or {}, + "auth": auth or {}, "lastActivity": datetime.now(tz=timezone.utc).isoformat(), "status": "active", } @@ -690,11 +757,15 @@ def _format_session_context( client_caps = context.get("clientCapabilities") if client_caps: session_view["clientCapabilities"] = client_caps - authentication = context.get("authentication") - if authentication: - session_view["authentication"] = authentication + auth_context = context.get("auth") + if auth_context: + session_view["auth"] = auth_context return session_view or None + @staticmethod + def _drop_none_values(payload: Dict[str, Any]) -> Dict[str, Any]: + return {key: value for key, value in payload.items() if value is not None} + def get_transport_metrics(self) -> Dict[str, Any]: """Summarize negotiated capability flags for observability consumers.""" summary: Dict[str, Any] = { diff --git a/src/epacomp_tox/settings.py 
b/src/epacomp_tox/settings.py index e302fbc..3e3ce06 100644 --- a/src/epacomp_tox/settings.py +++ b/src/epacomp_tox/settings.py @@ -25,6 +25,30 @@ def is_development(self) -> bool: class SecuritySettings: bypass_auth: bool allowed_origins: List[str] + auth_issuer: Optional[str] + auth_audience: Optional[str] + auth_jwks_url: Optional[str] + auth_required_scopes: List[str] + resource_url: str + + @property + def auth_configured(self) -> bool: + return bool(self.auth_issuer and self.auth_audience and self.auth_jwks_url) + + @property + def auth_requested(self) -> bool: + return bool( + self.auth_issuer + or self.auth_audience + or self.auth_jwks_url + or self.auth_required_scopes + ) + + +@dataclass(frozen=True) +class RateLimitSettings: + requests_per_minute: int + burst: int @dataclass(frozen=True) @@ -45,6 +69,7 @@ class TransportSettings: @dataclass(frozen=True) class ObservabilitySettings: metrics_enabled: bool = True + metrics_bypass_auth: bool = False class _RawSettings(BaseSettings): @@ -61,6 +86,19 @@ class _RawSettings(BaseSettings): bypass_auth: bool = Field(default=False, alias="BYPASS_AUTH") cors_allow_origins: Optional[str] = Field(default=None, alias="CORS_ALLOW_ORIGINS") + mcp_auth_issuer: Optional[str] = Field(default=None, alias="MCP_AUTH_ISSUER") + mcp_auth_audience: Optional[str] = Field(default=None, alias="MCP_AUTH_AUDIENCE") + mcp_auth_jwks_url: Optional[str] = Field(default=None, alias="MCP_AUTH_JWKS_URL") + mcp_auth_required_scopes: Optional[str] = Field( + default=None, alias="MCP_AUTH_REQUIRED_SCOPES" + ) + mcp_resource_url: str = Field( + default="http://localhost:8000/mcp", alias="MCP_RESOURCE_URL" + ) + rate_limit_requests_per_minute: int = Field( + default=120, alias="MCP_RATE_LIMIT_REQUESTS_PER_MINUTE" + ) + rate_limit_burst: int = Field(default=20, alias="MCP_RATE_LIMIT_BURST") ctx_api_key: Optional[str] = Field(default=None, alias="CTX_API_KEY") ctx_api_key_legacy: Optional[str] = Field(default=None, alias="EPA_COMPTOX_API_KEY") @@ 
-80,6 +118,7 @@ class _RawSettings(BaseSettings): ) metrics_enabled: bool = Field(default=True, alias="EPACOMP_MCP_METRICS_ENABLED") + metrics_bypass_auth: bool = Field(default=False, alias="MCP_METRICS_BYPASS_AUTH") class Settings(_RawSettings): @@ -95,7 +134,21 @@ def security(self) -> SecuritySettings: origins = [origin.strip() for origin in raw.split(",") if origin.strip()] if not origins and self.app.is_development: origins = ["*"] - return SecuritySettings(bypass_auth=self.bypass_auth, allowed_origins=origins) + scopes = [ + scope.strip() + for chunk in (self.mcp_auth_required_scopes or "").split(",") + for scope in chunk.split() + if scope.strip() + ] + return SecuritySettings( + bypass_auth=bool(self.bypass_auth), + allowed_origins=origins, + auth_issuer=self.mcp_auth_issuer, + auth_audience=self.mcp_auth_audience, + auth_jwks_url=self.mcp_auth_jwks_url, + auth_required_scopes=scopes, + resource_url=self.mcp_resource_url, + ) @cached_property def ctx(self) -> ContextSettings: @@ -126,9 +179,20 @@ def transport(self) -> TransportSettings: heartbeat_timeout=heartbeat, handshake_timeout=handshake ) + @cached_property + def rate_limit(self) -> RateLimitSettings: + rpm = max(0, int(self.rate_limit_requests_per_minute)) + burst = int(self.rate_limit_burst) + if burst <= 0: + burst = rpm + return RateLimitSettings(requests_per_minute=rpm, burst=max(0, burst)) + @cached_property def observability(self) -> ObservabilitySettings: - return ObservabilitySettings(metrics_enabled=bool(self.metrics_enabled)) + return ObservabilitySettings( + metrics_enabled=bool(self.metrics_enabled), + metrics_bypass_auth=bool(self.metrics_bypass_auth), + ) @lru_cache(maxsize=1) diff --git a/src/epacomp_tox/tools/registry.py b/src/epacomp_tox/tools/registry.py index db8de7e..fef4b7d 100644 --- a/src/epacomp_tox/tools/registry.py +++ b/src/epacomp_tox/tools/registry.py @@ -1,9 +1,11 @@ from __future__ import annotations import logging +from copy import deepcopy from dataclasses import 
dataclass from typing import Any, Dict, Iterable, List, Optional, Tuple, Type +from jsonschema import Draft202012Validator from pydantic import BaseModel from epacomp_tox.contracts import load_schema @@ -21,6 +23,8 @@ class ToolRegistration: output_schema: Optional[Dict[str, Any]] resource: BaseResource parameters_model: Type[BaseModel] + input_validator: Draft202012Validator + output_validator: Optional[Draft202012Validator] annotations: Dict[str, Any] response_schema_ref: Optional[Tuple[str, str]] @@ -46,6 +50,7 @@ def register_resource( input_schema = ( tool.get("inputSchema") or tool.get("parameters") or {"type": "object"} ) + input_schema = _normalise_input_schema(input_schema) output_schema = tool.get("outputSchema") response_schema_ref: Optional[Tuple[str, str]] = None @@ -67,6 +72,8 @@ def register_resource( "properties": {"data": output_schema}, "required": ["data"], } + if output_schema: + output_schema = _normalise_output_schema(output_schema) description = tool.get("description", "") parameters_model = create_model_from_schema(name, input_schema) @@ -75,6 +82,10 @@ def register_resource( combined_annotations.update(tool_annotations) if annotations: combined_annotations.update(annotations) + combined_annotations.setdefault("readOnlyHint", True) + combined_annotations.setdefault("destructiveHint", False) + combined_annotations.setdefault("openWorldHint", True) + combined_annotations.setdefault("idempotentHint", True) self._tools[name] = ToolRegistration( name=name, @@ -83,6 +94,10 @@ def register_resource( output_schema=output_schema, resource=resource, parameters_model=parameters_model, + input_validator=Draft202012Validator(input_schema), + output_validator=( + Draft202012Validator(output_schema) if output_schema else None + ), annotations=combined_annotations, response_schema_ref=response_schema_ref, ) @@ -117,3 +132,37 @@ def list_definitions(self) -> List[Dict[str, Any]]: def __iter__(self) -> Iterable[ToolRegistration]: return 
iter(self._tools.values()) + + +def _normalise_input_schema(schema: Dict[str, Any]) -> Dict[str, Any]: + normalised = deepcopy(schema or {"type": "object"}) + if normalised.get("type", "object") == "object": + normalised.setdefault("properties", {}) + normalised.setdefault("additionalProperties", False) + return normalised + + +def _normalise_output_schema(schema: Dict[str, Any]) -> Dict[str, Any]: + normalised = deepcopy(schema) + if normalised.get("type") != "object": + return normalised + properties = normalised.setdefault("properties", {}) + metadata_schema = {"type": "object", "additionalProperties": True} + properties.setdefault("metadata", metadata_schema) + properties.setdefault("mcpMetadata", metadata_schema) + properties.setdefault( + "data", + { + "type": [ + "object", + "array", + "string", + "number", + "integer", + "boolean", + "null", + ], + "additionalProperties": True, + }, + ) + return normalised diff --git a/src/epacomp_tox/transport/common.py b/src/epacomp_tox/transport/common.py index 6476791..8c41833 100644 --- a/src/epacomp_tox/transport/common.py +++ b/src/epacomp_tox/transport/common.py @@ -3,6 +3,7 @@ from __future__ import annotations SUPPORTED_PROTOCOL_VERSIONS = [ + "2025-11-25", "2025-06-18", "2025-03-26", "2024-11-05", diff --git a/src/epacomp_tox/transport/http.py b/src/epacomp_tox/transport/http.py index 575a5eb..5161cf1 100644 --- a/src/epacomp_tox/transport/http.py +++ b/src/epacomp_tox/transport/http.py @@ -14,6 +14,7 @@ PRIMARY_PROTOCOL_VERSION, SUPPORTED_PROTOCOL_VERSIONS, ) +from epacomp_tox.transport.security import AuthContext, AuthError, BearerAuthValidator logger = logging.getLogger(__name__) @@ -28,6 +29,7 @@ UNAUTHORIZED = -32000 FORBIDDEN = -32001 TOOL_EXECUTION_ERROR = -32002 +RATE_LIMITED = -32029 # HTTP transport capabilities advertised during initialize HTTP_SERVER_CAPABILITIES: Dict[str, Any] = { @@ -40,6 +42,14 @@ router = APIRouter() +class RateLimitExceeded(RuntimeError): + """Raised when a tool-call rate limit 
is exceeded.""" + + def __init__(self, retry_after_seconds: float): + super().__init__("Rate limit exceeded") + self.retry_after_seconds = retry_after_seconds + + @router.get("/mcp") async def mcp_probe(request: Request) -> Response: """ @@ -47,6 +57,9 @@ async def mcp_probe(request: Request) -> Response: Returns server info and supported protocol versions without requiring a JSON-RPC body. """ server = _get_mcp_server(request) + auth_response = _require_http_auth_or_response(request, request_id=None) + if auth_response is not None: + return auth_response return JSONResponse( status_code=status.HTTP_200_OK, content={ @@ -58,7 +71,7 @@ async def mcp_probe(request: Request) -> Response: ) -# Issue 3: OAuth discovery placeholder endpoints +# OAuth/OIDC discovery and MCP protected-resource metadata endpoints. @router.get("/.well-known/oauth-authorization-server") @router.get("/mcp/.well-known/oauth-authorization-server") @router.get("/.well-known/oauth-authorization-server/mcp") @@ -70,6 +83,17 @@ async def oauth_discovery_placeholder() -> Response: return JSONResponse(status_code=status.HTTP_200_OK, content={}) +@router.get("/.well-known/oauth-protected-resource") +@router.get("/.well-known/oauth-protected-resource/mcp") +@router.get("/mcp/.well-known/oauth-protected-resource") +async def oauth_protected_resource_metadata(request: Request) -> Response: + validator = _get_auth_validator(request) + return JSONResponse( + status_code=status.HTTP_200_OK, + content=validator.protected_resource_metadata(), + ) + + def _jsonrpc_success(result: Any, request_id: Optional[Any]) -> Dict[str, Any]: response: Dict[str, Any] = {"jsonrpc": JSONRPC_VERSION, "result": result} if request_id is not None: @@ -106,6 +130,54 @@ def _get_mcp_server(request: Request) -> MCPServer: return server +def _get_auth_validator(request: Request) -> BearerAuthValidator: + validator = getattr(request.app.state, "auth_validator", None) + if validator is None: + raise HTTPException( + 
status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Authentication policy unavailable", + ) + return validator + + +def _remote_addr(request: Request) -> str: + client = getattr(request, "client", None) + if client and client.host: + return client.host + return "unknown" + + +def _require_http_auth_or_response( + request: Request, *, request_id: Optional[Any] +) -> Optional[Response]: + validator = _get_auth_validator(request) + try: + auth_context = validator.authenticate_header( + request.headers.get("authorization"), remote_addr=_remote_addr(request) + ) + except AuthError as exc: + return _auth_error_response(validator, exc, request_id=request_id) + request.state.auth_context = auth_context + return None + + +def _auth_error_response( + validator: BearerAuthValidator, exc: AuthError, *, request_id: Optional[Any] +) -> Response: + code = UNAUTHORIZED if exc.status_code == 401 else FORBIDDEN + payload = _jsonrpc_error( + code=code, + message=exc.description, + request_id=request_id, + data={"error": exc.error}, + ) + return JSONResponse( + status_code=exc.status_code, + content=payload, + headers={"WWW-Authenticate": validator.www_authenticate_header(exc)}, + ) + + def _normalize_tool_parameters(params: Dict[str, Any]) -> Dict[str, Any]: """Handle parameter shapes used by various MCP clients.""" if not isinstance(params, dict): @@ -157,8 +229,11 @@ def _build_request_context(request: Request) -> Dict[str, Any]: }, "clientCapabilities": {}, "negotiatedCapabilities": {}, - "transport": {"type": "http"}, + "transport": {"type": "http", "remoteAddress": _remote_addr(request)}, } + auth_context: Optional[AuthContext] = getattr(request.state, "auth_context", None) + if auth_context is not None: + context["auth"] = auth_context.safe_summary() correlation_id = getattr(request.state, "correlation_id", None) if correlation_id: context["correlationId"] = correlation_id @@ -225,6 +300,10 @@ async def mcp_endpoint(request: Request) -> Response: params = 
payload.get("params") or {} jsonrpc_version = payload.get("jsonrpc") + auth_response = _require_http_auth_or_response(request, request_id=request_id) + if auth_response is not None: + return auth_response + # Compatibility: respond to JSON-RPC initialize with a proper JSON-RPC envelope # while still carrying the "connected" shape Codex/Gemini expect. # if isinstance(method, str) and method.lower() in {"initialize", "mcp/initialize"}: @@ -288,13 +367,26 @@ async def mcp_endpoint(request: Request) -> Response: code=FORBIDDEN, message=str(exc), request_id=request_id ) return JSONResponse(status_code=status.HTTP_403_FORBIDDEN, content=content) + except RateLimitExceeded as exc: + content = _jsonrpc_error( + code=RATE_LIMITED, + message="Rate limit exceeded", + request_id=request_id, + data={"retryAfterSeconds": round(exc.retry_after_seconds, 3)}, + ) + return JSONResponse( + status_code=status.HTTP_429_TOO_MANY_REQUESTS, + content=content, + headers={"Retry-After": str(max(1, int(exc.retry_after_seconds)))}, + ) except Exception as exc: # pylint: disable=broad-except logger.exception("Unhandled MCP error") + correlation_id = getattr(request.state, "correlation_id", None) content = _jsonrpc_error( code=INTERNAL_ERROR, message="Internal server error", request_id=request_id, - data=str(exc), + data={"correlationId": correlation_id} if correlation_id else None, ) return JSONResponse( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=content @@ -366,7 +458,13 @@ def _handle_initialize(server: MCPServer, params: Dict[str, Any]) -> Dict[str, A logger.info( "HTTP MCP initialize with capabilities: %s", params.get("capabilities", {}) ) - protocol_version = params.get("protocolVersion") or PRIMARY_PROTOCOL_VERSION + requested_version = params.get("protocolVersion") + if requested_version and requested_version not in SUPPORTED_PROTOCOL_VERSIONS: + raise ValueError( + "Unsupported protocol version. 
Supported versions: " + + ", ".join(SUPPORTED_PROTOCOL_VERSIONS) + ) + protocol_version = requested_version or PRIMARY_PROTOCOL_VERSION # session_id = params.get("sessionId") or str(uuid4()) # Removed as per instructions # Return ONLY standard MCP fields @@ -1064,6 +1162,7 @@ async def _handle_tools_call( tool_params = _normalize_tool_parameters(params) context = _build_request_context(request) + _enforce_rate_limit(request, context=context) try: result = server.call_tool(tool_name, tool_params, context=context) @@ -1122,3 +1221,23 @@ async def _handle_tools_call( raise ValueError("Tool result could not be serialized.") from exc return result + + +def _enforce_rate_limit(request: Request, *, context: Dict[str, Any]) -> None: + limiter = getattr(request.app.state, "rate_limiter", None) + if limiter is None or not getattr(limiter, "enabled", False): + return + auth_context: Optional[AuthContext] = getattr(request.state, "auth_context", None) + fallback_key = f"ip:{_remote_addr(request)}" + key = ( + auth_context.rate_limit_key(fallback_key) + if auth_context is not None + else fallback_key + ) + decision = limiter.check(key) + if not decision.allowed: + context["rateLimit"] = { + "limited": True, + "retryAfterSeconds": round(decision.retry_after_seconds, 3), + } + raise RateLimitExceeded(decision.retry_after_seconds) diff --git a/src/epacomp_tox/transport/security.py b/src/epacomp_tox/transport/security.py new file mode 100644 index 0000000..a06a968 --- /dev/null +++ b/src/epacomp_tox/transport/security.py @@ -0,0 +1,318 @@ +from __future__ import annotations + +import hashlib +import time +from dataclasses import dataclass +from threading import Lock +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple + +from epacomp_tox.settings import AppSettings, RateLimitSettings, SecuritySettings + +try: # pragma: no cover - exercised when JWT validation is configured + import jwt + from jwt import PyJWKClient +except ImportError: # pragma: no cover - 
optional until auth is configured + jwt = None # type: ignore[assignment] + PyJWKClient = None # type: ignore[assignment] + + +class AuthError(RuntimeError): + """Authentication or authorization failure.""" + + def __init__( + self, + *, + status_code: int, + error: str, + description: str, + required_scopes: Optional[Sequence[str]] = None, + ) -> None: + super().__init__(description) + self.status_code = status_code + self.error = error + self.description = description + self.required_scopes = list(required_scopes or []) + + +@dataclass(frozen=True) +class AuthContext: + """Safe authentication summary for sessions, audit, and metadata.""" + + subject_hash: Optional[str] + issuer: Optional[str] + audience: Tuple[str, ...] + scopes: Tuple[str, ...] + expires_at: Optional[int] + token_hash: Optional[str] + bypassed: bool = False + + def safe_summary(self) -> Dict[str, Any]: + summary: Dict[str, Any] = { + "authenticated": self.subject_hash is not None and not self.bypassed, + "scopes": list(self.scopes), + } + if self.subject_hash: + summary["subjectHash"] = self.subject_hash + if self.issuer: + summary["issuer"] = self.issuer + if self.audience: + summary["audience"] = list(self.audience) + if self.expires_at is not None: + summary["expiresAt"] = self.expires_at + if self.token_hash: + summary["tokenHash"] = self.token_hash + if self.bypassed: + summary["bypassed"] = True + return summary + + def rate_limit_key(self, fallback: str) -> str: + if self.subject_hash: + return f"sub:{self.subject_hash}" + if self.token_hash: + return f"tok:{self.token_hash}" + return fallback + + +@dataclass(frozen=True) +class RateLimitDecision: + allowed: bool + retry_after_seconds: float = 0.0 + remaining: int = 0 + + +class InMemoryRateLimiter: + """Simple process-local token-bucket limiter for MCP tool calls.""" + + def __init__(self, settings: RateLimitSettings): + self.requests_per_minute = settings.requests_per_minute + self.burst = max(1, settings.burst) + self._buckets: 
Dict[str, Tuple[float, float]] = {} + self._lock = Lock() + + @property + def enabled(self) -> bool: + return self.requests_per_minute > 0 + + def check(self, key: str) -> RateLimitDecision: + if not self.enabled: + return RateLimitDecision(allowed=True, remaining=self.burst) + + now = time.monotonic() + refill_per_second = self.requests_per_minute / 60.0 + with self._lock: + tokens, last_seen = self._buckets.get(key, (float(self.burst), now)) + elapsed = max(0.0, now - last_seen) + tokens = min(float(self.burst), tokens + elapsed * refill_per_second) + if tokens < 1.0: + retry_after = (1.0 - tokens) / refill_per_second + self._buckets[key] = (tokens, now) + return RateLimitDecision( + allowed=False, + retry_after_seconds=retry_after, + remaining=0, + ) + tokens -= 1.0 + self._buckets[key] = (tokens, now) + return RateLimitDecision( + allowed=True, + remaining=max(0, int(tokens)), + ) + + +class BearerAuthValidator: + """Validate MCP bearer tokens against configured OIDC/JWKS settings.""" + + def __init__( + self, + *, + security: SecuritySettings, + app: AppSettings, + bypass_auth: Optional[bool] = None, + ) -> None: + self.security = security + self.app = app + self.bypass_auth = security.bypass_auth if bypass_auth is None else bypass_auth + self.enabled = self._resolve_enabled() + self.required_scopes = tuple(security.auth_required_scopes) + self._jwks_client = None + if self.enabled: + self._validate_configuration() + if PyJWKClient is None: + raise RuntimeError( + "PyJWT[crypto] is required when MCP bearer authentication is enabled." 
+ ) + self._jwks_client = PyJWKClient(security.auth_jwks_url) # type: ignore[arg-type] + + def _resolve_enabled(self) -> bool: + if self.bypass_auth: + return False + if self.security.auth_configured: + return True + if self.security.auth_requested: + return True + return not self.app.is_development + + def _validate_configuration(self) -> None: + missing = [] + if not self.security.auth_issuer: + missing.append("MCP_AUTH_ISSUER") + if not self.security.auth_audience: + missing.append("MCP_AUTH_AUDIENCE") + if not self.security.auth_jwks_url: + missing.append("MCP_AUTH_JWKS_URL") + if missing: + raise RuntimeError( + "MCP auth is enabled but incomplete; set " + + ", ".join(missing) + + " or use BYPASS_AUTH=1 for local development." + ) + + def authenticate_header( + self, authorization: Optional[str], *, remote_addr: Optional[str] = None + ) -> AuthContext: + if not self.enabled: + return AuthContext( + subject_hash=None, + issuer=None, + audience=(), + scopes=(), + expires_at=None, + token_hash=None, + bypassed=self.bypass_auth, + ) + + scheme, token = _split_authorization(authorization) + if scheme.lower() != "bearer" or not token: + raise AuthError( + status_code=401, + error="invalid_token", + description="Bearer token is required.", + required_scopes=self.required_scopes, + ) + + claims = self._decode_jwt(token) + scopes = tuple(sorted(_extract_scopes(claims))) + missing_scopes = sorted(set(self.required_scopes) - set(scopes)) + if missing_scopes: + raise AuthError( + status_code=403, + error="insufficient_scope", + description="Bearer token is missing required MCP scope.", + required_scopes=self.required_scopes, + ) + + subject = str(claims.get("sub") or "") + audience = claims.get("aud") + return AuthContext( + subject_hash=_hash_value(subject) if subject else None, + issuer=claims.get("iss"), + audience=tuple(str(item) for item in _as_list(audience)), + scopes=scopes, + expires_at=( + claims.get("exp") if isinstance(claims.get("exp"), int) else None + ), + 
token_hash=_hash_value(token), + bypassed=False, + ) + + def _decode_jwt(self, token: str) -> Dict[str, Any]: + if jwt is None or self._jwks_client is None: + raise AuthError( + status_code=401, + error="invalid_token", + description="JWT validation is not available.", + required_scopes=self.required_scopes, + ) + try: + signing_key = self._jwks_client.get_signing_key_from_jwt(token).key + return jwt.decode( + token, + signing_key, + algorithms=[ + "RS256", + "RS384", + "RS512", + "ES256", + "ES384", + "ES512", + ], + audience=_split_config_values(self.security.auth_audience), + issuer=self.security.auth_issuer, + ) + except Exception as exc: + raise AuthError( + status_code=401, + error="invalid_token", + description="Bearer token is invalid.", + required_scopes=self.required_scopes, + ) from exc + + def protected_resource_metadata(self) -> Dict[str, Any]: + metadata: Dict[str, Any] = { + "resource": self.security.resource_url, + "bearer_methods_supported": ["header"], + "scopes_supported": list(self.required_scopes), + } + if self.security.auth_issuer: + metadata["authorization_servers"] = [self.security.auth_issuer] + if self.security.auth_jwks_url: + metadata["jwks_uri"] = self.security.auth_jwks_url + return metadata + + def www_authenticate_header(self, error: Optional[AuthError] = None) -> str: + parts = [ + "Bearer", + f'resource="{self.security.resource_url}"', + f'resource_metadata="{self.security.resource_url.rstrip("/")}/.well-known/oauth-protected-resource"', + ] + scopes = error.required_scopes if error else self.required_scopes + if scopes: + parts.append(f'scope="{" ".join(scopes)}"') + if error is not None: + parts.append(f'error="{error.error}"') + return ", ".join(parts) + + +def _split_authorization(authorization: Optional[str]) -> Tuple[str, str]: + if not authorization: + return "", "" + parts = authorization.strip().split(None, 1) + if len(parts) != 2: + return authorization.strip(), "" + return parts[0], parts[1].strip() + + +def 
_split_config_values(value: Optional[str]) -> Any: + values = [item.strip() for item in (value or "").split(",") if item.strip()] + if not values: + return value + if len(values) == 1: + return values[0] + return values + + +def _extract_scopes(claims: Dict[str, Any]) -> List[str]: + scopes: List[str] = [] + scope_value = claims.get("scope") + if isinstance(scope_value, str): + scopes.extend(scope_value.split()) + scp_value = claims.get("scp") + if isinstance(scp_value, str): + scopes.extend(scp_value.split()) + elif isinstance(scp_value, Iterable): + scopes.extend(str(item) for item in scp_value) + return [scope for scope in scopes if scope] + + +def _as_list(value: Any) -> List[Any]: + if value is None: + return [] + if isinstance(value, list): + return value + if isinstance(value, tuple): + return list(value) + return [value] + + +def _hash_value(value: str) -> str: + return hashlib.sha256(value.encode("utf-8")).hexdigest()[:24] diff --git a/src/epacomp_tox/transport/websocket.py b/src/epacomp_tox/transport/websocket.py index bb38c33..d19ce95 100644 --- a/src/epacomp_tox/transport/websocket.py +++ b/src/epacomp_tox/transport/websocket.py @@ -9,8 +9,16 @@ from functools import partial from typing import Any, Dict, List, Optional -from fastapi import FastAPI, HTTPException, Response, WebSocket, WebSocketDisconnect +from fastapi import ( + FastAPI, + HTTPException, + Request, + Response, + WebSocket, + WebSocketDisconnect, +) from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse from prometheus_client import ( CONTENT_TYPE_LATEST, CollectorRegistry, @@ -26,6 +34,12 @@ SUPPORTED_PROTOCOL_VERSIONS, ) from epacomp_tox.transport.http import router as http_router +from epacomp_tox.transport.security import ( + AuthContext, + AuthError, + BearerAuthValidator, + InMemoryRateLimiter, +) from epacomp_tox.validators import to_serializable logger = logging.getLogger(__name__) @@ -38,6 +52,7 @@ CANCELLED_ERROR_CODE = -32800 
CAPABILITY_NOT_NEGOTIATED_ERROR_CODE = -32004 +RATE_LIMITED_ERROR_CODE = -32029 class AuditMiddleware: @@ -182,9 +197,18 @@ def __init__( class MCPWebSocketSession: """Manage a single MCP WebSocket session and JSON-RPC message loop.""" - def __init__(self, websocket: WebSocket, server: MCPServer): + def __init__( + self, + websocket: WebSocket, + server: MCPServer, + *, + auth_context: AuthContext, + rate_limiter: Optional[InMemoryRateLimiter] = None, + ): self.websocket = websocket self.server = server + self.auth_context = auth_context + self.rate_limiter = rate_limiter self.initialized = False self.protocol_version: Optional[str] = None self.session_id = str(uuid.uuid4()) @@ -197,7 +221,6 @@ def __init__(self, websocket: WebSocket, server: MCPServer): DEFAULT_SERVER_CAPABILITIES ) self.client_info: Dict[str, Any] = {} - self.authentication: Dict[str, Any] = {} self._close_reason = "disconnect" self.active_requests: Dict[str, Dict[str, Any]] = {} self._streams_enabled = True @@ -341,7 +364,6 @@ async def _handle_initialize(self, message_id: Any, params: Dict[str, Any]) -> N self.negotiated_capabilities.get("tools", {}).get("cancel", False) ) self.client_info = params.get("clientInfo") or {} - self.authentication = params.get("authentication") or {} transport_settings = self.server.get_transport_options() heartbeat_override = params.get("heartbeatIntervalMs") if isinstance(heartbeat_override, (int, float)) and heartbeat_override > 0: @@ -354,7 +376,7 @@ async def _handle_initialize(self, message_id: Any, params: Dict[str, Any]) -> N self.session_id, client_capabilities=self.client_capabilities, client_info=self.client_info, - authentication=self.authentication, + auth=self.auth_context.safe_summary(), negotiated_capabilities=self.negotiated_capabilities, ) server_info = self.server.get_server_info() @@ -549,6 +571,19 @@ async def _handle_tools_call(self, message_id: Any, params: Dict[str, Any]) -> N message_id, code=-32602, message="Tool arguments must be an 
object" ) return + if self.rate_limiter is not None and self.rate_limiter.enabled: + fallback_key = f"ws:{self.session_id}" + decision = self.rate_limiter.check( + self.auth_context.rate_limit_key(fallback_key) + ) + if not decision.allowed: + await self._send_error( + message_id, + code=RATE_LIMITED_ERROR_CODE, + message="Rate limit exceeded", + data={"retryAfterSeconds": round(decision.retry_after_seconds, 3)}, + ) + return request_id = params.get("requestId") or str(uuid.uuid4()) timeout_ms = params.get("timeoutMs") timeout_seconds: Optional[float] = None @@ -850,7 +885,7 @@ async def _run_tool_call( raise ToolExecutionError( code=-32603, message="Tool execution failed", - data={"detail": str(exc)}, + data={"reason": "internal_error"}, ) from exc async def _emit_event(self, method: str, params: Dict[str, Any]) -> None: @@ -869,7 +904,7 @@ def _session_context(self) -> Dict[str, Any]: "clientInfo": deepcopy(self.client_info), "clientCapabilities": deepcopy(self.client_capabilities), "negotiatedCapabilities": deepcopy(self.negotiated_capabilities), - "authentication": deepcopy(self.authentication), + "auth": deepcopy(self.auth_context.safe_summary()), } @@ -921,10 +956,43 @@ def _json_default(value: Any) -> Any: return converted -def create_app(server: Optional[MCPServer] = None) -> FastAPI: +def _remote_addr_from_request(request: Request) -> str: + if request.client and request.client.host: + return request.client.host + return "unknown" + + +def _remote_addr_from_websocket(websocket: WebSocket) -> str: + if websocket.client and websocket.client.host: + return websocket.client.host + return "unknown" + + +def _metrics_auth_response( + validator: BearerAuthValidator, exc: AuthError +) -> JSONResponse: + return JSONResponse( + status_code=exc.status_code, + content={"detail": exc.description, "error": exc.error}, + headers={"WWW-Authenticate": validator.www_authenticate_header(exc)}, + ) + + +def create_app( + server: Optional[MCPServer] = None, + *, + auth_bypass: 
Optional[bool] = None, + auth_validator: Optional[BearerAuthValidator] = None, +) -> FastAPI: """Create a FastAPI application exposing the MCP WebSocket transport.""" app = FastAPI(title="EPA CompTox MCP Server") + app.state.auth_validator = auth_validator or BearerAuthValidator( + security=settings.security, + app=settings.app, + bypass_auth=auth_bypass, + ) + app.state.rate_limiter = InMemoryRateLimiter(settings.rate_limit) allowed_origins = settings.security.allowed_origins if not allowed_origins and settings.app.is_development: @@ -977,7 +1045,18 @@ async def readyz() -> Dict[str, Any]: return {"status": "ok", "ctx": health} @app.get("/metrics", tags=["metrics"]) - async def metrics() -> Response: + async def metrics(request: Request) -> Response: + if not settings.observability.metrics_enabled: + raise HTTPException(status_code=404, detail="Metrics disabled") + if not settings.observability.metrics_bypass_auth: + validator: BearerAuthValidator = app.state.auth_validator + try: + validator.authenticate_header( + request.headers.get("authorization"), + remote_addr=_remote_addr_from_request(request), + ) + except AuthError as exc: + return _metrics_auth_response(validator, exc) server_instance = getattr(app.state, "mcp_server", None) payload = _render_prometheus_metrics(server_instance) return Response(content=payload, media_type=CONTENT_TYPE_LATEST) @@ -1009,7 +1088,35 @@ async def websocket_endpoint(websocket: WebSocket) -> None: await websocket.close() return - session = MCPWebSocketSession(websocket=websocket, server=server_instance) + validator: BearerAuthValidator = app.state.auth_validator + try: + auth_context = validator.authenticate_header( + websocket.headers.get("authorization"), + remote_addr=_remote_addr_from_websocket(websocket), + ) + except AuthError as exc: + await websocket.accept() + await websocket.send_text( + json.dumps( + { + "jsonrpc": "2.0", + "error": { + "code": (-32000 if exc.status_code == 401 else -32001), + "message": 
exc.description, + "data": {"error": exc.error}, + }, + } + ) + ) + await websocket.close(code=4401 if exc.status_code == 401 else 4403) + return + + session = MCPWebSocketSession( + websocket=websocket, + server=server_instance, + auth_context=auth_context, + rate_limiter=app.state.rate_limiter, + ) await session.run() app.include_router(http_router) diff --git a/tests/test_audit_hardening.py b/tests/test_audit_hardening.py index 2fa663c..f22eb73 100644 --- a/tests/test_audit_hardening.py +++ b/tests/test_audit_hardening.py @@ -139,3 +139,36 @@ def test_bundle_store_verify_chain_detects_missing_file(tmp_path: Path): valid, errors = store.verify_chain() assert valid is False assert any("bundle file missing" in e for e in errors) + + +def test_bundle_store_rejects_unsafe_workflow_run_id(tmp_path: Path) -> None: + store = AuditBundleStore(tmp_path) + + with pytest.raises(ValueError): + store.save({"workflowRunId": "../escape", "data": "a"}) + + +def test_bundle_store_rejects_attachment_traversal(tmp_path: Path) -> None: + store = AuditBundleStore(tmp_path) + + with pytest.raises(ValueError): + store.save( + {"workflowRunId": "run-1", "data": "a"}, + attachments={"../escape.txt": "nope"}, + ) + assert not (tmp_path.parent / "escape.txt").exists() + + +def test_bundle_store_allows_nested_safe_attachments(tmp_path: Path) -> None: + store = AuditBundleStore(tmp_path) + + metadata = store.save( + {"workflowRunId": "run-1", "data": "a"}, + attachments={"interop/aop_linkage_summary.json": "{}"}, + ) + + attachment = ( + tmp_path / "run-1" / "attachments" / "interop" / "aop_linkage_summary.json" + ) + assert attachment.exists() + assert metadata["attachments"][0]["name"] == "interop/aop_linkage_summary.json" diff --git a/tests/test_http_transport.py b/tests/test_http_transport.py index 177f9a5..b92f096 100644 --- a/tests/test_http_transport.py +++ b/tests/test_http_transport.py @@ -196,6 +196,9 @@ def test_http_transport_initialize_and_list_and_call(): assert 
any(tool["name"] == "echo" for tool in tools) first_tool = next(tool for tool in tools if tool["name"] == "echo") assert first_tool["annotations"]["resource"] == "echo" + assert first_tool["annotations"]["readOnlyHint"] is True + assert first_tool["annotations"]["destructiveHint"] is False + assert first_tool["annotations"]["openWorldHint"] is True call_response = client.post( "/mcp", diff --git a/tests/test_package_assets.py b/tests/test_package_assets.py new file mode 100644 index 0000000..1240b6e --- /dev/null +++ b/tests/test_package_assets.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +import os +import subprocess +import sys +import venv +import zipfile +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +PACKAGE_DATA = ROOT / "src" / "epacomp_tox" / "data" + + +def _relative_files(root: Path) -> dict[str, bytes]: + return { + str(path.relative_to(root)): path.read_bytes() + for path in root.rglob("*") + if path.is_file() and path.suffix in {".json", ".md"} + } + + +def test_packaged_runtime_assets_match_source_copies() -> None: + source_roots = { + "contracts/schemas": ROOT / "docs" / "contracts" / "schemas", + "schemas": ROOT / "schemas", + "metadata/model_cards": ROOT / "metadata" / "model_cards", + "metadata/applicability_domains": ROOT / "metadata" / "applicability_domains", + } + package_roots = { + "contracts/schemas": PACKAGE_DATA / "contracts" / "schemas", + "schemas": PACKAGE_DATA / "schemas", + "metadata/model_cards": PACKAGE_DATA / "metadata" / "model_cards", + "metadata/applicability_domains": PACKAGE_DATA + / "metadata" + / "applicability_domains", + } + + for label, source_root in source_roots.items(): + assert _relative_files(package_roots[label]) == _relative_files(source_root) + + +def test_wheel_contains_runtime_assets_and_instantiates_server(tmp_path: Path) -> None: + wheel_dir = tmp_path / "wheelhouse" + wheel_dir.mkdir() + build = subprocess.run( + [ + sys.executable, + "-m", + "pip", + "wheel", + ".", + 
"--no-deps", + "--wheel-dir", + str(wheel_dir), + ], + cwd=ROOT, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=120, + ) + assert build.returncode == 0, build.stdout + wheel = next(wheel_dir.glob("*.whl")) + + with zipfile.ZipFile(wheel) as archive: + names = set(archive.namelist()) + assert ( + "epacomp_tox/data/contracts/schemas/metadata/model_cards.response.schema.json" + in names + ) + assert "epacomp_tox/data/schemas/comptoxEvidencePack.v1.json" in names + assert "epacomp_tox/data/metadata/model_cards/genra_read_across.json" in names + + venv_dir = tmp_path / "venv" + venv.EnvBuilder(with_pip=True, system_site_packages=True).create(venv_dir) + bin_dir = "Scripts" if os.name == "nt" else "bin" + pip = venv_dir / bin_dir / "pip" + python = venv_dir / bin_dir / "python" + install = subprocess.run( + [str(pip), "install", "--no-deps", str(wheel)], + cwd=tmp_path, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=120, + ) + assert install.returncode == 0, install.stdout + + smoke = subprocess.run( + [ + str(python), + "-c", + ( + "from epacomp_tox.server import MCPServer; " + "s=MCPServer(api_key='dummy-key'); " + "names={t['name'] for t in s.get_tools()}; " + "assert 'metadata_get_model_card' in names; " + "assert 'get_contract_manifest' in names; " + "assert s.call_tool('metadata_get_model_card', {}, context={})" + "['structuredContent']['modelCards']; " + "assert s.call_tool('metadata_list_applicability_domain', {}, context={})" + "['structuredContent']['applicabilityDomains']; " + "assert s.call_tool('get_contract_manifest', {}, context={})" + "['structuredContent']['responseSchemas']" + ), + ], + cwd=tmp_path, + env={key: value for key, value in os.environ.items() if key != "PYTHONPATH"}, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=120, + ) + assert smoke.returncode == 0, smoke.stdout diff --git a/tests/test_security_hardening.py b/tests/test_security_hardening.py new 
file mode 100644 index 0000000..1cbd718 --- /dev/null +++ b/tests/test_security_hardening.py @@ -0,0 +1,246 @@ +from __future__ import annotations + +from typing import Any, Dict, List, Optional + +import pytest +from fastapi.testclient import TestClient +from starlette.websockets import WebSocketDisconnect + +from epacomp_tox.resources.base import BaseResource +from epacomp_tox.server import MCPServer +from epacomp_tox.settings import RateLimitSettings +from epacomp_tox.transport.security import AuthContext, AuthError, InMemoryRateLimiter +from epacomp_tox.transport.websocket import create_app + + +class EchoResource(BaseResource): + def __init__(self, api_key: str = "dummy"): + super().__init__(api_key) + + @property + def name(self) -> str: + return "echo" + + @property + def description(self) -> str: + return "Echo test resource" + + def get_tools(self) -> List[Dict[str, Any]]: + return [ + { + "name": "echo", + "description": "Echo back provided text", + "inputSchema": { + "type": "object", + "properties": {"text": {"type": "string"}}, + "required": ["text"], + }, + } + ] + + def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: + if tool_name != "echo": + raise ValueError("Unknown tool") + self._last_metadata = {"resource": self.name} + return {"echo": parameters["text"]} + + +class CrashingResource(EchoResource): + def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any: + raise RuntimeError("secret-token-value") + + +class EchoServer(MCPServer): + def __init__(self, resource: Optional[BaseResource] = None): + self._resource = resource or EchoResource() + super().__init__(api_key="dummy-key", validate_health=False) + + def _initialize_resources(self) -> Dict[str, BaseResource]: + return {"echo": self._resource} + + +class FakeAuthValidator: + enabled = True + + def authenticate_header( + self, authorization: Optional[str], *, remote_addr: Optional[str] = None + ) -> AuthContext: + if authorization == "Bearer valid": + 
return AuthContext( + subject_hash="subject-hash", + issuer="https://issuer.example", + audience=("mcp://test",), + scopes=("tox:read",), + expires_at=1893456000, + token_hash="token-hash", + ) + if authorization == "Bearer noscope": + raise AuthError( + status_code=403, + error="insufficient_scope", + description="Bearer token is missing required MCP scope.", + required_scopes=["tox:read"], + ) + raise AuthError( + status_code=401, + error="invalid_token", + description="Bearer token is required.", + required_scopes=["tox:read"], + ) + + def protected_resource_metadata(self) -> Dict[str, Any]: + return { + "resource": "https://mcp.example/mcp", + "authorization_servers": ["https://issuer.example"], + "scopes_supported": ["tox:read"], + "bearer_methods_supported": ["header"], + } + + def www_authenticate_header(self, error: Optional[AuthError] = None) -> str: + suffix = f', error="{error.error}"' if error else "" + return ( + 'Bearer, resource="https://mcp.example/mcp", ' + 'scope="tox:read"' + f"{suffix}" + ) + + +def _rpc_call(tool_params: Dict[str, Any]) -> Dict[str, Any]: + return { + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": {"name": "echo", "parameters": tool_params}, + } + + +def test_http_rejects_missing_bearer_token_with_challenge() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + response = client.post("/mcp", json=_rpc_call({"text": "hello"})) + + assert response.status_code == 401 + assert response.json()["error"]["code"] == -32000 + assert "WWW-Authenticate" in response.headers + assert "Bearer" in response.headers["WWW-Authenticate"] + + +def test_http_rejects_valid_token_missing_scope() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + response = client.post( + "/mcp", + json=_rpc_call({"text": "hello"}), + headers={"authorization": "Bearer noscope"}, + ) + + assert 
response.status_code == 403 + assert response.json()["error"]["code"] == -32001 + + +def test_http_does_not_echo_raw_authentication_metadata() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + response = client.post( + "/mcp", + json=_rpc_call({"text": "hello"}), + headers={"authorization": "Bearer valid"}, + ) + + assert response.status_code == 200 + body_text = response.text + assert "Bearer valid" not in body_text + structured = response.json()["result"]["structuredContent"] + assert structured["metadata"]["session"]["auth"]["subjectHash"] == "subject-hash" + + +def test_protected_resource_metadata_endpoint_is_public() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + response = client.get("/.well-known/oauth-protected-resource") + + assert response.status_code == 200 + assert response.json()["resource"] == "https://mcp.example/mcp" + + +def test_invalid_extra_tool_parameter_fails_before_execution() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + response = client.post( + "/mcp", + json=_rpc_call({"text": "hello", "extra": "nope"}), + headers={"authorization": "Bearer valid"}, + ) + + assert response.status_code == 400 + assert "Additional properties" in response.json()["error"]["message"] + + +def test_tool_call_rate_limit_returns_jsonrpc_error() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + app.state.rate_limiter = InMemoryRateLimiter( + RateLimitSettings(requests_per_minute=60, burst=1) + ) + with TestClient(app) as client: + first = client.post( + "/mcp", + json=_rpc_call({"text": "first"}), + headers={"authorization": "Bearer valid"}, + ) + second = client.post( + "/mcp", + json=_rpc_call({"text": "second"}), + headers={"authorization": "Bearer valid"}, + ) + + assert first.status_code == 200 + assert 
second.status_code == 429 + assert second.json()["error"]["code"] == -32029 + + +def test_internal_tool_exception_does_not_leak_raw_detail() -> None: + app = create_app( + server=EchoServer(resource=CrashingResource()), + auth_validator=FakeAuthValidator(), + ) + with TestClient(app) as client: + response = client.post( + "/mcp", + json=_rpc_call({"text": "hello"}), + headers={"authorization": "Bearer valid"}, + ) + + assert response.status_code == 200 + assert response.json()["result"]["isError"] is True + assert "secret-token-value" not in response.text + + +def test_websocket_rejects_missing_bearer_token() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + with client.websocket_connect("/mcp/ws") as websocket: + message = websocket.receive_json() + assert message["error"]["code"] == -32000 + with pytest.raises(WebSocketDisconnect): + websocket.receive_text() + + +def test_websocket_accepts_valid_bearer_token() -> None: + app = create_app(server=EchoServer(), auth_validator=FakeAuthValidator()) + with TestClient(app) as client: + with client.websocket_connect( + "/mcp/ws", headers={"authorization": "Bearer valid"} + ) as websocket: + websocket.send_json( + { + "jsonrpc": "2.0", + "id": 1, + "method": "initialize", + "params": { + "protocolVersion": "2025-11-25", + "capabilities": {}, + "clientInfo": {"name": "test"}, + }, + } + ) + assert websocket.receive_json()["result"]["protocolVersion"] == "2025-11-25" diff --git a/tests/test_tool_registry.py b/tests/test_tool_registry.py index e23b1f8..1549077 100644 --- a/tests/test_tool_registry.py +++ b/tests/test_tool_registry.py @@ -43,14 +43,13 @@ def test_registry_wraps_non_object_output_schema_without_warning_log( registry.register_resource(resource) definition = registry.list_definitions()[0] - assert definition["outputSchema"] == { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": {"type": "string"}, - } - }, - 
"required": ["data"], + output_schema = definition["outputSchema"] + assert output_schema["type"] == "object" + assert output_schema["required"] == ["data"] + assert output_schema["properties"]["data"] == { + "type": "array", + "items": {"type": "string"}, } + assert output_schema["properties"]["metadata"]["additionalProperties"] is True + assert output_schema["properties"]["mcpMetadata"]["additionalProperties"] is True assert not caplog.records