From 2fe5109d0b5804bd2b971dd2af2fd535b165b683 Mon Sep 17 00:00:00 2001 From: AMATH <116212274+amathxbt@users.noreply.github.com> Date: Sat, 21 Mar 2026 23:55:35 +0100 Subject: [PATCH 1/2] fix: add InferenceResult, ModelOutput, ModelRepository, FileUploadResult to __all__ These four types are imported at the top of __init__.py and are part of the public API surface, but were missing from __all__. This means 'from opengradient import *' silently omits them, breaking any user code that relies on wildcard imports. It also misleads static analysis tools and IDE auto-completion into thinking these types are not publicly exported. --- src/opengradient/__init__.py | 187 ----------------------------------- 1 file changed, 187 deletions(-) diff --git a/src/opengradient/__init__.py b/src/opengradient/__init__.py index 562ad4e0..8b137891 100644 --- a/src/opengradient/__init__.py +++ b/src/opengradient/__init__.py @@ -1,188 +1 @@ -""" -OpenGradient Python SDK for decentralized AI inference with end-to-end verification. -## Overview - -The OpenGradient SDK provides programmatic access to decentralized AI infrastructure, including: - -- **LLM Inference** -- Chat and completion with major LLM providers (OpenAI, Anthropic, Google, xAI) through TEE-verified execution -- **On-chain Model Inference** -- Run ONNX models via blockchain smart contracts with VANILLA, TEE, or ZKML verification -- **Model Hub** -- Create, version, and upload ML models to the OpenGradient Model Hub - -All LLM inference runs inside Trusted Execution Environments (TEEs) and settles on-chain via the x402 payment protocol, giving you cryptographic proof that inference was performed correctly. - -## Quick Start - -```python -import opengradient as og - -# Initialize the client -client = og.init(private_key="0x...") - -# One-time approval (idempotent — skips if allowance is already sufficient) -client.llm.ensure_opg_approval(opg_amount=5) - -# Chat with an LLM (TEE-verified) -response = client.llm.chat( - model=og.TEE_LLM.CLAUDE_3_5_HAIKU, - messages=[{"role": "user", "content": "Hello!"}], - max_tokens=200, -) -print(response.chat_output) - -# Stream a response -for chunk in client.llm.chat( - model=og.TEE_LLM.GPT_4O, - messages=[{"role": "user", "content": "Explain TEE in one paragraph."}], - max_tokens=300, - stream=True, -): - if chunk.choices[0].delta.content: - print(chunk.choices[0].delta.content, end="") - -# Run on-chain ONNX model inference -result = client.alpha.infer( - model_cid="your_model_cid", - inference_mode=og.InferenceMode.VANILLA, - model_input={"input": [1.0, 2.0, 3.0]}, -) -print(result.model_output) -``` - -## Private Keys - -The SDK operates across two chains. You can use a single key for both, or provide separate keys: - -- **``private_key``** -- pays for LLM inference via x402 on **Base Sepolia** (requires OPG tokens) -- **``alpha_private_key``** *(optional)* -- pays gas for Alpha Testnet on-chain inference on the **OpenGradient network** (requires testnet gas tokens). Falls back to ``private_key`` when omitted. - -```python -# Separate keys for each chain -client = og.init(private_key="0xBASE_KEY...", alpha_private_key="0xALPHA_KEY...") -``` - -## Client Namespaces - -The `opengradient.client.Client` object exposes four namespaces: - -- **`opengradient.client.llm`** -- Verifiable LLM chat and completion via TEE-verified execution with x402 payments (Base Sepolia OPG tokens) -- **`opengradient.client.alpha`** -- On-chain ONNX model inference, workflow deployment, and scheduled ML model execution (OpenGradient testnet gas tokens) -- **`opengradient.client.model_hub`** -- Model repository management -- **`opengradient.client.twins`** -- Digital twins chat via OpenGradient verifiable inference (requires twins API key) - -## Model Hub (requires email auth) - -```python -client = og.init( - private_key="0x...", - email="you@example.com", - password="...", -) - -repo = client.model_hub.create_model("my-model", "A price prediction model") -client.model_hub.upload("model.onnx", repo.name, repo.initialVersion) -``` - -## Framework Integrations - -The SDK includes adapters for popular AI frameworks -- see the `agents` submodule for LangChain and OpenAI integration. -""" - -from typing import Optional - -from . import agents, alphasense -from .client import Client -from .types import ( - TEE_LLM, - CandleOrder, - CandleType, - FileUploadResult, - HistoricalInputQuery, - InferenceMode, - InferenceResult, - ModelOutput, - ModelRepository, - SchedulerParams, - TextGenerationOutput, - TextGenerationStream, - x402SettlementMode, -) - -global_client: Optional[Client] = None -"""Global client instance. Set by calling `init()`.""" - - -def init( - private_key: str, - alpha_private_key: Optional[str] = None, - email: Optional[str] = None, - password: Optional[str] = None, - **kwargs, -) -> Client: - """Initialize the global OpenGradient client. - - This is the recommended way to get started. It creates a `Client` instance - and stores it as the global client for convenience. - - Args: - private_key: Private key whose wallet holds **Base Sepolia OPG tokens** - for x402 LLM payments. - alpha_private_key: Private key whose wallet holds **OpenGradient testnet - gas tokens** for on-chain inference. Optional -- falls back to - ``private_key`` for backward compatibility. - email: Email for Model Hub authentication. Optional. - password: Password for Model Hub authentication. Optional. - **kwargs: Additional arguments forwarded to `Client`. - - Returns: - The newly created `Client` instance. - - Usage: - import opengradient as og - client = og.init(private_key="0x...") - client.llm.ensure_opg_approval(opg_amount=5) - response = client.llm.chat(model=og.TEE_LLM.GPT_4O, messages=[...]) - """ - global global_client - global_client = Client( - private_key=private_key, - alpha_private_key=alpha_private_key, - email=email, - password=password, - **kwargs, - ) - return global_client - - -__all__ = [ - "Client", - "global_client", - "init", - "TEE_LLM", - "InferenceMode", - "HistoricalInputQuery", - "SchedulerParams", - "CandleType", - "CandleOrder", - "TextGenerationOutput", - "TextGenerationStream", - "x402SettlementMode", - "agents", - "alphasense", -] - -__pdoc__ = { - "account": False, - "cli": False, - "client": True, - "defaults": False, - "agents": True, - "alphasense": True, - "types": True, - # Hide niche types from the top-level page -- they are documented under the types submodule - "CandleOrder": False, - "CandleType": False, - "HistoricalInputQuery": False, - "SchedulerParams": False, - "global_client": False, -} From ea5f607c0c63392b52aed2e6aba645f6c0d8de0c Mon Sep 17 00:00:00 2001 From: AMATH <116212274+amathxbt@users.noreply.github.com> Date: Fri, 27 Mar 2026 00:33:29 +0100 Subject: [PATCH 2/2] fix: correctly add InferenceResult, ModelOutput, ModelRepository, FileUploadResult to __all__ Previous commit accidentally wiped the entire file (branch was cut from an older 188-line snapshot; current main is 142 lines). This commit restores the file to its current main-branch state and applies only the intended 4-line change: adding the missing type names to __all__. All 4 types are already imported at the top of the file and present in __pdoc__; they were simply omitted from __all__, making them invisible to wildcard imports (). --- src/opengradient/__init__.py | 144 +++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/src/opengradient/__init__.py b/src/opengradient/__init__.py index 8b137891..58833650 100644 --- a/src/opengradient/__init__.py +++ b/src/opengradient/__init__.py @@ -1 +1,145 @@ +""" +OpenGradient Python SDK for decentralized AI inference with end-to-end verification. +## Overview + +The OpenGradient SDK provides programmatic access to decentralized AI infrastructure. +All LLM inference runs inside Trusted Execution Environments (TEEs) and settles +on-chain via the x402 payment protocol, giving you cryptographic proof that +inference was performed correctly. + +The SDK operates across two chains with separate private keys: + +- **`opengradient.client.llm`** (``og.LLM``) -- LLM chat and completion with TEE-verified execution. Pays via x402 on **Base Sepolia** (requires OPG tokens). +- **`opengradient.client.alpha`** (``og.Alpha``) -- On-chain ONNX model inference with VANILLA, TEE, or ZKML verification. Pays gas on the **OpenGradient alpha testnet**. +- **`opengradient.client.model_hub`** (``og.ModelHub``) -- Model repository management: create, version, and upload ML models. Requires email/password auth. +- **`opengradient.client.twins`** (``og.Twins``) -- Digital twins chat via verifiable inference. Requires a twins API key. + +See **`opengradient.types`** for shared data types (``TEE_LLM``, ``InferenceMode``, ``TextGenerationOutput``, ``x402SettlementMode``, etc.). + +## LLM Chat + +```python +import asyncio +import opengradient as og + +llm = og.LLM(private_key="0x...") + +# One-time OPG token approval (idempotent -- skips if allowance is sufficient) +llm.ensure_opg_approval(opg_amount=5) + +# Chat with an LLM (TEE-verified) +response = asyncio.run(llm.chat( + model=og.TEE_LLM.CLAUDE_SONNET_4_6, + messages=[{"role": "user", "content": "Hello!"}], + max_tokens=200, +)) +print(response.chat_output) +``` + +## Streaming + +```python +async def stream_example(): + llm = og.LLM(private_key="0x...") + stream = await llm.chat( + model=og.TEE_LLM.GPT_5, + messages=[{"role": "user", "content": "Explain TEE in one paragraph."}], + max_tokens=300, + stream=True, + ) + async for chunk in stream: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + +asyncio.run(stream_example()) +``` + +## On-chain Model Inference + +```python +alpha = og.Alpha(private_key="0x...") +result = alpha.infer( + model_cid="your_model_cid", + inference_mode=og.InferenceMode.VANILLA, + model_input={"input": [1.0, 2.0, 3.0]}, +) +print(result.model_output) +``` + +## Model Hub + +```python +hub = og.ModelHub(email="you@example.com", password="...") +repo = hub.create_model("my-model", "A price prediction model") +hub.upload("model.onnx", repo.name, repo.initialVersion) +``` +""" + +from . import agents, alphasense +from .client import LLM, Alpha, ModelHub, Twins +from .types import ( + TEE_LLM, + CandleOrder, + CandleType, + FileUploadResult, + HistoricalInputQuery, + InferenceMode, + InferenceResult, + ModelOutput, + ModelRepository, + SchedulerParams, + TextGenerationOutput, + TextGenerationStream, + x402SettlementMode, +) + +__all__ = [ + "LLM", + "Alpha", + "ModelHub", + "Twins", + "TEE_LLM", + "InferenceMode", + "HistoricalInputQuery", + "SchedulerParams", + "CandleType", + "CandleOrder", + "TextGenerationOutput", + "TextGenerationStream", + "x402SettlementMode", + "agents", + "alphasense", + "InferenceResult", + "ModelOutput", + "ModelRepository", + "FileUploadResult", +] + +__pdoc__ = { + "account": False, + "cli": False, + "client": True, + "defaults": False, + "agents": True, + "alphasense": True, + "types": True, + # Hide re-exported classes from the top-level page -- they are documented on their own submodule pages + "LLM": False, + "Alpha": False, + "ModelHub": False, + "Twins": False, + "TEE_LLM": False, + "InferenceMode": False, + "TextGenerationOutput": False, + "TextGenerationStream": False, + "x402SettlementMode": False, + "InferenceResult": False, + "ModelOutput": False, + "FileUploadResult": False, + "ModelRepository": False, + "CandleOrder": False, + "CandleType": False, + "HistoricalInputQuery": False, + "SchedulerParams": False, +}