From 494237db6c482eec396a63021b594bfdaeafefb3 Mon Sep 17 00:00:00 2001
From: SSharma-10
Date: Tue, 5 May 2026 15:34:13 +0530
Subject: [PATCH] Readme Update

---
 README.md                                    | 117 ++++++++++++++++++-
 examples/inference/chat_completion_stream.py |  40 +++++++
 examples/inference/image_generation.py       |  32 +++++
 examples/inference/list_models.py            |  23 ++++
 4 files changed, 211 insertions(+), 1 deletion(-)
 create mode 100644 examples/inference/chat_completion_stream.py
 create mode 100644 examples/inference/image_generation.py
 create mode 100644 examples/inference/list_models.py

diff --git a/README.md b/README.md
index c51e8dee..d422e3e4 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,14 @@
 
 A top priority of this project is to ensure the client abides by the API contract. Therefore, the client itself wraps a generated client based on the [DigitalOcean OpenAPI Specification](https://github.com/digitalocean/openapi) to support all of DigitalOcean's HTTP APIs.
 
+> **🚀 New in v0.29.0 — AI & Inference support**
+>
+> `pydo` now ships first-class support for DigitalOcean's
+> [Gradient AI Platform](https://www.digitalocean.com/products/gradient): chat
+> completions (with streaming), image generation, audio, batch inference, and
+> model listing — all from the same `Client`. Jump to
+> [**AI & Inference**](#ai--inference) to get started.
+
 # **Getting Started With the Client**
 
 ## Prerequisites
@@ -36,6 +44,30 @@ from pydo import Client
 
 client = Client(token=os.getenv("DIGITALOCEAN_TOKEN"))
 ```
+> **ℹ️ `api_key=` vs `token=` — what's the difference?**
+>
+> They're the same thing. `token=` and `api_key=` are just two names for
+> the same argument, and both work for every API in `pydo` — infrastructure
+> and inference. Use whichever name reads better in your code.
+>
+> What matters is the **credential** you pass in, not the argument name:
+>
+> | What you're calling | What you need |
+> | --- | --- |
+> | Infrastructure APIs (`droplets`, `ssh_keys`, `kubernetes`, `volumes`, …) | A DigitalOcean API token (PAT). |
+> | Inference APIs (`chat`, `images`, `models`, `audio`, `batches`, `files`, `responses`) | A PAT created with **full access** scope, **or** a Gradient **Model Access Key**. |
+>
+> If you only have a limited-scope PAT, infra calls will work but inference
+> calls will fail with a 401. To fix it, create a new PAT with full access,
+> or use a Model Access Key instead.
+>
+> ```python
+> # All three of these work — pick the one you like:
+> client = Client(token=os.environ["DIGITALOCEAN_TOKEN"]) # full-access PAT
+> client = Client(api_key=os.environ["DIGITALOCEAN_TOKEN"]) # same thing, different name
+> client = Client(api_key=os.environ["MODEL_ACCESS_KEY"]) # Gradient model access key
+> ```
+
 #### Example of Using `pydo` to Access DO Resources
 
 Find below a working example for GETting a ssh_key ([per this http request](https://docs.digitalocean.com/reference/api/api-reference/#operation/sshKeys_list)) and printing the ID associated with the ssh key. If you'd like to try out this quick example, you can follow [these instructions](https://docs.digitalocean.com/products/droplets/how-to/add-ssh-keys/) to add ssh keys to your DO account.
@@ -62,6 +94,83 @@ ID: 123457, NAME: my_prod_ssh_key, FINGERPRINT: eb:76:c7:2a:d3:3e:80:5d:ef:2e:ca
 
 **Note**: More working examples can be found [here](https://github.com/digitalocean/pydo/tree/main/examples).
 
+## **AI & Inference**
+
+> Talk to models on DigitalOcean's Gradient AI Platform with the same
+> `pydo.Client`.
+
+The snippets below use a **DigitalOcean PAT created with full access scope**
+(required for inference APIs). A Gradient Model Access Key works too — see
+the [credentials note](#getting-started-with-the-client) above.
+ +#### Chat completions (streaming) + +```python +import os +from pydo import Client + +client = Client(token=os.environ["DIGITALOCEAN_TOKEN"]) # PAT with full access scope + +stream = client.chat.completions.create( + model="llama3.3-70b-instruct", + messages=[{"role": "user", "content": "Tell me some fun facts about sharks"}], + max_tokens=512, + stream=True, +) + +for chunk in stream: + if not chunk.choices: + continue + delta = chunk.choices[0].delta + piece = delta.get("reasoning_content") or delta.get("content") + if piece: + print(piece, end="", flush=True) +print() +``` + +#### Image generation + +```python +import os, base64 +from pydo import Client + +client = Client(token=os.environ["DIGITALOCEAN_TOKEN"]) # PAT with full access scope + +result = client.images.generate( + model="openai-gpt-image-1", + prompt="A friendly cartoon shark typing on a laptop at a sunny beach", + n=1, +) + +with open("output.png", "wb") as f: + f.write(base64.b64decode(result.data[0].b64_json)) + +print("Image saved as output.png") +``` + +#### List available models + +```python +import os +from pydo import Client + +client = Client(token=os.environ["DIGITALOCEAN_TOKEN"]) # PAT with full access scope + +models = client.models.list() +for model in models["data"]: + print(model["id"]) +``` + +Runnable versions of the three snippets above live in +[`examples/inference/`](examples/inference/): + +- [`chat_completion_stream.py`](examples/inference/chat_completion_stream.py) +- [`image_generation.py`](examples/inference/image_generation.py) +- [`list_models.py`](examples/inference/list_models.py) + +For more (audio, batches, agents, async streaming responses, file uploads, +etc.), see the `inference_*.py` scripts in [`examples/`](examples/). + #### Pagination Example Below is an example on handling pagination. 
One must parse the URL to find the @@ -301,7 +410,13 @@ Short term, we are focused on improving usability and user productivity (part of Long term: -> Model support, expand on supporting functions +> Model support, expand on supporting functions, deepen AI/Inference coverage - The client currently inputs and outputs JSON dictionaries. Adding models would unlock features such as typing and validation. - Add supporting functions to elevate customer experience (i.e. adding a funtion that surfaces IP address for a Droplet) +- **AI & Inference**: continue expanding coverage of the + [Gradient AI Platform](https://www.digitalocean.com/products/gradient) + alongside the infrastructure APIs — keeping chat, images, audio, batches, + responses, agents, and model management feature-complete and idiomatic + from the same `pydo.Client`. `pydo` is an + infrastructure **and** AI SDK; both surfaces are first-class going forward. diff --git a/examples/inference/chat_completion_stream.py b/examples/inference/chat_completion_stream.py new file mode 100644 index 00000000..b0640569 --- /dev/null +++ b/examples/inference/chat_completion_stream.py @@ -0,0 +1,40 @@ +"""Stream a chat completion from the Gradient AI Platform token-by-token. + +Usage: + # The PAT must be created with FULL ACCESS scope to call inference APIs. 
+    export DIGITALOCEAN_TOKEN="your-full-access-pat"
+    python examples/inference/chat_completion_stream.py
+"""
+
+import os
+
+from pydo import Client
+
+
+def main() -> None:
+    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
+
+    stream = client.chat.completions.create(
+        model="llama3.3-70b-instruct",
+        messages=[
+            {
+                "role": "user",
+                "content": "Tell me some fun facts about sharks.",
+            }
+        ],
+        max_tokens=512,
+        stream=True,
+    )
+
+    for chunk in stream:
+        if not chunk.choices:
+            continue
+        delta = chunk.choices[0].delta
+        piece = delta.get("reasoning_content") or delta.get("content")
+        if piece:
+            print(piece, end="", flush=True)
+    print()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/inference/image_generation.py b/examples/inference/image_generation.py
new file mode 100644
index 00000000..41e580bb
--- /dev/null
+++ b/examples/inference/image_generation.py
@@ -0,0 +1,32 @@
+"""Generate an image with the Gradient AI Platform and save it to disk.
+
+Usage:
+    # The PAT must be created with FULL ACCESS scope to call inference APIs.
+    export DIGITALOCEAN_TOKEN="your-full-access-pat"
+    python examples/inference/image_generation.py
+"""
+
+import base64
+import os
+
+from pydo import Client
+
+
+def main() -> None:
+    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
+
+    result = client.images.generate(
+        model="openai-gpt-image-1",
+        prompt="A friendly shark typing on a laptop at a sunny beach",
+        n=1,
+    )
+
+    output_path = "output.png"
+    with open(output_path, "wb") as f:
+        f.write(base64.b64decode(result.data[0].b64_json))
+
+    print(f"Image saved as {output_path}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/inference/list_models.py b/examples/inference/list_models.py
new file mode 100644
index 00000000..0b3ad16a
--- /dev/null
+++ b/examples/inference/list_models.py
@@ -0,0 +1,23 @@
+"""List every inference model available to the current Gradient account.
+ +Usage: + # The PAT must be created with FULL ACCESS scope to call inference APIs. + export DIGITALOCEAN_TOKEN="your-full-access-pat" + python examples/inference/list_models.py +""" + +import os + +from pydo import Client + + +def main() -> None: + client = Client(token=os.environ["DIGITALOCEAN_TOKEN"]) + + models = client.models.list() + for model in models["data"]: + print(model["id"]) + + +if __name__ == "__main__": + main()