diff --git a/.claude/worktrees/zealous-cartwright b/.claude/worktrees/zealous-cartwright
new file mode 160000
index 00000000..7523537b
--- /dev/null
+++ b/.claude/worktrees/zealous-cartwright
@@ -0,0 +1 @@
+Subproject commit 7523537b5f99f6fefb98b41b6bb656943d949909
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..e4af5e8a
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,108 @@
+name: CI
+
+on:
+ pull_request:
+ branches: [main]
+ push:
+ branches: [main]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ # ──────────────────────────────────────────────────────────────
+ # Unit tests — fast, hermetic, no network, no ports
+ # ──────────────────────────────────────────────────────────────
+ unit-tests:
+ name: Unit Tests
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.12", "3.13"]
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ enable-cache: true
+
+ - name: Set up Python ${{ matrix.python-version }}
+ run: uv python install ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: uv sync --locked --all-extras --dev
+
+ - name: Run pre-commit checks
+ run: uv run pre-commit run --all-files
+
+ - name: Run unit tests with coverage
+ run: |
+ uv run pytest tests/unit/ \
+ --cov=bindu \
+ --cov-report=term-missing \
+ --cov-report=xml:coverage.xml \
+ --cov-fail-under=60 \
+ -v
+
+ - name: Upload coverage
+ if: matrix.python-version == '3.13'
+ uses: coverallsapp/github-action@v2
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ file: coverage.xml
+
+ # ──────────────────────────────────────────────────────────────
+ # E2E gRPC tests — real servers, real ports, full round-trip
+ # ──────────────────────────────────────────────────────────────
+ e2e-grpc-tests:
+ name: E2E gRPC Tests
+ runs-on: ubuntu-latest
+ needs: unit-tests
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ enable-cache: true
+
+ - name: Set up Python 3.13
+ run: uv python install 3.13
+
+ - name: Install dependencies
+ run: uv sync --locked --all-extras --dev
+
+ - name: Run E2E gRPC integration tests
+ run: |
+ uv run pytest tests/integration/grpc/ \
+ -v -m e2e \
+ --timeout=60
+
+ # ──────────────────────────────────────────────────────────────
+ # TypeScript SDK — build and verify
+ # ──────────────────────────────────────────────────────────────
+ typescript-sdk:
+ name: TypeScript SDK Build
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+
+ - name: Install and build SDK
+ working-directory: sdks/typescript
+ run: |
+ npm install
+ npm run build
+
+ - name: Verify example dependencies
+ working-directory: examples/typescript-openai-agent
+ run: npm install
diff --git a/.gitignore b/.gitignore
index dca8505c..cc4e55f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -187,6 +187,8 @@ examples/.bindu/public.pem
examples/agent_swarm/.bindu/private.pem
examples/agent_swarm/.bindu/public.pem
+examples/typescript-openai-agent/logs/
+
# Pebbling keys (DID private/public keys)
examples/.pebbling/*
**/.pebbling/private.pem
@@ -203,3 +205,14 @@ bindu/penguin/.bindu/public.pem
.bindu/
postman/*
+node_modules/
+**/node_modules/
+
+# TypeScript SDK
+sdks/typescript/dist/
+sdks/typescript/node_modules/
+
+# Kotlin SDK
+sdks/kotlin/build/
+sdks/kotlin/.gradle/
+sdks/kotlin/bin/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 820e57a5..5d7ad762 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,19 +13,19 @@ repos:
hooks:
- id: ruff
args: ["--fix"]
- exclude: ^examples/
+ exclude: ^(examples/|bindu/grpc/generated/)
- id: ruff-format
- exclude: ^examples/
+ exclude: ^(examples/|bindu/grpc/generated/)
- repo: local
hooks:
- id: ty
name: ty type checker
- entry: uv run ty check bindu/ tests/
+ entry: uv run ty check bindu/ tests/ --exclude bindu/grpc/generated/
language: system
types: [python]
pass_filenames: false
- exclude: ^examples/
+ exclude: ^(examples/|bindu/grpc/generated/)
- id: pytest
name: pytest with coverage
@@ -84,4 +84,4 @@ repos:
rev: 6.3.0
hooks:
- id: pydocstyle
- exclude: ^examples/
+ exclude: ^(examples/|bindu/grpc/generated/)
diff --git a/README.md b/README.md
index 4673a956..509cce0d 100644
--- a/README.md
+++ b/README.md
@@ -221,7 +221,46 @@ $env:BINDU_PORT="4000"
Existing examples that use `http://localhost:3773` are automatically overridden when `BINDU_PORT` is set.
-### Option 2: Zero-Config Local Agent
+### Option 2: TypeScript Agent
+
+Same pattern, different language. Create `index.ts`:
+
+```typescript
+import { bindufy } from "@bindu/sdk";
+import OpenAI from "openai";
+
+const openai = new OpenAI();
+
+bindufy({
+ author: "your.email@example.com",
+ name: "research_agent",
+ description: "A research assistant agent",
+ deployment: { url: "http://localhost:3773", expose: true },
+ skills: ["skills/question-answering"],
+}, async (messages) => {
+ const response = await openai.chat.completions.create({
+ model: "gpt-4o",
+ messages: messages.map(m => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ })),
+ });
+ return response.choices[0].message.content || "";
+});
+```
+
+Run it:
+
+```bash
+npm install @bindu/sdk openai
+npx tsx index.ts
+```
+
+The SDK launches the Bindu core automatically in the background. Your agent is live at `http://localhost:3773` — same A2A protocol, same DID, same everything.
+
+> See [examples/typescript-openai-agent/](examples/typescript-openai-agent/) for the full working example with setup instructions.
+
+### Option 3: Zero-Config Local Agent
Try Bindu without setting up Postgres, Redis, or any cloud services. Runs entirely locally using in-memory storage and scheduler.
@@ -437,6 +476,7 @@ Output:
| 🔄 **Retry Mechanism** | Automatic retry with exponential backoff for resilient agents | [Guide →](https://docs.getbindu.com/bindu/learn/retry/overview) |
| 🔑 **Decentralized Identifiers (DIDs)** | Cryptographic identity for verifiable, secure agent interactions and payment integration | [Guide →](docs/DID.md) |
| 🏥 **Health Check & Metrics** | Monitor agent health and performance with built-in endpoints | [Guide →](docs/HEALTH_METRICS.md) |
+| 🌍 **Language-Agnostic (gRPC)** | Bindufy agents written in TypeScript, Kotlin, Rust, or any language via gRPC adapter | [Guide →](docs/GRPC_LANGUAGE_AGNOSTIC.md) |
---
@@ -502,10 +542,47 @@ NightSky enables swarms of agents. Each Bindu is a dot annotating agents with th
+## 🌍 Language-Agnostic Agents
+
+Bindu isn't limited to Python. Write your agent in **any language** — the gRPC adapter handles the rest.
+
+**Python** (direct, in-process):
+```python
+from bindu.penguin.bindufy import bindufy
+
+bindufy(config, handler)
+```
+
+**TypeScript** (via `@bindu/sdk`):
+```typescript
+import { bindufy } from "@bindu/sdk";
+
+bindufy(config, async (messages) => {
+ const res = await openai.chat.completions.create({ model: "gpt-4o", messages });
+ return res.choices[0].message.content;
+});
+```
+
+**Kotlin** (via `bindu-sdk`):
+```kotlin
+bindufy(config) { messages ->
+ myAgent.run(messages.last().content)
+}
+```
+
+All three produce the same result: a full A2A microservice with DID, auth, x402, scheduling, and storage. The TypeScript/Kotlin SDKs automatically launch the Bindu core in the background — one command, one terminal.
+
+See [examples/](examples/) for working examples and [docs/GRPC_LANGUAGE_AGNOSTIC.md](docs/GRPC_LANGUAGE_AGNOSTIC.md) for full details.
+
+---
+
+
+
## 🛠️ Supported Agent Frameworks
Bindu is **framework-agnostic** and tested with:
+**Python:**
- **AG2** (formerly AutoGen)
- **Agno**
- **CrewAI**
@@ -513,6 +590,13 @@ Bindu is **framework-agnostic** and tested with:
- **LlamaIndex**
- **FastAgent**
+**TypeScript:**
+- **OpenAI SDK**
+- **LangChain.js**
+
+**Kotlin:**
+- **OpenAI Kotlin SDK**
+
Want integration with your favorite framework? [Let us know on Discord](https://discord.gg/3w5zuYUuwt)!
---
@@ -524,10 +608,19 @@ Want integration with your favorite framework? [Let us know on Discord](https://
Bindu maintains **70%+ test coverage** (target: 80%+):
```bash
+# Unit tests (fast, in pre-commit)
+uv run pytest tests/unit/ -v
+
+# E2E gRPC integration tests (real servers, full round-trip)
+uv run pytest tests/integration/grpc/ -v -m e2e
+
+# All tests with coverage
uv run pytest -n auto --cov=bindu --cov-report=term-missing
uv run coverage report --skip-covered --fail-under=70
```
+**CI runs automatically on every PR and on pushes to `main`** — unit tests, E2E gRPC tests, and TypeScript SDK build verification. See [`.github/workflows/ci.yml`](.github/workflows/ci.yml).
+
---
@@ -653,10 +746,11 @@ Grateful to these projects:
## 🗺️ Roadmap
-- [ ] GRPC transport support
+- [x] gRPC transport + language-agnostic SDKs (TypeScript, Kotlin)
- [ ] Increase test coverage to 80% (in progress)
- [ ] AP2 end-to-end support
- [ ] DSPy integration (in progress)
+- [ ] Rust SDK
- [ ] MLTS support
- [ ] X402 support with other facilitators
diff --git a/bindu/cli/__init__.py b/bindu/cli/__init__.py
new file mode 100644
index 00000000..182ac080
--- /dev/null
+++ b/bindu/cli/__init__.py
@@ -0,0 +1,94 @@
+"""Bindu CLI — command-line interface for the Bindu framework.
+
+Provides the `bindu` command with subcommands:
+ - bindu serve --grpc : Start the Bindu core with gRPC server for SDK registration
+
+The CLI is primarily an internal interface used by language SDKs (TypeScript,
+Kotlin, Rust) to spawn the Python core as a child process. End users typically
+use bindufy() directly in their Python scripts.
+"""
+
+import argparse
+import signal
+import sys
+
+from bindu.utils.logging import get_logger
+
+logger = get_logger("bindu.cli")
+
+
+def _handle_serve(args: argparse.Namespace) -> None:
+ """Handle the `bindu serve` command.
+
+ Starts the gRPC server on the specified port and waits for SDK agents
+ to register via RegisterAgent. When an agent registers, the core runs
+ the full bindufy logic and starts an HTTP server for that agent.
+
+ Args:
+ args: Parsed CLI arguments (port, grpc_port, grpc flag).
+ """
+ if not args.grpc:
+ print("Error: --grpc flag is required for `bindu serve`")
+ print("Usage: bindu serve --grpc [--grpc-port 3774]")
+ sys.exit(1)
+
+ # Import here to avoid loading heavy dependencies on --help
+ from bindu.grpc.registry import AgentRegistry
+ from bindu.grpc.server import start_grpc_server
+
+ grpc_port = args.grpc_port
+ registry = AgentRegistry()
+
+ logger.info(f"Starting Bindu core with gRPC on port {grpc_port}")
+
+ server = start_grpc_server(registry=registry, port=grpc_port)
+
+ # Handle graceful shutdown
+ def _shutdown(signum: int, frame: object) -> None:
+ logger.info("Shutting down gRPC server...")
+ server.stop(grace=5)
+ sys.exit(0)
+
+ signal.signal(signal.SIGINT, _shutdown)
+ signal.signal(signal.SIGTERM, _shutdown)
+
+ # Block until terminated
+ server.wait_for_termination()
+
+
+def main() -> None:
+ """Run the Bindu CLI."""
+ parser = argparse.ArgumentParser(
+ prog="bindu",
+ description="Bindu Framework CLI",
+ )
+ subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+ # bindu serve
+ serve_parser = subparsers.add_parser(
+ "serve",
+ help="Start the Bindu core server",
+ )
+ serve_parser.add_argument(
+ "--grpc",
+ action="store_true",
+ help="Enable gRPC server for language SDK registration",
+ )
+ serve_parser.add_argument(
+ "--grpc-port",
+ type=int,
+ default=3774,
+ help="gRPC server port (default: 3774)",
+ )
+
+ args = parser.parse_args()
+
+ if args.command == "serve":
+ _handle_serve(args)
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/bindu/grpc/__init__.py b/bindu/grpc/__init__.py
new file mode 100644
index 00000000..532ee0c8
--- /dev/null
+++ b/bindu/grpc/__init__.py
@@ -0,0 +1,38 @@
+"""Bindu gRPC Adapter — Language-agnostic agent support.
+
+This package enables agents written in any language (TypeScript, Kotlin, Rust, etc.)
+to register with the Bindu core and be executed as microservices.
+
+Architecture:
+ The gRPC adapter has two sides:
+
+ 1. BinduService (core side, port 3774):
+ - Receives RegisterAgent calls from language SDKs
+ - Runs the full bindufy logic (DID, auth, x402, manifest, HTTP server)
+ - Manages agent lifecycle (heartbeat, unregister)
+
+ 2. GrpcAgentClient (core → SDK):
+ - Callable that replaces manifest.run for remote agents
+ - Calls HandleMessages on the SDK's AgentHandler service
+ - Returns results in the same format as Python handlers
+
+ The key invariant: GrpcAgentClient is a drop-in replacement for manifest.run.
+ ManifestWorker, ResultProcessor, and ResponseDetector require zero changes.
+
+Usage:
+ # Start core with gRPC enabled
+ bindufy(config, handler, grpc=True)
+
+ # Or via environment variable
+ GRPC__ENABLED=true python my_agent.py
+"""
+
+from bindu.grpc.client import GrpcAgentClient
+from bindu.grpc.registry import AgentRegistry
+from bindu.grpc.server import start_grpc_server
+
+__all__ = [
+ "GrpcAgentClient",
+ "AgentRegistry",
+ "start_grpc_server",
+]
diff --git a/bindu/grpc/client.py b/bindu/grpc/client.py
new file mode 100644
index 00000000..188e3d86
--- /dev/null
+++ b/bindu/grpc/client.py
@@ -0,0 +1,278 @@
+"""gRPC client for calling remote agent handlers.
+
+GrpcAgentClient is a callable class that replaces manifest.run for agents
+registered via gRPC. When ManifestWorker calls manifest.run(messages)
+(see manifest_worker.py), this client makes a gRPC call to the SDK's
+AgentHandler endpoint and returns the result in the same format that
+ResultProcessor and ResponseDetector expect.
+
+Supports both unary and streaming responses:
+ - Unary (HandleMessages): Returns str or dict directly.
+ - Streaming (HandleMessagesStream): Returns a generator that yields
+ chunks. ResultProcessor.collect_results() drains it automatically.
+
+Key contract:
+ - Input: list[dict[str, str]] — chat messages [{"role": "user", "content": "..."}]
+ - Output: str (normal completion), dict with "state" key (state transition),
+ or generator of str/dict (streaming — collected by ResultProcessor).
+
+This means ManifestWorker, ResultProcessor, and ResponseDetector require
+zero changes — they cannot tell the difference between a local Python handler
+and a remote gRPC handler.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+import grpc
+
+from bindu.grpc.generated import agent_handler_pb2, agent_handler_pb2_grpc
+from bindu.utils.logging import get_logger
+
+logger = get_logger("bindu.grpc.client")
+
+
+class GrpcAgentClient:
+ """Callable gRPC client that acts as manifest.run for remote agents.
+
+ When the Bindu core registers a remote agent (via BinduService.RegisterAgent),
+ it creates a GrpcAgentClient pointing to the SDK's AgentHandler server.
+ This client is set as manifest.run, so ManifestWorker calls it transparently.
+
+ Supports both unary and streaming modes:
+ - Unary: Calls HandleMessages, returns str or dict.
+ - Streaming: Calls HandleMessagesStream, returns a generator.
+ ResultProcessor.collect_results() handles generators via __next__.
+
+ The __call__ signature uses 'messages' as the parameter name to pass
+ validate_agent_function() inspection.
+
+ Attributes:
+ _address: The SDK's AgentHandler gRPC address (e.g., "localhost:50052").
+ _timeout: Timeout in seconds for HandleMessages calls.
+ _use_streaming: Whether to use HandleMessagesStream instead of HandleMessages.
+ _channel: Lazy-initialized gRPC channel.
+ _stub: Lazy-initialized AgentHandler stub.
+ """
+
+ def __init__(
+ self,
+ callback_address: str,
+ timeout: float = 30.0,
+ use_streaming: bool = False,
+ ) -> None:
+ """Initialize the gRPC agent client.
+
+ Args:
+ callback_address: The SDK's AgentHandler gRPC server address
+ (e.g., "localhost:50052").
+ timeout: Timeout in seconds for HandleMessages calls.
+ use_streaming: If True, use HandleMessagesStream (server-side streaming)
+ instead of HandleMessages (unary). The streaming RPC returns a
+ generator that ResultProcessor.collect_results() will drain.
+ """
+ self._address = callback_address
+ self._timeout = timeout
+ self._use_streaming = use_streaming
+ self._channel: grpc.Channel | None = None
+ self._stub: agent_handler_pb2_grpc.AgentHandlerStub | None = None
+
+ def _ensure_connected(self) -> None:
+ """Lazily create the gRPC channel and stub on first use."""
+ if self._channel is None:
+ self._channel = grpc.insecure_channel(
+ self._address,
+ options=[
+ ("grpc.max_receive_message_length", 4 * 1024 * 1024),
+ ("grpc.max_send_message_length", 4 * 1024 * 1024),
+ ],
+ )
+ self._stub = agent_handler_pb2_grpc.AgentHandlerStub(self._channel)
+ logger.debug(f"Connected to agent handler at {self._address}")
+
+ def _build_request(
+ self, messages: list[dict[str, str]]
+ ) -> agent_handler_pb2.HandleRequest:
+ """Convert chat-format messages to a proto HandleRequest.
+
+ Args:
+ messages: Conversation history as list of dicts.
+ Each dict has "role" (str) and "content" (str) keys.
+
+ Returns:
+ HandleRequest proto message ready for gRPC call.
+ """
+ proto_messages = [
+ agent_handler_pb2.ChatMessage(
+ role=m.get("role", "user"),
+ content=m.get("content", ""),
+ )
+ for m in messages
+ ]
+ return agent_handler_pb2.HandleRequest(messages=proto_messages)
+
+ def __call__(self, messages: list[dict[str, str]], **kwargs: Any) -> Any:
+ """Execute the remote handler with conversation history.
+
+        Called by ManifestWorker (see manifest_worker.py):
+ raw_results = self.manifest.run(message_history or [])
+
+ Supports two modes:
+ - Unary (default): Calls HandleMessages, returns str or dict.
+ - Streaming: Calls HandleMessagesStream, returns a generator.
+ ResultProcessor.collect_results() drains generators via __next__.
+
+ Args:
+ messages: Conversation history as list of dicts.
+ Each dict has "role" (str) and "content" (str) keys.
+ **kwargs: Additional keyword arguments (ignored, for compatibility).
+
+ Returns:
+ Unary mode:
+ str: Plain text response (maps to "completed" task state).
+ dict: Structured response with "state" key for state transitions.
+ Streaming mode:
+ Generator[str | dict]: Yields chunks. ResultProcessor.collect_results()
+ uses the last yielded value as the final result.
+
+ Raises:
+ grpc.RpcError: If the gRPC call fails (caught by ManifestWorker's
+ try/except which calls _handle_task_failure).
+ """
+ self._ensure_connected()
+ assert self._stub is not None
+
+ request = self._build_request(messages)
+
+ if self._use_streaming:
+ logger.debug(
+ f"Calling HandleMessagesStream on {self._address} "
+ f"with {len(request.messages)} messages"
+ )
+ return self._handle_streaming(request)
+ else:
+ logger.debug(
+ f"Calling HandleMessages on {self._address} "
+ f"with {len(request.messages)} messages"
+ )
+ return self._handle_unary(request)
+
+ def _handle_unary(
+ self, request: agent_handler_pb2.HandleRequest
+ ) -> str | dict[str, Any]:
+ """Make a unary HandleMessages call.
+
+ Args:
+ request: Proto HandleRequest.
+
+ Returns:
+ str or dict from _response_to_result().
+ """
+ assert self._stub is not None
+ response = self._stub.HandleMessages(request, timeout=self._timeout)
+ return self._response_to_result(response)
+
+ def _handle_streaming(self, request: agent_handler_pb2.HandleRequest) -> Any:
+ """Make a streaming HandleMessagesStream call.
+
+ Returns a generator that yields results from the stream.
+ ResultProcessor.collect_results() detects this via __next__
+ and drains it, using the last yielded value as the final result.
+
+ Args:
+ request: Proto HandleRequest.
+
+ Yields:
+ str or dict from _response_to_result() for each stream chunk.
+ """
+ assert self._stub is not None
+ response_stream = self._stub.HandleMessagesStream(
+ request, timeout=self._timeout
+ )
+ for response in response_stream:
+ yield self._response_to_result(response)
+
+ @staticmethod
+ def _response_to_result(
+ response: agent_handler_pb2.HandleResponse,
+ ) -> str | dict[str, Any]:
+ """Convert a proto HandleResponse to the format ResultProcessor expects.
+
+ The downstream processing chain expects:
+ - str: Normal text response → task completes
+ - dict with "state" key: State transition → task stays open
+ e.g., {"state": "input-required", "prompt": "Can you clarify?"}
+
+ Args:
+ response: Proto HandleResponse from the SDK.
+
+ Returns:
+ str or dict matching what ResponseDetector.determine_task_state() expects.
+ """
+ if response.state:
+ # Structured response — maps to intermediate task state
+ result: dict[str, Any] = {"state": response.state}
+ if response.prompt:
+ result["prompt"] = response.prompt
+ if response.content:
+ result["content"] = response.content
+ # Include any extra metadata from the SDK
+ for key, value in response.metadata.items():
+ result[key] = value
+ return result
+ else:
+ # Plain string response — maps to "completed" task state
+ return response.content
+
+ def health_check(self) -> bool:
+ """Check if the remote SDK agent is healthy.
+
+ Returns:
+ True if the agent responds and reports healthy, False otherwise.
+ """
+ self._ensure_connected()
+ assert self._stub is not None
+ try:
+ response = self._stub.HealthCheck(
+ agent_handler_pb2.HealthCheckRequest(),
+ timeout=5.0,
+ )
+ return response.healthy
+ except grpc.RpcError as e:
+ logger.warning(f"Health check failed for {self._address}: {e}")
+ return False
+
+ def get_capabilities(
+ self,
+ ) -> agent_handler_pb2.GetCapabilitiesResponse | None:
+ """Query the remote SDK agent's capabilities.
+
+ Returns:
+ GetCapabilitiesResponse if successful, None on failure.
+ """
+ self._ensure_connected()
+ assert self._stub is not None
+ try:
+ return self._stub.GetCapabilities(
+ agent_handler_pb2.GetCapabilitiesRequest(),
+ timeout=5.0,
+ )
+ except grpc.RpcError as e:
+ logger.warning(f"GetCapabilities failed for {self._address}: {e}")
+ return None
+
+ def close(self) -> None:
+ """Close the gRPC channel and release resources."""
+ if self._channel is not None:
+ self._channel.close()
+ self._channel = None
+ self._stub = None
+ logger.debug(f"Closed connection to {self._address}")
+
+ def __repr__(self) -> str: # noqa: D105
+ mode = "streaming" if self._use_streaming else "unary"
+ return (
+ f"GrpcAgentClient(address={self._address!r}, "
+ f"timeout={self._timeout}, mode={mode})"
+ )
diff --git a/bindu/grpc/generated/__init__.py b/bindu/grpc/generated/__init__.py
new file mode 100644
index 00000000..d891a2cc
--- /dev/null
+++ b/bindu/grpc/generated/__init__.py
@@ -0,0 +1,7 @@
+"""Auto-generated protobuf stubs for the Bindu gRPC protocol.
+
+These files are generated by running:
+ bash scripts/generate_protos.sh
+
+Do not edit these files manually.
+"""
diff --git a/bindu/grpc/generated/agent_handler_pb2.py b/bindu/grpc/generated/agent_handler_pb2.py
new file mode 100644
index 00000000..5db6ab4e
--- /dev/null
+++ b/bindu/grpc/generated/agent_handler_pb2.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: agent_handler.proto
+# Protobuf Python Version: 6.31.1
+"""Generated protocol buffer code."""
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC, 6, 31, 1, "", "agent_handler.proto"
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+ b'\n\x13\x61gent_handler.proto\x12\nbindu.grpc"w\n\x14RegisterAgentRequest\x12\x13\n\x0b\x63onfig_json\x18\x01 \x01(\t\x12+\n\x06skills\x18\x02 \x03(\x0b\x32\x1b.bindu.grpc.SkillDefinition\x12\x1d\n\x15grpc_callback_address\x18\x03 \x01(\t"i\n\x15RegisterAgentResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x10\n\x08\x61gent_id\x18\x02 \x01(\t\x12\x0b\n\x03\x64id\x18\x03 \x01(\t\x12\x11\n\tagent_url\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t"7\n\x10HeartbeatRequest\x12\x10\n\x08\x61gent_id\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x03"C\n\x11HeartbeatResponse\x12\x14\n\x0c\x61\x63knowledged\x18\x01 \x01(\x08\x12\x18\n\x10server_timestamp\x18\x02 \x01(\x03"*\n\x16UnregisterAgentRequest\x12\x10\n\x08\x61gent_id\x18\x01 \x01(\t"9\n\x17UnregisterAgentResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t",\n\x0b\x43hatMessage\x12\x0c\n\x04role\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t"_\n\rHandleRequest\x12)\n\x08messages\x18\x01 \x03(\x0b\x32\x17.bindu.grpc.ChatMessage\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x12\n\ncontext_id\x18\x03 \x01(\t"\xbf\x01\n\x0eHandleResponse\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\r\n\x05state\x18\x02 \x01(\t\x12\x0e\n\x06prompt\x18\x03 \x01(\t\x12\x10\n\x08is_final\x18\x04 \x01(\x08\x12:\n\x08metadata\x18\x05 \x03(\x0b\x32(.bindu.grpc.HandleResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb3\x01\n\x0fSkillDefinition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0c\n\x04tags\x18\x03 \x03(\t\x12\x13\n\x0binput_modes\x18\x04 \x03(\t\x12\x14\n\x0coutput_modes\x18\x05 \x03(\t\x12\x0f\n\x07version\x18\x06 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x07 \x01(\t\x12\x13\n\x0braw_content\x18\x08 \x01(\t\x12\x0e\n\x06\x66ormat\x18\t \x01(\t"\x18\n\x16GetCapabilitiesRequest"\x96\x01\n\x17GetCapabilitiesResponse\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x1a\n\x12supports_streaming\x18\x04 \x01(\x08\x12+\n\x06skills\x18\x05 \x03(\x0b\x32\x1b.bindu.grpc.SkillDefinition"\x14\n\x12HealthCheckRequest"7\n\x13HealthCheckResponse\x12\x0f\n\x07healthy\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t2\x8a\x02\n\x0c\x42induService\x12T\n\rRegisterAgent\x12 .bindu.grpc.RegisterAgentRequest\x1a!.bindu.grpc.RegisterAgentResponse\x12H\n\tHeartbeat\x12\x1c.bindu.grpc.HeartbeatRequest\x1a\x1d.bindu.grpc.HeartbeatResponse\x12Z\n\x0fUnregisterAgent\x12".bindu.grpc.UnregisterAgentRequest\x1a#.bindu.grpc.UnregisterAgentResponse2\xd4\x02\n\x0c\x41gentHandler\x12G\n\x0eHandleMessages\x12\x19.bindu.grpc.HandleRequest\x1a\x1a.bindu.grpc.HandleResponse\x12O\n\x14HandleMessagesStream\x12\x19.bindu.grpc.HandleRequest\x1a\x1a.bindu.grpc.HandleResponse0\x01\x12Z\n\x0fGetCapabilities\x12".bindu.grpc.GetCapabilitiesRequest\x1a#.bindu.grpc.GetCapabilitiesResponse\x12N\n\x0bHealthCheck\x12\x1e.bindu.grpc.HealthCheckRequest\x1a\x1f.bindu.grpc.HealthCheckResponseB6\n\x11\x63om.getbindu.grpcP\x01Z\x1fgithub.com/getbindu/bindu/protob\x06proto3'
+)
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "agent_handler_pb2", _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals["DESCRIPTOR"]._loaded_options = None
+ _globals[
+ "DESCRIPTOR"
+ ]._serialized_options = (
+ b"\n\021com.getbindu.grpcP\001Z\037github.com/getbindu/bindu/proto"
+ )
+ _globals["_HANDLERESPONSE_METADATAENTRY"]._loaded_options = None
+ _globals["_HANDLERESPONSE_METADATAENTRY"]._serialized_options = b"8\001"
+ _globals["_REGISTERAGENTREQUEST"]._serialized_start = 35
+ _globals["_REGISTERAGENTREQUEST"]._serialized_end = 154
+ _globals["_REGISTERAGENTRESPONSE"]._serialized_start = 156
+ _globals["_REGISTERAGENTRESPONSE"]._serialized_end = 261
+ _globals["_HEARTBEATREQUEST"]._serialized_start = 263
+ _globals["_HEARTBEATREQUEST"]._serialized_end = 318
+ _globals["_HEARTBEATRESPONSE"]._serialized_start = 320
+ _globals["_HEARTBEATRESPONSE"]._serialized_end = 387
+ _globals["_UNREGISTERAGENTREQUEST"]._serialized_start = 389
+ _globals["_UNREGISTERAGENTREQUEST"]._serialized_end = 431
+ _globals["_UNREGISTERAGENTRESPONSE"]._serialized_start = 433
+ _globals["_UNREGISTERAGENTRESPONSE"]._serialized_end = 490
+ _globals["_CHATMESSAGE"]._serialized_start = 492
+ _globals["_CHATMESSAGE"]._serialized_end = 536
+ _globals["_HANDLEREQUEST"]._serialized_start = 538
+ _globals["_HANDLEREQUEST"]._serialized_end = 633
+ _globals["_HANDLERESPONSE"]._serialized_start = 636
+ _globals["_HANDLERESPONSE"]._serialized_end = 827
+ _globals["_HANDLERESPONSE_METADATAENTRY"]._serialized_start = 780
+ _globals["_HANDLERESPONSE_METADATAENTRY"]._serialized_end = 827
+ _globals["_SKILLDEFINITION"]._serialized_start = 830
+ _globals["_SKILLDEFINITION"]._serialized_end = 1009
+ _globals["_GETCAPABILITIESREQUEST"]._serialized_start = 1011
+ _globals["_GETCAPABILITIESREQUEST"]._serialized_end = 1035
+ _globals["_GETCAPABILITIESRESPONSE"]._serialized_start = 1038
+ _globals["_GETCAPABILITIESRESPONSE"]._serialized_end = 1188
+ _globals["_HEALTHCHECKREQUEST"]._serialized_start = 1190
+ _globals["_HEALTHCHECKREQUEST"]._serialized_end = 1210
+ _globals["_HEALTHCHECKRESPONSE"]._serialized_start = 1212
+ _globals["_HEALTHCHECKRESPONSE"]._serialized_end = 1267
+ _globals["_BINDUSERVICE"]._serialized_start = 1270
+ _globals["_BINDUSERVICE"]._serialized_end = 1536
+ _globals["_AGENTHANDLER"]._serialized_start = 1539
+ _globals["_AGENTHANDLER"]._serialized_end = 1879
+# @@protoc_insertion_point(module_scope)
diff --git a/bindu/grpc/generated/agent_handler_pb2.pyi b/bindu/grpc/generated/agent_handler_pb2.pyi
new file mode 100644
index 00000000..6a8b04ef
--- /dev/null
+++ b/bindu/grpc/generated/agent_handler_pb2.pyi
@@ -0,0 +1,213 @@
+from google.protobuf.internal import containers as _containers
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from collections.abc import Iterable as _Iterable, Mapping as _Mapping
+from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union
+
+DESCRIPTOR: _descriptor.FileDescriptor
+
+class RegisterAgentRequest(_message.Message):
+ __slots__ = ("config_json", "skills", "grpc_callback_address")
+ CONFIG_JSON_FIELD_NUMBER: _ClassVar[int]
+ SKILLS_FIELD_NUMBER: _ClassVar[int]
+ GRPC_CALLBACK_ADDRESS_FIELD_NUMBER: _ClassVar[int]
+ config_json: str
+ skills: _containers.RepeatedCompositeFieldContainer[SkillDefinition]
+ grpc_callback_address: str
+ def __init__(
+ self,
+ config_json: _Optional[str] = ...,
+ skills: _Optional[_Iterable[_Union[SkillDefinition, _Mapping]]] = ...,
+ grpc_callback_address: _Optional[str] = ...,
+ ) -> None: ...
+
+class RegisterAgentResponse(_message.Message):
+ __slots__ = ("success", "agent_id", "did", "agent_url", "error")
+ SUCCESS_FIELD_NUMBER: _ClassVar[int]
+ AGENT_ID_FIELD_NUMBER: _ClassVar[int]
+ DID_FIELD_NUMBER: _ClassVar[int]
+ AGENT_URL_FIELD_NUMBER: _ClassVar[int]
+ ERROR_FIELD_NUMBER: _ClassVar[int]
+ success: bool
+ agent_id: str
+ did: str
+ agent_url: str
+ error: str
+ def __init__(
+ self,
+ success: bool = ...,
+ agent_id: _Optional[str] = ...,
+ did: _Optional[str] = ...,
+ agent_url: _Optional[str] = ...,
+ error: _Optional[str] = ...,
+ ) -> None: ...
+
+class HeartbeatRequest(_message.Message):
+ __slots__ = ("agent_id", "timestamp")
+ AGENT_ID_FIELD_NUMBER: _ClassVar[int]
+ TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ agent_id: str
+ timestamp: int
+ def __init__(
+ self, agent_id: _Optional[str] = ..., timestamp: _Optional[int] = ...
+ ) -> None: ...
+
+class HeartbeatResponse(_message.Message):
+ __slots__ = ("acknowledged", "server_timestamp")
+ ACKNOWLEDGED_FIELD_NUMBER: _ClassVar[int]
+ SERVER_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ acknowledged: bool
+ server_timestamp: int
+ def __init__(
+ self, acknowledged: bool = ..., server_timestamp: _Optional[int] = ...
+ ) -> None: ...
+
+class UnregisterAgentRequest(_message.Message):
+ __slots__ = ("agent_id",)
+ AGENT_ID_FIELD_NUMBER: _ClassVar[int]
+ agent_id: str
+ def __init__(self, agent_id: _Optional[str] = ...) -> None: ...
+
+class UnregisterAgentResponse(_message.Message):
+ __slots__ = ("success", "error")
+ SUCCESS_FIELD_NUMBER: _ClassVar[int]
+ ERROR_FIELD_NUMBER: _ClassVar[int]
+ success: bool
+ error: str
+ def __init__(self, success: bool = ..., error: _Optional[str] = ...) -> None: ...
+
+class ChatMessage(_message.Message):
+ __slots__ = ("role", "content")
+ ROLE_FIELD_NUMBER: _ClassVar[int]
+ CONTENT_FIELD_NUMBER: _ClassVar[int]
+ role: str
+ content: str
+ def __init__(
+ self, role: _Optional[str] = ..., content: _Optional[str] = ...
+ ) -> None: ...
+
+class HandleRequest(_message.Message):
+ __slots__ = ("messages", "task_id", "context_id")
+ MESSAGES_FIELD_NUMBER: _ClassVar[int]
+ TASK_ID_FIELD_NUMBER: _ClassVar[int]
+ CONTEXT_ID_FIELD_NUMBER: _ClassVar[int]
+ messages: _containers.RepeatedCompositeFieldContainer[ChatMessage]
+ task_id: str
+ context_id: str
+ def __init__(
+ self,
+ messages: _Optional[_Iterable[_Union[ChatMessage, _Mapping]]] = ...,
+ task_id: _Optional[str] = ...,
+ context_id: _Optional[str] = ...,
+ ) -> None: ...
+
+class HandleResponse(_message.Message):
+ __slots__ = ("content", "state", "prompt", "is_final", "metadata")
+ class MetadataEntry(_message.Message):
+ __slots__ = ("key", "value")
+ KEY_FIELD_NUMBER: _ClassVar[int]
+ VALUE_FIELD_NUMBER: _ClassVar[int]
+ key: str
+ value: str
+ def __init__(
+ self, key: _Optional[str] = ..., value: _Optional[str] = ...
+ ) -> None: ...
+
+ CONTENT_FIELD_NUMBER: _ClassVar[int]
+ STATE_FIELD_NUMBER: _ClassVar[int]
+ PROMPT_FIELD_NUMBER: _ClassVar[int]
+ IS_FINAL_FIELD_NUMBER: _ClassVar[int]
+ METADATA_FIELD_NUMBER: _ClassVar[int]
+ content: str
+ state: str
+ prompt: str
+ is_final: bool
+ metadata: _containers.ScalarMap[str, str]
+ def __init__(
+ self,
+ content: _Optional[str] = ...,
+ state: _Optional[str] = ...,
+ prompt: _Optional[str] = ...,
+ is_final: bool = ...,
+ metadata: _Optional[_Mapping[str, str]] = ...,
+ ) -> None: ...
+
+class SkillDefinition(_message.Message):
+ __slots__ = (
+ "name",
+ "description",
+ "tags",
+ "input_modes",
+ "output_modes",
+ "version",
+ "author",
+ "raw_content",
+ "format",
+ )
+ NAME_FIELD_NUMBER: _ClassVar[int]
+ DESCRIPTION_FIELD_NUMBER: _ClassVar[int]
+ TAGS_FIELD_NUMBER: _ClassVar[int]
+ INPUT_MODES_FIELD_NUMBER: _ClassVar[int]
+ OUTPUT_MODES_FIELD_NUMBER: _ClassVar[int]
+ VERSION_FIELD_NUMBER: _ClassVar[int]
+ AUTHOR_FIELD_NUMBER: _ClassVar[int]
+ RAW_CONTENT_FIELD_NUMBER: _ClassVar[int]
+ FORMAT_FIELD_NUMBER: _ClassVar[int]
+ name: str
+ description: str
+ tags: _containers.RepeatedScalarFieldContainer[str]
+ input_modes: _containers.RepeatedScalarFieldContainer[str]
+ output_modes: _containers.RepeatedScalarFieldContainer[str]
+ version: str
+ author: str
+ raw_content: str
+ format: str
+ def __init__(
+ self,
+ name: _Optional[str] = ...,
+ description: _Optional[str] = ...,
+ tags: _Optional[_Iterable[str]] = ...,
+ input_modes: _Optional[_Iterable[str]] = ...,
+ output_modes: _Optional[_Iterable[str]] = ...,
+ version: _Optional[str] = ...,
+ author: _Optional[str] = ...,
+ raw_content: _Optional[str] = ...,
+ format: _Optional[str] = ...,
+ ) -> None: ...
+
+class GetCapabilitiesRequest(_message.Message):
+ __slots__ = ()
+ def __init__(self) -> None: ...
+
+class GetCapabilitiesResponse(_message.Message):
+ __slots__ = ("name", "description", "version", "supports_streaming", "skills")
+ NAME_FIELD_NUMBER: _ClassVar[int]
+ DESCRIPTION_FIELD_NUMBER: _ClassVar[int]
+ VERSION_FIELD_NUMBER: _ClassVar[int]
+ SUPPORTS_STREAMING_FIELD_NUMBER: _ClassVar[int]
+ SKILLS_FIELD_NUMBER: _ClassVar[int]
+ name: str
+ description: str
+ version: str
+ supports_streaming: bool
+ skills: _containers.RepeatedCompositeFieldContainer[SkillDefinition]
+ def __init__(
+ self,
+ name: _Optional[str] = ...,
+ description: _Optional[str] = ...,
+ version: _Optional[str] = ...,
+ supports_streaming: bool = ...,
+ skills: _Optional[_Iterable[_Union[SkillDefinition, _Mapping]]] = ...,
+ ) -> None: ...
+
+class HealthCheckRequest(_message.Message):
+ __slots__ = ()
+ def __init__(self) -> None: ...
+
+class HealthCheckResponse(_message.Message):
+ __slots__ = ("healthy", "message")
+ HEALTHY_FIELD_NUMBER: _ClassVar[int]
+ MESSAGE_FIELD_NUMBER: _ClassVar[int]
+ healthy: bool
+ message: str
+ def __init__(self, healthy: bool = ..., message: _Optional[str] = ...) -> None: ...
diff --git a/bindu/grpc/generated/agent_handler_pb2_grpc.py b/bindu/grpc/generated/agent_handler_pb2_grpc.py
new file mode 100644
index 00000000..82840bb8
--- /dev/null
+++ b/bindu/grpc/generated/agent_handler_pb2_grpc.py
@@ -0,0 +1,452 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+
+import grpc
+
+from bindu.grpc.generated import agent_handler_pb2 as agent__handler__pb2
+
+GRPC_GENERATED_VERSION = "1.78.0"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+
+ _version_not_supported = first_version_is_lower(
+ GRPC_VERSION, GRPC_GENERATED_VERSION
+ )
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f"The grpc package installed is at version {GRPC_VERSION},"
+ + " but the generated code in agent_handler_pb2_grpc.py depends on"
+ + f" grpcio>={GRPC_GENERATED_VERSION}."
+ + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+ + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+ )
+
+
+class BinduServiceStub(object):
+ """=============================================================================
+ BinduService — SDK calls this on the Core to register and manage agents
+ =============================================================================
+
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.RegisterAgent = channel.unary_unary(
+ "/bindu.grpc.BinduService/RegisterAgent",
+ request_serializer=agent__handler__pb2.RegisterAgentRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.RegisterAgentResponse.FromString,
+ _registered_method=True,
+ )
+ self.Heartbeat = channel.unary_unary(
+ "/bindu.grpc.BinduService/Heartbeat",
+ request_serializer=agent__handler__pb2.HeartbeatRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.HeartbeatResponse.FromString,
+ _registered_method=True,
+ )
+ self.UnregisterAgent = channel.unary_unary(
+ "/bindu.grpc.BinduService/UnregisterAgent",
+ request_serializer=agent__handler__pb2.UnregisterAgentRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.UnregisterAgentResponse.FromString,
+ _registered_method=True,
+ )
+
+
+class BinduServiceServicer(object):
+ """=============================================================================
+ BinduService — SDK calls this on the Core to register and manage agents
+ =============================================================================
+
+ """
+
+ def RegisterAgent(self, request, context):
+ """Register an agent with the Bindu core.
+ Core runs the full bindufy logic: DID, auth, x402, manifest, HTTP server.
+ Returns agent identity and the A2A endpoint URL.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def Heartbeat(self, request, context):
+ """Periodic heartbeat to signal the SDK is still alive."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UnregisterAgent(self, request, context):
+ """Unregister an agent and shut down its A2A server."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+
+def add_BinduServiceServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ "RegisterAgent": grpc.unary_unary_rpc_method_handler(
+ servicer.RegisterAgent,
+ request_deserializer=agent__handler__pb2.RegisterAgentRequest.FromString,
+ response_serializer=agent__handler__pb2.RegisterAgentResponse.SerializeToString,
+ ),
+ "Heartbeat": grpc.unary_unary_rpc_method_handler(
+ servicer.Heartbeat,
+ request_deserializer=agent__handler__pb2.HeartbeatRequest.FromString,
+ response_serializer=agent__handler__pb2.HeartbeatResponse.SerializeToString,
+ ),
+ "UnregisterAgent": grpc.unary_unary_rpc_method_handler(
+ servicer.UnregisterAgent,
+ request_deserializer=agent__handler__pb2.UnregisterAgentRequest.FromString,
+ response_serializer=agent__handler__pb2.UnregisterAgentResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ "bindu.grpc.BinduService", rpc_method_handlers
+ )
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers(
+ "bindu.grpc.BinduService", rpc_method_handlers
+ )
+
+
+# This class is part of an EXPERIMENTAL API.
+class BinduService(object):
+ """=============================================================================
+ BinduService — SDK calls this on the Core to register and manage agents
+ =============================================================================
+
+ """
+
+ @staticmethod
+ def RegisterAgent(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.BinduService/RegisterAgent",
+ agent__handler__pb2.RegisterAgentRequest.SerializeToString,
+ agent__handler__pb2.RegisterAgentResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def Heartbeat(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.BinduService/Heartbeat",
+ agent__handler__pb2.HeartbeatRequest.SerializeToString,
+ agent__handler__pb2.HeartbeatResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def UnregisterAgent(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.BinduService/UnregisterAgent",
+ agent__handler__pb2.UnregisterAgentRequest.SerializeToString,
+ agent__handler__pb2.UnregisterAgentResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+
+class AgentHandlerStub(object):
+ """=============================================================================
+ AgentHandler — Core calls this on the SDK to execute tasks
+ =============================================================================
+
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.HandleMessages = channel.unary_unary(
+ "/bindu.grpc.AgentHandler/HandleMessages",
+ request_serializer=agent__handler__pb2.HandleRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.HandleResponse.FromString,
+ _registered_method=True,
+ )
+ self.HandleMessagesStream = channel.unary_stream(
+ "/bindu.grpc.AgentHandler/HandleMessagesStream",
+ request_serializer=agent__handler__pb2.HandleRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.HandleResponse.FromString,
+ _registered_method=True,
+ )
+ self.GetCapabilities = channel.unary_unary(
+ "/bindu.grpc.AgentHandler/GetCapabilities",
+ request_serializer=agent__handler__pb2.GetCapabilitiesRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.GetCapabilitiesResponse.FromString,
+ _registered_method=True,
+ )
+ self.HealthCheck = channel.unary_unary(
+ "/bindu.grpc.AgentHandler/HealthCheck",
+ request_serializer=agent__handler__pb2.HealthCheckRequest.SerializeToString,
+ response_deserializer=agent__handler__pb2.HealthCheckResponse.FromString,
+ _registered_method=True,
+ )
+
+
+class AgentHandlerServicer(object):
+ """=============================================================================
+ AgentHandler — Core calls this on the SDK to execute tasks
+ =============================================================================
+
+ """
+
+ def HandleMessages(self, request, context):
+ """Execute a handler with conversation history (unary).
+ Core sends messages, SDK runs the developer's handler, returns response.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def HandleMessagesStream(self, request, context):
+ """Execute a handler with streaming response (server-side streaming).
+ SDK yields chunks; core collects them via ResultProcessor.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetCapabilities(self, request, context):
+ """Query agent capabilities (skills, supported modes)."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def HealthCheck(self, request, context):
+ """Health check to verify the SDK process is responsive."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+
+def add_AgentHandlerServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ "HandleMessages": grpc.unary_unary_rpc_method_handler(
+ servicer.HandleMessages,
+ request_deserializer=agent__handler__pb2.HandleRequest.FromString,
+ response_serializer=agent__handler__pb2.HandleResponse.SerializeToString,
+ ),
+ "HandleMessagesStream": grpc.unary_stream_rpc_method_handler(
+ servicer.HandleMessagesStream,
+ request_deserializer=agent__handler__pb2.HandleRequest.FromString,
+ response_serializer=agent__handler__pb2.HandleResponse.SerializeToString,
+ ),
+ "GetCapabilities": grpc.unary_unary_rpc_method_handler(
+ servicer.GetCapabilities,
+ request_deserializer=agent__handler__pb2.GetCapabilitiesRequest.FromString,
+ response_serializer=agent__handler__pb2.GetCapabilitiesResponse.SerializeToString,
+ ),
+ "HealthCheck": grpc.unary_unary_rpc_method_handler(
+ servicer.HealthCheck,
+ request_deserializer=agent__handler__pb2.HealthCheckRequest.FromString,
+ response_serializer=agent__handler__pb2.HealthCheckResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ "bindu.grpc.AgentHandler", rpc_method_handlers
+ )
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers(
+ "bindu.grpc.AgentHandler", rpc_method_handlers
+ )
+
+
+# This class is part of an EXPERIMENTAL API.
+class AgentHandler(object):
+ """=============================================================================
+ AgentHandler — Core calls this on the SDK to execute tasks
+ =============================================================================
+
+ """
+
+ @staticmethod
+ def HandleMessages(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.AgentHandler/HandleMessages",
+ agent__handler__pb2.HandleRequest.SerializeToString,
+ agent__handler__pb2.HandleResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def HandleMessagesStream(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ "/bindu.grpc.AgentHandler/HandleMessagesStream",
+ agent__handler__pb2.HandleRequest.SerializeToString,
+ agent__handler__pb2.HandleResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetCapabilities(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.AgentHandler/GetCapabilities",
+ agent__handler__pb2.GetCapabilitiesRequest.SerializeToString,
+ agent__handler__pb2.GetCapabilitiesResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def HealthCheck(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/bindu.grpc.AgentHandler/HealthCheck",
+ agent__handler__pb2.HealthCheckRequest.SerializeToString,
+ agent__handler__pb2.HealthCheckResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
diff --git a/bindu/grpc/registry.py b/bindu/grpc/registry.py
new file mode 100644
index 00000000..9a8dde0b
--- /dev/null
+++ b/bindu/grpc/registry.py
@@ -0,0 +1,142 @@
+"""Thread-safe agent registry for gRPC-registered remote agents.
+
+Tracks agents that have registered via the BinduService.RegisterAgent RPC.
+Each entry maps an agent_id to its gRPC callback address, manifest, and
+lifecycle timestamps.
+
+Thread safety is required because the gRPC server uses a ThreadPoolExecutor
+for handling concurrent RegisterAgent/Heartbeat/UnregisterAgent calls.
+"""
+
+from __future__ import annotations
+
+import threading
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING
+
+from bindu.utils.logging import get_logger
+
+if TYPE_CHECKING:
+ from bindu.common.models import AgentManifest
+
+logger = get_logger("bindu.grpc.registry")
+
+
+@dataclass
+class RegisteredAgent:
+ """A remote agent registered via gRPC.
+
+ Attributes:
+ agent_id: UUID string of the registered agent.
+ grpc_callback_address: The SDK's AgentHandler gRPC address
+ (e.g., "localhost:50052"). Core calls HandleMessages here.
+ manifest: The AgentManifest created during registration.
+ registered_at: UTC timestamp when the agent was registered.
+ last_heartbeat: UTC timestamp of the last heartbeat received (initialized to the registration time until the first heartbeat arrives).
+ """
+
+ agent_id: str
+ grpc_callback_address: str
+ manifest: AgentManifest
+ registered_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ last_heartbeat: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+
+class AgentRegistry:
+ """Thread-safe in-memory registry for gRPC-registered agents.
+
+ Provides register/unregister/lookup/heartbeat/list operations, each
+ protected by a threading.Lock, since the gRPC server's
+ ThreadPoolExecutor may call them concurrently.
+ """
+
+ def __init__(self) -> None: # noqa: D107
+ self._agents: dict[str, RegisteredAgent] = {}
+ self._lock = threading.Lock()
+
+ def register(
+ self,
+ agent_id: str,
+ grpc_callback_address: str,
+ manifest: AgentManifest,
+ ) -> RegisteredAgent:
+ """Register a new remote agent.
+
+ Args:
+ agent_id: UUID string of the agent.
+ grpc_callback_address: SDK's AgentHandler gRPC address.
+ manifest: AgentManifest created during registration.
+
+ Returns:
+ The RegisteredAgent entry.
+ """
+ entry = RegisteredAgent(
+ agent_id=agent_id,
+ grpc_callback_address=grpc_callback_address,
+ manifest=manifest,
+ )
+ with self._lock:
+ self._agents[agent_id] = entry
+ logger.info(
+ f"Registered agent {agent_id} with callback at {grpc_callback_address}"
+ )
+ return entry
+
+ def get(self, agent_id: str) -> RegisteredAgent | None:
+ """Look up a registered agent by ID.
+
+ Args:
+ agent_id: UUID string of the agent.
+
+ Returns:
+ RegisteredAgent if found, None otherwise.
+ """
+ with self._lock:
+ return self._agents.get(agent_id)
+
+ def unregister(self, agent_id: str) -> bool:
+ """Remove an agent from the registry.
+
+ Args:
+ agent_id: UUID string of the agent to remove.
+
+ Returns:
+ True if the agent was found and removed, False otherwise.
+ """
+ with self._lock:
+ removed = self._agents.pop(agent_id, None)
+ if removed:
+ logger.info(f"Unregistered agent {agent_id}")
+ return True
+ logger.warning(f"Attempted to unregister unknown agent {agent_id}")
+ return False
+
+ def update_heartbeat(self, agent_id: str) -> bool:
+ """Update the last heartbeat timestamp for an agent.
+
+ Args:
+ agent_id: UUID string of the agent.
+
+ Returns:
+ True if the agent was found and updated, False otherwise.
+ """
+ with self._lock:
+ entry = self._agents.get(agent_id)
+ if entry:
+ entry.last_heartbeat = datetime.now(timezone.utc)
+ return True
+ return False
+
+ def list_agents(self) -> list[RegisteredAgent]:
+ """Return a snapshot of all registered agents.
+
+ Returns:
+ List of RegisteredAgent entries (copy, safe to iterate).
+ """
+ with self._lock:
+ return list(self._agents.values())
+
+ def __len__(self) -> int: # noqa: D105
+ with self._lock:
+ return len(self._agents)
diff --git a/bindu/grpc/server.py b/bindu/grpc/server.py
new file mode 100644
index 00000000..d1e51119
--- /dev/null
+++ b/bindu/grpc/server.py
@@ -0,0 +1,95 @@
+"""gRPC server for the Bindu core — accepts SDK registrations on port 3774.
+
+This module starts a gRPC server that implements BinduService. External
+SDKs (TypeScript, Kotlin, Rust) connect to this server to register their
+agents via RegisterAgent.
+
+The server uses a ThreadPoolExecutor for concurrent request handling.
+It is started either:
+ - By the `bindu serve --grpc` CLI command (standalone mode)
+ - By BinduApplication lifespan when grpc.enabled=True (integrated mode)
+
+Usage:
+ from bindu.grpc.server import start_grpc_server
+ from bindu.grpc.registry import AgentRegistry
+
+ registry = AgentRegistry()
+ server = start_grpc_server(registry)
+ server.wait_for_termination() # blocks
+"""
+
+from __future__ import annotations
+
+from concurrent import futures
+
+import grpc
+
+from bindu.grpc.generated import agent_handler_pb2_grpc
+from bindu.grpc.registry import AgentRegistry
+from bindu.grpc.service import BinduServiceImpl
+from bindu.settings import app_settings
+from bindu.utils.logging import get_logger
+
+logger = get_logger("bindu.grpc.server")
+
+
+def start_grpc_server(
+ registry: AgentRegistry | None = None,
+ host: str | None = None,
+ port: int | None = None,
+ max_workers: int | None = None,
+) -> grpc.Server:
+ """Start the Bindu gRPC server for SDK agent registration.
+
+ Creates a gRPC server that serves BinduService, allowing external SDKs
+ to register agents via RegisterAgent RPC.
+
+ Args:
+ registry: Agent registry instance; if None, a new one is created.
+ host: Bind host. Defaults to app_settings.grpc.host.
+ port: Bind port. Defaults to app_settings.grpc.port (3774).
+ max_workers: Thread pool size. Defaults to app_settings.grpc.max_workers.
+
+ Returns:
+ The started grpc.Server instance. Call wait_for_termination() to block,
+ or stop() to shut down.
+ """
+ registry = registry or AgentRegistry()
+ host = host or app_settings.grpc.host
+ port = port or app_settings.grpc.port
+ max_workers = max_workers or app_settings.grpc.max_workers
+
+ # Create gRPC server with thread pool
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=max_workers),
+ options=[
+ (
+ "grpc.max_receive_message_length",
+ app_settings.grpc.max_message_length,
+ ),
+ (
+ "grpc.max_send_message_length",
+ app_settings.grpc.max_message_length,
+ ),
+ ],
+ )
+
+ # Register BinduService
+ agent_handler_pb2_grpc.add_BinduServiceServicer_to_server(
+ BinduServiceImpl(registry),
+ server,
+ )
+
+ # Bind to address (NOTE(review): add_insecure_port returns 0 when binding fails — check the returned port before start())
+ bind_address = f"{host}:{port}"
+ server.add_insecure_port(bind_address)
+
+ # Start serving
+ server.start()
+ logger.info(f"gRPC server started on {bind_address}")
+ logger.info(
+ "Waiting for SDK agent registrations... "
+ "(TypeScript, Kotlin, Rust agents can now connect)"
+ )
+
+ return server
diff --git a/bindu/grpc/service.py b/bindu/grpc/service.py
new file mode 100644
index 00000000..d66d6bae
--- /dev/null
+++ b/bindu/grpc/service.py
@@ -0,0 +1,220 @@
+"""BinduService gRPC implementation — handles agent registration from SDKs.
+
+When a TypeScript/Kotlin/Rust SDK calls RegisterAgent, this service:
+1. Deserializes the config JSON
+2. Converts proto SkillDefinitions to inline skill dicts
+3. Creates a GrpcAgentClient pointing to the SDK's callback address
+4. Delegates to _bindufy_core() which handles DID, auth, x402, manifest,
+ BinduApplication, and starts uvicorn in a background thread
+5. Returns the agent_id, DID, and A2A endpoint URL
+
+The _bindufy_core() function is the same code path as Python bindufy(),
+ensuring DRY — there is exactly one place that handles agent setup.
+"""
+
+from __future__ import annotations
+
+import json
+import time
+from pathlib import Path
+
+import grpc
+
+from bindu.grpc.client import GrpcAgentClient
+from bindu.grpc.generated import agent_handler_pb2, agent_handler_pb2_grpc
+from bindu.grpc.registry import AgentRegistry
+from bindu.settings import app_settings
+from bindu.utils.logging import get_logger
+
+logger = get_logger("bindu.grpc.service")
+
+
+def _proto_skills_to_dicts(
+ skills: list[agent_handler_pb2.SkillDefinition],
+) -> list[dict]:
+ """Convert proto SkillDefinition messages to inline skill dicts.
+
+ The inline dict format is already supported by load_skills() in the core.
+ Each skill dict contains the parsed content so the core doesn't need
+ filesystem access to the SDK's project directory.
+
+ Args:
+ skills: List of proto SkillDefinition messages from the SDK.
+
+ Returns:
+ List of skill dicts compatible with create_manifest().
+ """
+ result = []
+ for skill in skills:
+ skill_dict = {
+ "name": skill.name,
+ "description": skill.description,
+ "tags": list(skill.tags),
+ "input_modes": list(skill.input_modes),
+ "output_modes": list(skill.output_modes),
+ }
+ if skill.version:
+ skill_dict["version"] = skill.version
+ if skill.author:
+ skill_dict["author"] = skill.author
+ if skill.raw_content:
+ skill_dict["raw_content"] = skill.raw_content
+ skill_dict["format"] = skill.format or "yaml"
+ result.append(skill_dict)
+ return result
+
+
+class BinduServiceImpl(agent_handler_pb2_grpc.BinduServiceServicer):
+ """gRPC servicer for BinduService — handles SDK registration and lifecycle.
+
+ This runs on the Bindu core's gRPC server (port 3774). SDKs connect to
+ this service to register their agents, send heartbeats, and unregister.
+
+ Attributes:
+ registry: Thread-safe agent registry for tracking registered agents.
+ """
+
+ def __init__(self, registry: AgentRegistry) -> None: # noqa: D107
+ self.registry = registry
+
+ def RegisterAgent(
+ self,
+ request: agent_handler_pb2.RegisterAgentRequest,
+ context: grpc.ServicerContext,
+ ) -> agent_handler_pb2.RegisterAgentResponse:
+ """Register a remote agent and start its A2A HTTP server.
+
+ This method:
+ 1. Parses the config JSON from the SDK
+ 2. Creates a GrpcAgentClient for the SDK's callback address
+ 3. Calls _bindufy_core() to run the full setup (DID, auth, x402, etc.)
+ 4. Starts uvicorn in a background thread
+ 5. Returns agent identity and URL
+
+ Args:
+ request: RegisterAgentRequest with config_json, skills, and callback.
+ context: gRPC servicer context.
+
+ Returns:
+ RegisterAgentResponse with agent_id, DID, and A2A URL.
+ """
+ try:
+ # 1. Parse config from JSON
+ config = json.loads(request.config_json)
+ logger.info(
+ f"RegisterAgent received for '{config.get('name', 'unknown')}' "
+ f"with callback at {request.grpc_callback_address}"
+ )
+
+ # 2. Convert proto skills to inline dicts
+ skills = _proto_skills_to_dicts(list(request.skills))
+
+ # 3. Create GrpcAgentClient as the handler callable
+ grpc_client = GrpcAgentClient(
+ callback_address=request.grpc_callback_address,
+ timeout=app_settings.grpc.handler_timeout,
+ )
+
+ # 4. Determine key directory for this agent (NOTE(review): agent_name is SDK-supplied — sanitize before using it as a path component to prevent traversal)
+ agent_name = config.get("name", "unknown")
+ key_dir = Path(f".bindu/agents/{agent_name}")
+ key_dir.mkdir(parents=True, exist_ok=True)
+
+ # 5. Run the full bindufy logic via _bindufy_core
+ # This is the SAME code path as Python bindufy() — DRY
+ from bindu.penguin.bindufy import _bindufy_core
+
+ manifest = _bindufy_core(
+ config=config,
+ handler_callable=grpc_client,
+ run_server=True,
+ key_dir=key_dir,
+ launch=False,
+ caller_dir=key_dir,
+ skills_override=skills,
+ skip_handler_validation=True,
+ run_server_in_background=True, # Don't block the gRPC call
+ )
+
+ # 6. Register in our registry
+ self.registry.register(
+ agent_id=str(manifest.id),
+ grpc_callback_address=request.grpc_callback_address,
+ manifest=manifest,
+ )
+
+ logger.info(
+ f"Agent '{agent_name}' registered successfully: "
+ f"id={manifest.id}, did={manifest.did_extension.did}, "
+ f"url={manifest.url}"
+ )
+
+ return agent_handler_pb2.RegisterAgentResponse(
+ success=True,
+ agent_id=str(manifest.id),
+ did=str(manifest.did_extension.did),
+ agent_url=manifest.url,
+ )
+
+ except json.JSONDecodeError as e:
+ error_msg = f"Invalid config_json: {e}"
+ logger.error(error_msg)
+ return agent_handler_pb2.RegisterAgentResponse(
+ success=False, error=error_msg
+ )
+ except Exception as e:
+ error_msg = f"Registration failed: {e}"
+ logger.error(error_msg, exc_info=True)
+ return agent_handler_pb2.RegisterAgentResponse(
+ success=False, error=error_msg
+ )
+
+ def Heartbeat(
+ self,
+ request: agent_handler_pb2.HeartbeatRequest,
+ context: grpc.ServicerContext,
+ ) -> agent_handler_pb2.HeartbeatResponse:
+ """Process a heartbeat from a registered SDK agent.
+
+ Args:
+ request: HeartbeatRequest with agent_id and timestamp.
+ context: gRPC servicer context.
+
+ Returns:
+ HeartbeatResponse with acknowledgment.
+ """
+ updated = self.registry.update_heartbeat(request.agent_id)
+ if not updated:
+ logger.warning(f"Heartbeat from unknown agent: {request.agent_id}")
+ return agent_handler_pb2.HeartbeatResponse(
+ acknowledged=updated,
+ server_timestamp=int(time.time() * 1000),
+ )
+
+ def UnregisterAgent(
+ self,
+ request: agent_handler_pb2.UnregisterAgentRequest,
+ context: grpc.ServicerContext,
+ ) -> agent_handler_pb2.UnregisterAgentResponse:
+ """Unregister an agent and clean up resources.
+
+ Args:
+ request: UnregisterAgentRequest with agent_id.
+ context: gRPC servicer context.
+
+ Returns:
+ UnregisterAgentResponse with success status.
+ """
+ # Best-effort: close the handler client stored as manifest.run, if it exposes close()
+ entry = self.registry.get(request.agent_id)
+ if entry and hasattr(entry.manifest.run, "close"):
+ close_fn = getattr(entry.manifest.run, "close")
+ close_fn()
+
+ removed = self.registry.unregister(request.agent_id)
+ if removed:
+ logger.info(f"Agent {request.agent_id} unregistered successfully")
+ return agent_handler_pb2.UnregisterAgentResponse(
+ success=removed,
+ error="" if removed else f"Agent {request.agent_id} not found",
+ )
diff --git a/bindu/penguin/bindufy.py b/bindu/penguin/bindufy.py
index aa79cd25..bcbdcfab 100644
--- a/bindu/penguin/bindufy.py
+++ b/bindu/penguin/bindufy.py
@@ -344,73 +344,46 @@ def _create_deployment_config(
)
-def bindufy(
+def _bindufy_core(
config: Dict[str, Any],
- handler: Callable[[list[dict[str, str]]], Any],
+ handler_callable: Callable,
run_server: bool = True,
key_dir: str | Path | None = None,
launch: bool = False,
+ caller_dir: Path | None = None,
+ skills_override: list | None = None,
+ skip_handler_validation: bool = False,
+ run_server_in_background: bool = False,
) -> AgentManifest:
- """Transform an agent instance and handler into a bindu-compatible agent.
+ """Core bindufy logic shared by both Python and gRPC registration paths.
+
+ This is the internal engine that transforms a config + callable into a
+ fully running Bindu microservice with DID, auth, x402, A2A, scheduler,
+ and storage. Both bindufy() (Python path) and BinduServiceImpl.RegisterAgent()
+ (gRPC path) delegate to this function.
Args:
- config: Configuration dictionary containing:
- - author: Agent author email (required for Hibiscus registration)
- - name: Human-readable agent name
- - id: Unique agent identifier (optional, auto-generated if not provided)
- - description: Agent description
- - version: Agent version string (default: "1.0.0")
- - recreate_keys: Force regeneration of existing keys (default: True)
- - skills: List of agent skills/capabilities
- - env_file: Path to .env file (optional, for local development)
- - capabilities: Technical capabilities (streaming, notifications, etc.)
- - agent_trust: Trust and security configuration
- - kind: Agent type ('agent', 'team', or 'workflow') (default: "agent")
- - debug_mode: Enable debug logging (default: False)
- - debug_level: Debug verbosity level (default: 1)
- - monitoring: Enable monitoring/metrics (default: False)
- - telemetry: Enable telemetry collection (default: True)
- - num_history_sessions: Number of conversation histories to maintain (default: 10)
- - documentation_url: URL to agent documentation
- - extra_metadata: Additional metadata dictionary
- - deployment: Deployment configuration dict
- - storage: Storage backend configuration dict
- - scheduler: Task scheduler configuration dict
- - global_webhook_url: Default webhook URL for all tasks (optional)
- - global_webhook_token: Authentication token for global webhook (optional)
- handler: The handler function that processes messages and returns responses.
- Must have signature: (messages: str) -> str
- run_server: If True, starts the uvicorn server (blocking). If False, returns manifest
- immediately for testing/programmatic usage (default: True)
- key_dir: Directory for storing DID keys. If None, attempts to detect from caller's
- directory (may fail in REPL/notebooks). Falls back to current working directory.
- launch: If True, creates a public tunnel via FRP to expose the server to the internet
- with an auto-generated subdomain (default: False)
+ config: Raw or pre-validated configuration dictionary.
+ handler_callable: The handler to execute tasks. Either a Python function
+ (from bindufy()) or a GrpcAgentClient (from gRPC registration).
+ run_server: If True, starts the uvicorn HTTP server.
+ key_dir: Directory for storing DID keys.
+ launch: If True, creates a public tunnel via FRP.
+ caller_dir: Directory of the calling file (for skill/key resolution).
+ Required for Python path, optional for gRPC path.
+ skills_override: Pre-loaded skills list (from gRPC path where SDK sends
+ skill content). If None, skills are loaded from config paths.
+ skip_handler_validation: If True, skip validate_agent_function().
+ Used for gRPC path where handler is a GrpcAgentClient.
+ run_server_in_background: If True, start uvicorn in a background thread
+ instead of blocking. Used by gRPC service so RegisterAgent can return.
Returns:
- AgentManifest: The manifest for the bindufied agent
-
- Example:
- def my_handler(messages: str) -> str:
- result = agent.run(input=messages)
- return result.to_dict()["content"]
-
- config = {
- "author": "user@example.com",
- "name": "my-agent",
- "description": "A helpful assistant",
- "capabilities": {"streaming": True},
- "deployment": {"url": "http://localhost:3773", "protocol_version": "1.0.0"},
- }
-
- manifest = bindufy(agent, config, my_handler)
+ AgentManifest: The manifest for the bindufied agent.
"""
if not isinstance(config, dict):
raise TypeError("config must be a dictionary")
- if not callable(handler):
- raise TypeError("handler must be callable")
-
# Load capability-specific configs from environment variables (webhooks, negotiation)
config = load_config_from_env(config)
@@ -460,23 +433,20 @@ def my_handler(messages: str) -> str:
if auth_config is not None:
update_auth_settings(auth_config)
- # Validate that this is a protocol-compliant function
- handler_name = getattr(handler, "__name__", "")
- logger.info(f"Validating handler function: {handler_name}")
- validate_agent_function(handler)
- logger.info(f"Agent ID: {agent_id}")
-
- # Get caller information for file paths
- frame = inspect.currentframe()
- if not frame or not frame.f_back:
- raise RuntimeError("Unable to determine caller file path")
+ # Validate handler if required (skipped for gRPC path)
+ if not skip_handler_validation:
+ handler_name = getattr(handler_callable, "__name__", "")
+ logger.info(f"Validating handler function: {handler_name}")
+ validate_agent_function(handler_callable)
- caller_file = inspect.getframeinfo(frame.f_back).filename
- caller_dir = Path(os.path.abspath(caller_file)).parent
+ logger.info(f"Agent ID: {agent_id}")
# Determine key directory with fallback strategy
+ effective_key_dir = key_dir or caller_dir
resolved_key_dir = resolve_key_directory(
- explicit_dir=key_dir, caller_dir=caller_dir, subdir=app_settings.did.pki_dir
+ explicit_dir=effective_key_dir,
+ caller_dir=caller_dir or Path.cwd(),
+ subdir=app_settings.did.pki_dir,
)
# Initialize DID extension with key management
@@ -489,12 +459,15 @@ def my_handler(messages: str) -> str:
key_password=validated_config.get("key_password"),
)
- # Load skills from configuration (supports both file-based and inline)
+ # Load skills: use override (gRPC path) or load from config paths (Python path)
logger.info("Loading agent skills...")
- skills_list = load_skills(
- validated_config.get("skills") or [],
- caller_dir, # Always set at this point
- )
+ if skills_override is not None:
+ skills_list = skills_override
+ else:
+ skills_list = load_skills(
+ validated_config.get("skills") or [],
+ caller_dir or Path.cwd(),
+ )
# Set agent metadata for DID document
agent_url = (
@@ -522,7 +495,7 @@ def my_handler(messages: str) -> str:
# Create agent manifest with loaded skills
_manifest = create_manifest(
- agent_function=handler,
+ agent_function=handler_callable,
id=agent_id,
did_extension=did_extension,
name=validated_config["name"],
@@ -563,7 +536,11 @@ def my_handler(messages: str) -> str:
# Register agent in Hydra if authentication is enabled with Hydra provider
credentials = _register_in_hydra(
- agent_id_str, validated_config, agent_url, did_extension, caller_dir
+ agent_id_str,
+ validated_config,
+ agent_url,
+ did_extension,
+ caller_dir or Path.cwd(),
)
logger.info(f"Starting deployment for agent: {agent_id}")
@@ -571,9 +548,6 @@ def my_handler(messages: str) -> str:
# Import server components (deferred to avoid circular import)
from bindu.server import BinduApplication
- # Storage and scheduler will be initialized in BinduApplication's lifespan
- # No need to create instances here - just pass the config
-
# Create telemetry configuration
telemetry_config = _create_telemetry_config(validated_config)
@@ -596,7 +570,7 @@ def my_handler(messages: str) -> str:
# Create tunnel if enabled
tunnel_url = _setup_tunnel(tunnel_config, port, _manifest, bindu_app)
- # Start server if requested (blocking), otherwise return manifest immediately
+ # Start server if requested
if run_server:
# Display server startup banner
prepare_server_display(
@@ -609,11 +583,111 @@ def my_handler(messages: str) -> str:
tunnel_url=tunnel_url,
)
- # Run server with graceful shutdown handling
- start_uvicorn_server(bindu_app, host=host, port=port, display_info=True)
+ if run_server_in_background:
+ # Start uvicorn in a background thread (used by gRPC service)
+ import threading
+
+ server_thread = threading.Thread(
+ target=start_uvicorn_server,
+ args=(bindu_app,),
+ kwargs={"host": host, "port": port, "display_info": True},
+ daemon=True,
+ name=f"uvicorn-{validated_config['name']}",
+ )
+ server_thread.start()
+ logger.info(f"HTTP server started in background thread on {host}:{port}")
+ else:
+ # Run server blocking (normal Python bindufy path)
+ start_uvicorn_server(bindu_app, host=host, port=port, display_info=True)
else:
logger.info(
"Server not started (run_server=False). Manifest returned for programmatic use."
)
return _manifest
+
+
+def bindufy(
+ config: Dict[str, Any],
+ handler: Callable[[list[dict[str, str]]], Any],
+ run_server: bool = True,
+ key_dir: str | Path | None = None,
+ launch: bool = False,
+) -> AgentManifest:
+ """Transform an agent handler into a Bindu microservice.
+
+ This is the main entry point for Python agents. It validates the handler,
+ resolves the caller directory, and delegates to _bindufy_core() which
+ handles DID, auth, x402, manifest creation, and server startup.
+
+ Args:
+ config: Configuration dictionary containing:
+ - author: Agent author email (required)
+ - name: Human-readable agent name
+ - id: Unique agent identifier (optional, auto-generated if not provided)
+ - description: Agent description
+ - version: Agent version string (default: "1.0.0")
+ - recreate_keys: Force regeneration of existing keys (default: True)
+ - skills: List of agent skills/capabilities
+ - env_file: Path to .env file (optional, for local development)
+ - capabilities: Technical capabilities (streaming, notifications, etc.)
+ - agent_trust: Trust and security configuration
+ - kind: Agent type ('agent', 'team', or 'workflow') (default: "agent")
+ - debug_mode: Enable debug logging (default: False)
+ - debug_level: Debug verbosity level (default: 1)
+ - monitoring: Enable monitoring/metrics (default: False)
+ - telemetry: Enable telemetry collection (default: True)
+ - num_history_sessions: Number of conversation histories to maintain (default: 10)
+ - documentation_url: URL to agent documentation
+ - extra_metadata: Additional metadata dictionary
+ - deployment: Deployment configuration dict
+ - storage: Storage backend configuration dict
+ - scheduler: Task scheduler configuration dict
+ - global_webhook_url: Default webhook URL for all tasks (optional)
+ - global_webhook_token: Authentication token for global webhook (optional)
+ handler: The handler function that processes messages and returns responses.
+ Must have signature: (messages: list[dict[str, str]]) -> Any
+ run_server: If True, starts the uvicorn server (blocking). If False, returns manifest
+ immediately for testing/programmatic usage (default: True)
+ key_dir: Directory for storing DID keys. If None, attempts to detect from caller's
+ directory (may fail in REPL/notebooks). Falls back to current working directory.
+ launch: If True, creates a public tunnel via FRP to expose the server to the internet
+ with an auto-generated subdomain (default: False)
+
+ Returns:
+ AgentManifest: The manifest for the bindufied agent
+
+ Example:
+ def my_handler(messages: list[dict[str, str]]) -> str:
+ result = agent.run(input=messages)
+ return result.to_dict()["content"]
+
+ config = {
+ "author": "user@example.com",
+ "name": "my-agent",
+ "description": "A helpful assistant",
+ "capabilities": {"streaming": True},
+ "deployment": {"url": "http://localhost:3773", "protocol_version": "1.0.0"},
+ }
+
+ manifest = bindufy(config, my_handler)
+ """
+ if not callable(handler):
+ raise TypeError("handler must be callable")
+
+ # Get caller information for file paths
+ frame = inspect.currentframe()
+ if not frame or not frame.f_back:
+ raise RuntimeError("Unable to determine caller file path")
+
+ caller_file = inspect.getframeinfo(frame.f_back).filename
+ caller_dir = Path(os.path.abspath(caller_file)).parent
+
+ return _bindufy_core(
+ config=config,
+ handler_callable=handler,
+ run_server=run_server,
+ key_dir=key_dir,
+ launch=launch,
+ caller_dir=caller_dir,
+ )
diff --git a/bindu/settings.py b/bindu/settings.py
index a6a70779..9a2c8d1f 100644
--- a/bindu/settings.py
+++ b/bindu/settings.py
@@ -949,6 +949,73 @@ class SentrySettings(BaseSettings):
debug: bool = False
+class GrpcSettings(BaseSettings):
+    """gRPC adapter configuration for language-agnostic agent support.
+
+    When enabled, the Bindu core starts a gRPC server alongside the HTTP server.
+    External SDKs (TypeScript, Kotlin, Rust) connect to this gRPC server to
+    register their agents and receive handler calls.
+
+    The gRPC server implements BinduService (registration) and acts as a client
+    to the SDK's AgentHandler service (task execution).
+
+    All fields are overridable via environment variables with the GRPC__
+    prefix (e.g. GRPC__ENABLED, GRPC__PORT).
+
+    Architecture:
+        SDK (any language) --gRPC--> Bindu Core (:3774)
+            RegisterAgent(config, skills, callback_address)
+
+        Bindu Core --gRPC--> SDK (callback_address)
+            HandleMessages(messages) when a task arrives
+    """
+
+    # extra="allow" keeps unknown GRPC__* env vars from raising at load time.
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_prefix="GRPC__",
+        extra="allow",
+    )
+
+    # Master switch: disabled by default so plain Python bindufy deployments
+    # never open the extra port.
+    enabled: bool = Field(
+        default=False,
+        description="Enable gRPC server for language-agnostic SDK support",
+    )
+
+    # Bind address; 0.0.0.0 accepts connections on all interfaces.
+    host: str = Field(
+        default="0.0.0.0",
+        description="Host to bind the gRPC server to",
+    )
+
+    # Separate from the HTTP/A2A port (3773) so both servers can coexist
+    # in one process.
+    port: int = Field(
+        default=3774,
+        description="Port for the gRPC server (default: 3774)",
+    )
+
+    # Size of the thread pool backing the synchronous gRPC server.
+    max_workers: int = Field(
+        default=10,
+        description="Maximum number of gRPC server worker threads",
+    )
+
+    # Upper bound on a single gRPC message (applies to config + skill payloads).
+    max_message_length: int = Field(
+        default=4 * 1024 * 1024,
+        description="Maximum gRPC message size in bytes (default: 4MB)",
+    )
+
+    # Deadline for each HandleMessages round-trip to the SDK, in seconds.
+    handler_timeout: float = Field(
+        default=30.0,
+        description="Timeout in seconds for calling SDK's HandleMessages",
+    )
+
+    # How often (seconds) registered agents are health-checked / expected
+    # to heartbeat.
+    health_check_interval: int = Field(
+        default=30,
+        description="Interval in seconds for health checking registered agents",
+    )
+
+
class Settings(BaseSettings):
"""Main settings class that aggregates all configuration components."""
@@ -976,6 +1043,7 @@ class Settings(BaseSettings):
retry: RetrySettings = RetrySettings()
negotiation: NegotiationSettings = NegotiationSettings()
sentry: SentrySettings = SentrySettings()
+ grpc: GrpcSettings = GrpcSettings()
app_settings = Settings()
diff --git a/bindu/utils/server_runner.py b/bindu/utils/server_runner.py
index 5d61083d..581ea5b6 100644
--- a/bindu/utils/server_runner.py
+++ b/bindu/utils/server_runner.py
@@ -6,6 +6,7 @@
import signal
import sys
+import threading
from typing import Any
import uvicorn
@@ -19,7 +20,12 @@ def setup_signal_handlers() -> None:
"""Register signal handlers for graceful shutdown.
Registers handlers for SIGINT (Ctrl+C) and SIGTERM (Docker/systemd stop).
+ Skips registration if not running in the main thread (e.g., when uvicorn
+ is started in a background thread by the gRPC registration flow).
"""
+ if threading.current_thread() is not threading.main_thread():
+ logger.debug("Skipping signal handler registration (not in main thread)")
+ return
def handle_shutdown(signum: int, frame: Any) -> None:
"""Handle shutdown signals gracefully."""
@@ -39,13 +45,17 @@ def handle_shutdown(signum: int, frame: Any) -> None:
def run_server(app: Any, host: str, port: int, display_info: bool = True) -> None:
"""Run uvicorn server with graceful shutdown handling.
+ Supports being called from both the main thread (normal bindufy flow)
+ and from a background thread (gRPC registration flow via _bindufy_core
+ with run_server_in_background=True).
+
Args:
app: ASGI application to serve
host: Host address to bind to
port: Port number to bind to
display_info: Whether to display startup info messages
"""
- # Setup signal handlers
+ # Setup signal handlers (skips automatically if not in main thread)
setup_signal_handlers()
if display_info:
diff --git a/docs/GRPC_LANGUAGE_AGNOSTIC.md b/docs/GRPC_LANGUAGE_AGNOSTIC.md
new file mode 100644
index 00000000..4f34f54d
--- /dev/null
+++ b/docs/GRPC_LANGUAGE_AGNOSTIC.md
@@ -0,0 +1,838 @@
+# gRPC Language-Agnostic Agent Support
+
+> **📁 This documentation has been reorganized!**
+> The content below is preserved for reference, but the **new structured documentation** is at:
+> - **[docs/grpc/](./grpc/)** - Main documentation hub
+> - **[docs/grpc/api-reference.md](./grpc/api-reference.md)** - Complete API reference
+> - **[docs/grpc/client.md](./grpc/client.md)** - GrpcAgentClient implementation
+> - **[docs/grpc/limitations.md](./grpc/limitations.md)** - Known limitations and gaps
+
+---
+
+Bindu's gRPC adapter enables agents written in **any programming language** — TypeScript, Kotlin, Rust, Go, or any language with gRPC support — to transform themselves into full Bindu microservices with DID identity, A2A protocol, x402 payments, scheduling, and storage.
+
+The gRPC layer is the bridge between the language-agnostic developer world and the Python-powered Bindu core. Developers call `bindufy()` from their language SDK, and the gRPC adapter handles everything behind the scenes.
+
+## Architecture Overview
+
+```mermaid
+graph TB
+ subgraph "Developer's Code (Any Language)"
+ TS["TypeScript Agent
(OpenAI, LangChain, etc.)"]
+ KT["Kotlin Agent
(any framework)"]
+ RS["Rust Agent
(any framework)"]
+ end
+
+ subgraph "Language SDKs (Thin Wrappers)"
+ TS_SDK["@bindu/sdk
bindufy(config, handler)"]
+ KT_SDK["bindu-sdk (Kotlin)
bindufy(config, handler)"]
+ RS_SDK["bindu-sdk (Rust)
bindufy(config, handler)"]
+ end
+
+ subgraph "Bindu Core (Python)"
+ GRPC_SERVER["gRPC Server
:3774
BinduService"]
+ BINDUFY["_bindufy_core()
DID, Auth, x402
Manifest, Scheduler, Storage"]
+ HTTP["HTTP/A2A Server
:3773
BinduApplication"]
+ WORKER["ManifestWorker
manifest.run(messages)"]
+ GRPC_CLIENT["GrpcAgentClient
(callable)"]
+ end
+
+ TS --> TS_SDK
+ KT --> KT_SDK
+ RS --> RS_SDK
+
+ TS_SDK -->|"RegisterAgent
(gRPC)"| GRPC_SERVER
+ KT_SDK -->|"RegisterAgent
(gRPC)"| GRPC_SERVER
+ RS_SDK -->|"RegisterAgent
(gRPC)"| GRPC_SERVER
+
+ GRPC_SERVER --> BINDUFY
+ BINDUFY --> HTTP
+ BINDUFY --> WORKER
+
+ WORKER -->|"manifest.run()"| GRPC_CLIENT
+ GRPC_CLIENT -->|"HandleMessages
(gRPC)"| TS_SDK
+ GRPC_CLIENT -->|"HandleMessages
(gRPC)"| KT_SDK
+ GRPC_CLIENT -->|"HandleMessages
(gRPC)"| RS_SDK
+
+ CLIENT["External Client
(A2A Protocol)"] -->|"POST /"| HTTP
+```
+
+## Two gRPC Services
+
+The gRPC adapter defines **two services** in a single proto file (`proto/agent_handler.proto`):
+
+### 1. BinduService (Core Side — Port 3774)
+
+SDKs call this service on the Bindu core to register agents and manage their lifecycle.
+
+| RPC Method | Direction | Purpose |
+|-----------|-----------|---------|
+| `RegisterAgent` | SDK → Core | Register an agent with full config, skills, and callback address. Core runs bindufy logic (DID, auth, x402, manifest, HTTP server). |
+| `Heartbeat` | SDK → Core | Periodic keep-alive signal (every 30s). Core tracks agent liveness. |
+| `UnregisterAgent` | SDK → Core | Disconnect and clean up. Core stops the agent's HTTP server. |
+
+### 2. AgentHandler (SDK Side — Dynamic Port)
+
+The core calls this service on the SDK whenever a task needs to be executed.
+
+| RPC Method | Direction | Purpose |
+|-----------|-----------|---------|
+| `HandleMessages` | Core → SDK | Execute the developer's handler with conversation history. This is called every time an A2A request arrives. |
+| `HandleMessagesStream` | Core → SDK | ⚠️ **NOT IMPLEMENTED** - Defined in proto but `GrpcAgentClient` doesn't support streaming. See [limitations](./grpc/limitations.md). |
+| `GetCapabilities` | Core → SDK | Query what the agent supports (skills, streaming, etc.). |
+| `HealthCheck` | Core → SDK | Verify the SDK process is responsive. |
+
+## Complete Message Flow
+
+```mermaid
+sequenceDiagram
+ participant Dev as Developer's Code
+ participant SDK as Language SDK
+ participant Core as Bindu Core (:3774)
+ participant HTTP as A2A Server (:3773)
+ participant Worker as ManifestWorker
+ participant Client as External Client
+
+ Note over Dev,SDK: 1. Agent Startup
+
+ Dev->>SDK: bindufy(config, handler)
+ SDK->>SDK: Read skill files locally
+ SDK->>SDK: Start AgentHandler gRPC server (random port)
+ SDK->>Core: RegisterAgent(config_json, skills, callback_address)
+
+ Note over Core: Core runs full bindufy logic
+
+ Core->>Core: Validate config
+ Core->>Core: Generate agent ID (SHA256)
+ Core->>Core: Setup DID (Ed25519 keys)
+ Core->>Core: Setup x402 payments (if configured)
+ Core->>Core: Create manifest (manifest.run = GrpcAgentClient)
+ Core->>Core: Create BinduApplication (Starlette + middleware)
+ Core->>HTTP: Start uvicorn (background thread)
+
+ Core-->>SDK: RegisterAgentResponse {agent_id, did, agent_url}
+ SDK-->>Dev: "Agent registered! A2A URL: http://localhost:3773"
+
+ Note over SDK,Core: 2. Heartbeat Loop (every 30s)
+
+ loop Every 30 seconds
+ SDK->>Core: Heartbeat(agent_id, timestamp)
+ Core-->>SDK: HeartbeatResponse(acknowledged)
+ end
+
+ Note over Client,Dev: 3. Runtime — Message Execution
+
+ Client->>HTTP: POST / (A2A message/send)
+ HTTP->>Worker: TaskManager → Scheduler → Worker
+ Worker->>Worker: Build message history
+ Worker->>Worker: manifest.run(messages)
+
+ Note over Worker,SDK: manifest.run is GrpcAgentClient
+
+ Worker->>SDK: HandleMessages(messages) via gRPC
+ SDK->>Dev: handler(messages) — developer's function
+ Dev-->>SDK: response (string or {state, prompt})
+ SDK-->>Worker: HandleResponse
+
+ Note over Worker: ResultProcessor → ResponseDetector
+
+ Worker->>Worker: Normalize result, detect state
+ Worker->>HTTP: Update storage, create artifacts
+ HTTP-->>Client: A2A JSON-RPC response
+
+ Note over Client,Dev: 4. Shutdown
+
+ Dev->>SDK: Ctrl+C
+ SDK->>Core: UnregisterAgent(agent_id)
+ SDK->>SDK: Kill Python core child process
+```
+
+## GrpcAgentClient — The Core Bridge
+
+`GrpcAgentClient` is the key component that makes gRPC transparent to the rest of the Bindu core. It is a **callable class** that replaces `manifest.run` for remote agents.
+
+### How it works
+
+In `ManifestWorker.run_task()` (line 171 of `manifest_worker.py`):
+
+```python
+raw_results = self.manifest.run(message_history or [])
+```
+
+For a **Python agent**, `manifest.run` is a direct Python function call.
+
+For a **remote agent** (TypeScript, Kotlin, etc.), `manifest.run` is a `GrpcAgentClient` instance. When called, it:
+
+1. Converts `list[dict[str, str]]` → proto `ChatMessage` objects
+2. Calls `AgentHandler.HandleMessages` on the SDK via gRPC
+3. Converts the proto `HandleResponse` back to `str` or `dict`
+4. Returns the result to ManifestWorker
+
+### Response format contract
+
+The GrpcAgentClient returns exactly what `ResultProcessor` and `ResponseDetector` expect:
+
+| SDK returns | GrpcAgentClient returns | Task state |
+|------------|------------------------|------------|
+| Plain string (`"Hello"`) | `str` → `"Hello"` | `completed` |
+| `{state: "input-required", prompt: "Clarify?"}` | `dict` → `{"state": "input-required", "prompt": "Clarify?"}` | `input-required` |
+| `{state: "auth-required"}` | `dict` → `{"state": "auth-required"}` | `auth-required` |
+
+This means **zero changes** to ManifestWorker, ResultProcessor, ResponseDetector, or any downstream component. They cannot tell the difference between a local Python handler and a remote gRPC handler.
+
+## Proto Definition
+
+The full proto file is at `proto/agent_handler.proto`. Key design decisions:
+
+### Config sent as JSON
+
+```protobuf
+message RegisterAgentRequest {
+ string config_json = 1; // Full config as JSON string
+ repeated SkillDefinition skills = 2;
+ string grpc_callback_address = 3;
+}
+```
+
+The config is sent as a JSON string rather than typed proto fields. This means:
+- Adding new config fields to `bindufy()` does **not** require proto changes
+- Config validation happens once, in the Python core (`ConfigValidator`)
+- SDKs define their own typed config interfaces that serialize to JSON
+- **DRY principle**: config schema lives in one place (Python)
+
+### Skills sent with content
+
+```protobuf
+message SkillDefinition {
+ string name = 1;
+ string description = 2;
+ repeated string tags = 3;
+ string raw_content = 8; // Full skill.yaml/SKILL.md content
+ string format = 9; // "yaml" or "markdown"
+}
+```
+
+The SDK reads skill files from the developer's filesystem and sends the content in the proto. The core processes skills without needing filesystem access to the SDK's project directory.
+
+### Response state handling
+
+```protobuf
+message HandleResponse {
+ string content = 1;
+ string state = 2; // "", "input-required", "auth-required"
+ string prompt = 3;
+ bool is_final = 4;
+ map metadata = 5;
+}
+```
+
+When `state` is empty, the response is a normal completion. When `state` is set, it triggers a task state transition — the task stays open for follow-up messages.
+
+## SDK Developer Experience
+
+The gRPC layer is completely invisible to the developer. All SDKs expose the same `bindufy(config, handler)` function:
+
+### TypeScript
+
+```typescript
+import { bindufy, ChatMessage } from "@bindu/sdk";
+import OpenAI from "openai";
+
+const openai = new OpenAI();
+
+bindufy({
+ author: "dev@example.com",
+ name: "my-agent",
+ deployment: { url: "http://localhost:3773", expose: true },
+ skills: ["skills/question-answering"],
+}, async (messages: ChatMessage[]) => {
+ const response = await openai.chat.completions.create({
+ model: "gpt-4o",
+ messages: messages.map(m => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ })),
+ });
+ return response.choices[0].message.content || "";
+});
+```
+
+### Kotlin
+
+```kotlin
+import com.getbindu.sdk.bindufy
+
+fun main() {
+ bindufy(
+ config = mapOf(
+ "author" to "dev@example.com",
+ "name" to "my-agent",
+ "deployment" to mapOf("url" to "http://localhost:3773"),
+ )
+ ) { messages ->
+ "Echo: ${messages.last().content}"
+ }
+}
+```
+
+### Python (unchanged)
+
+```python
+from bindu.penguin.bindufy import bindufy
+
+def handler(messages):
+ return my_agent.run(messages)
+
+bindufy(config, handler) # No gRPC — direct in-process call
+```
+
+## SDK Internal Flow
+
+When a developer calls `bindufy()` from a language SDK, this is what happens inside:
+
+```mermaid
+flowchart TD
+ A["SDK: bindufy(config, handler)"] --> B["1. Read skill files from disk"]
+ B --> C["2. Start AgentHandler gRPC server\n(random port, e.g., :57139)"]
+ C --> D["3. Detect & spawn Python core\nas child process"]
+ D --> E{"bindu CLI found?"}
+ E -->|"pip installed"| F["bindu serve --grpc"]
+ E -->|"uv available"| G["uv run bindu serve --grpc"]
+ E -->|"fallback"| H["python -m bindu.cli serve --grpc"]
+ F --> I["4. Wait for :3774 to be ready"]
+ G --> I
+ H --> I
+ I --> J["5. Call RegisterAgent on :3774\n(config JSON + skills + callback)"]
+ J --> K["6. Core runs bindufy logic\n(DID, auth, x402, manifest)"]
+ K --> L["7. Core starts uvicorn on :3773\n(background thread)"]
+ L --> M["8. Return {agent_id, did, url}"]
+ M --> N["9. Start heartbeat loop (30s)"]
+ N --> O["10. Wait for HandleMessages calls"]
+
+ style A fill:#e1f5fe
+ style O fill:#e8f5e9
+```
+
+## Configuration
+
+### Environment Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `GRPC__ENABLED` | `false` | Enable gRPC server (set automatically by `bindu serve --grpc`) |
+| `GRPC__HOST` | `0.0.0.0` | gRPC server bind host |
+| `GRPC__PORT` | `3774` | gRPC server port |
+| `GRPC__MAX_WORKERS` | `10` | Thread pool size for gRPC server |
+| `GRPC__MAX_MESSAGE_LENGTH` | `4194304` | Max gRPC message size (4MB) |
+| `GRPC__HANDLER_TIMEOUT` | `30.0` | Timeout for HandleMessages calls (seconds) |
+| `GRPC__HEALTH_CHECK_INTERVAL` | `30` | Health check interval (seconds) |
+
+### Python Settings
+
+```python
+from bindu.settings import app_settings
+
+# Access gRPC settings
+app_settings.grpc.enabled # bool
+app_settings.grpc.host # str
+app_settings.grpc.port # int
+app_settings.grpc.max_workers # int
+```
+
+## Port Layout
+
+```
+Bindu Core Process
+├── :3773 Uvicorn (HTTP) — A2A protocol, agent card, DID, health, x402, metrics
+└── :3774 gRPC Server — RegisterAgent, Heartbeat, UnregisterAgent
+
+SDK Process
+└── :XXXXX gRPC Server (dynamic port) — HandleMessages, GetCapabilities, HealthCheck
+```
+
+## Agent Registry
+
+The core maintains a thread-safe in-memory registry of connected SDK agents:
+
+```python
+from bindu.grpc.registry import AgentRegistry
+
+registry = AgentRegistry()
+
+# After RegisterAgent
+registry.register(agent_id, callback_address, manifest)
+
+# Lookup
+entry = registry.get(agent_id)
+# entry.agent_id, entry.grpc_callback_address, entry.manifest,
+# entry.registered_at, entry.last_heartbeat
+
+# Heartbeat update
+registry.update_heartbeat(agent_id)
+
+# List all connected agents
+agents = registry.list_agents()
+
+# Cleanup
+registry.unregister(agent_id)
+```
+
+## Testing gRPC
+
+### Prerequisites
+
+Start the Bindu core with gRPC enabled:
+
+```bash
+cd /path/to/Bindu
+uv run bindu serve --grpc
+```
+
+You should see:
+
+```
+INFO gRPC server started on 0.0.0.0:3774
+INFO Waiting for SDK agent registrations...
+```
+
+Leave this terminal running. Open a new terminal for the tests below.
+
+### Option A: Testing with grpcurl (Command Line)
+
+Install grpcurl:
+
+```bash
+brew install grpcurl
+```
+
+#### 1. List available services
+
+```bash
+grpcurl -plaintext \
+ -import-path /path/to/Bindu/proto \
+ -proto agent_handler.proto \
+ localhost:3774 list
+```
+
+**Expected output:**
+
+```
+bindu.grpc.AgentHandler
+bindu.grpc.BinduService
+```
+
+#### 2. List methods in BinduService
+
+```bash
+grpcurl -plaintext \
+ -import-path /path/to/Bindu/proto \
+ -proto agent_handler.proto \
+ localhost:3774 list bindu.grpc.BinduService
+```
+
+**Expected output:**
+
+```
+bindu.grpc.BinduService.Heartbeat
+bindu.grpc.BinduService.RegisterAgent
+bindu.grpc.BinduService.UnregisterAgent
+```
+
+#### 3. Test Heartbeat
+
+```bash
+grpcurl -plaintext -emit-defaults \
+ -proto '/path/to/Bindu/proto/agent_handler.proto' \
+ -import-path '/path/to/Bindu/proto' \
+ -d '{"agent_id": "test-123", "timestamp": 1711234567890}' \
+ 'localhost:3774' \
+ bindu.grpc.BinduService.Heartbeat
+```
+
+**Expected response:**
+
+```json
+{
+ "acknowledged": false,
+ "server_timestamp": "1774280770851"
+}
+```
+
+`acknowledged: false` is correct — no agent with ID "test-123" is registered yet. The response confirms the gRPC server is alive and processing requests.
+
+#### 4. Test RegisterAgent (Full bindufy over gRPC)
+
+```bash
+grpcurl -plaintext -emit-defaults \
+ -proto '/path/to/Bindu/proto/agent_handler.proto' \
+ -import-path '/path/to/Bindu/proto' \
+ -d '{
+ "config_json": "{\"author\":\"test@example.com\",\"name\":\"grpc-test-agent\",\"description\":\"Testing gRPC registration\",\"deployment\":{\"url\":\"http://localhost:3773\",\"expose\":true}}",
+ "skills": [],
+ "grpc_callback_address": "localhost:50052"
+ }' \
+ 'localhost:3774' \
+ bindu.grpc.BinduService.RegisterAgent
+```
+
+**Expected response:**
+
+```json
+{
+ "success": true,
+ "agentId": "91547067-c183-e0fd-c150-27a3ca4135ed",
+ "did": "did:bindu:test_at_example_com:grpc-test-agent:91547067-c183-e0fd-c150-27a3ca4135ed",
+ "agentUrl": "http://localhost:3773",
+ "error": ""
+}
+```
+
+This confirms the **full bindufy flow** ran successfully over gRPC:
+
+| Step | Status |
+|------|--------|
+| Config validation | ✅ Passed |
+| Agent ID generation (SHA256 of author+name) | ✅ `91547067...` |
+| DID creation (Ed25519 keys) | ✅ `did:bindu:test_at_example_com:...` |
+| Manifest creation with GrpcAgentClient | ✅ Handler points to `localhost:50052` |
+| A2A HTTP server started | ✅ Running on `http://localhost:3773` |
+
+#### 5. Verify the A2A server is alive
+
+After a successful RegisterAgent, verify the HTTP server is running:
+
+```bash
+curl -s http://localhost:3773/.well-known/agent.json | python3 -m json.tool
+```
+
+**Expected:** Full agent card with DID, skills, capabilities — identical to a Python-bindufied agent.
+
+#### 6. Test Heartbeat again (now with registered agent)
+
+```bash
+grpcurl -plaintext -emit-defaults \
+ -proto '/path/to/Bindu/proto/agent_handler.proto' \
+ -import-path '/path/to/Bindu/proto' \
+ -d '{"agent_id": "91547067-c183-e0fd-c150-27a3ca4135ed", "timestamp": 1711234567890}' \
+ 'localhost:3774' \
+ bindu.grpc.BinduService.Heartbeat
+```
+
+**Expected response:**
+
+```json
+{
+ "acknowledged": true,
+ "server_timestamp": "1774280800000"
+}
+```
+
+`acknowledged: true` — the agent is registered and the heartbeat was recorded.
+
+#### 7. Test UnregisterAgent
+
+```bash
+grpcurl -plaintext -emit-defaults \
+ -proto '/path/to/Bindu/proto/agent_handler.proto' \
+ -import-path '/path/to/Bindu/proto' \
+ -d '{"agent_id": "91547067-c183-e0fd-c150-27a3ca4135ed"}' \
+ 'localhost:3774' \
+ bindu.grpc.BinduService.UnregisterAgent
+```
+
+#### 8. Clean up ports
+
+```bash
+lsof -ti:3773 -ti:3774 | xargs kill 2>/dev/null
+```
+
+### Option B: Testing with Postman
+
+#### Setup
+
+1. Open Postman
+2. Click **+** (new tab) → change the dropdown from **HTTP** to **gRPC**
+3. Enter URL: `localhost:3774`
+4. Click **Import .proto file** → select `proto/agent_handler.proto`
+5. The method dropdown will show all available RPCs
+
+#### Test Heartbeat
+
+1. Select method: `bindu.grpc.BinduService/Heartbeat`
+2. In the **Message** tab, paste:
+
+```json
+{
+ "agent_id": "test-123",
+ "timestamp": 1711234567890
+}
+```
+
+3. Click **Invoke**
+4. Verify response shows `acknowledged` and `server_timestamp`
+
+#### Test RegisterAgent
+
+1. Select method: `bindu.grpc.BinduService/RegisterAgent`
+2. In the **Message** tab, paste:
+
+```json
+{
+ "config_json": "{\"author\":\"test@example.com\",\"name\":\"postman-agent\",\"description\":\"Testing from Postman\",\"deployment\":{\"url\":\"http://localhost:3773\",\"expose\":true}}",
+ "skills": [],
+ "grpc_callback_address": "localhost:50052"
+}
+```
+
+3. Click **Invoke**
+4. Verify response shows `success: true` with `agentId`, `did`, and `agentUrl`
+
+#### Save to Collection
+
+Click **Save** → create collection `Bindu gRPC` → save each method as a separate request.
+
+> **Note:** `curl` does not work with gRPC. gRPC uses HTTP/2 with binary protobuf encoding. Use `grpcurl` or Postman's gRPC tab instead.
+
+### Option C: Testing with Python unit tests
+
+```bash
+cd /path/to/Bindu
+uv run pytest tests/unit/grpc/ -v
+```
+
+This runs all gRPC unit tests including GrpcAgentClient, AgentRegistry, and BinduServiceImpl.
+
+## Proto Generation
+
+### What is the `generated/` folder?
+
+The `bindu/grpc/generated/` folder contains auto-generated Python code from the proto definition. These files are created by the protobuf compiler (`protoc`) and should **never be edited by hand**.
+
+| Generated file | Purpose |
+|---------------|---------|
+| `agent_handler_pb2.py` | Python classes for all proto messages — `ChatMessage`, `HandleRequest`, `HandleResponse`, `RegisterAgentRequest`, etc. These are the serialization/deserialization layer. |
+| `agent_handler_pb2_grpc.py` | gRPC server base classes (`BinduServiceServicer`, `AgentHandlerServicer`) and client stubs (`BinduServiceStub`, `AgentHandlerStub`). The core's `service.py` extends the servicers, and `client.py` uses the stubs. |
+| `agent_handler_pb2.pyi` | Type hints (`.pyi` stub file) so IDEs provide autocomplete and type checking for the generated classes. |
+
+### How to regenerate
+
+If you modify `proto/agent_handler.proto`, regenerate the stubs:
+
+```bash
+cd /path/to/Bindu
+bash scripts/generate_protos.sh python
+```
+
+Or manually:
+
+```bash
+uv run python -m grpc_tools.protoc \
+ -I proto \
+ --python_out=bindu/grpc/generated \
+ --grpc_python_out=bindu/grpc/generated \
+ --pyi_out=bindu/grpc/generated \
+ proto/agent_handler.proto
+```
+
+### Generate for all languages
+
+```bash
+# Python + TypeScript
+bash scripts/generate_protos.sh python
+bash scripts/generate_protos.sh typescript
+
+# Or all at once
+bash scripts/generate_protos.sh all
+```
+
+Kotlin stubs are generated automatically by the Gradle protobuf plugin during `./gradlew build`.
+
+### How the generated code is used
+
+```python
+# In bindu/grpc/client.py (GrpcAgentClient)
+from bindu.grpc.generated.agent_handler_pb2 import ChatMessage, HandleRequest
+from bindu.grpc.generated.agent_handler_pb2_grpc import AgentHandlerStub
+
+# Convert Python dicts → proto messages
+proto_msgs = [ChatMessage(role=m["role"], content=m["content"]) for m in messages]
+request = HandleRequest(messages=proto_msgs)
+
+# Make gRPC call using generated stub
+response = self._stub.HandleMessages(request, timeout=30.0)
+```
+
+```python
+# In bindu/grpc/service.py (BinduServiceImpl)
+from bindu.grpc.generated.agent_handler_pb2 import RegisterAgentResponse
+from bindu.grpc.generated.agent_handler_pb2_grpc import BinduServiceServicer
+
+class BinduServiceImpl(BinduServiceServicer):
+ def RegisterAgent(self, request, context):
+ # request is a generated RegisterAgentRequest class
+ config = json.loads(request.config_json)
+ # ... run bindufy logic ...
+ return RegisterAgentResponse(success=True, agent_id=str(agent_id), ...)
+```
+
+### The flow
+
+```
+proto/agent_handler.proto (single source of truth)
+ │
+ │ protoc compiler
+ ▼
+bindu/grpc/generated/ (auto-generated, never edit)
+ ├── agent_handler_pb2.py → message classes
+ ├── agent_handler_pb2_grpc.py → server/client stubs
+ └── agent_handler_pb2.pyi → type hints
+ │
+ │ imported by
+ ▼
+bindu/grpc/
+ ├── client.py → uses AgentHandlerStub
+ ├── service.py → extends BinduServiceServicer
+ └── server.py → uses add_BinduServiceServicer_to_server
+```
+
+> **Important:** The `generated/` folder is committed to git so that users don't need `grpcio-tools` installed just to use Bindu. Only contributors who modify the proto need the generation tools.
+
+## File Structure
+
+```
+proto/
+ agent_handler.proto # Single source of truth for the gRPC contract
+
+bindu/grpc/
+ __init__.py # Package exports
+ generated/ # protoc output (Python stubs)
+ agent_handler_pb2.py
+ agent_handler_pb2_grpc.py
+ agent_handler_pb2.pyi
+ client.py # GrpcAgentClient (core → SDK callable)
+ server.py # gRPC server startup
+ service.py # BinduServiceImpl (handles RegisterAgent)
+ registry.py # Thread-safe agent registry
+
+bindu/cli/
+ __init__.py # `bindu serve --grpc` command
+
+sdks/
+ typescript/ # @bindu/sdk npm package
+ src/
+ index.ts # bindufy() function
+ server.ts # AgentHandler gRPC server
+ client.ts # BinduService gRPC client
+ core-launcher.ts # Spawns Python core as child process
+ types.ts # TypeScript interfaces
+ proto/
+ agent_handler.proto # Copy of proto for npm packaging
+
+ kotlin/ # bindu-sdk Gradle package
+ src/main/kotlin/com/getbindu/sdk/
+ BinduAgent.kt # bindufy() function
+ Server.kt # AgentHandler gRPC server
+ Client.kt # BinduService gRPC client
+ CoreLauncher.kt # Spawns Python core as child process
+
+scripts/
+ generate_protos.sh # Generates stubs for all languages
+```
+
+## Extending to New Languages
+
+To add support for a new language (e.g., Go, Rust, Swift):
+
+1. **Generate stubs** from `proto/agent_handler.proto` using the language's protoc plugin
+2. **Implement AgentHandler service** — receives `HandleMessages` calls, invokes the developer's handler
+3. **Implement BinduService client** — calls `RegisterAgent` on core port 3774
+4. **Implement CoreLauncher** — spawns `bindu serve --grpc` as a child process
+5. **Expose `bindufy(config, handler)`** — the developer-facing API
+
+The SDK should be ~200-400 lines. The proto contract is the single source of truth — as long as the SDK speaks the same proto, it works with any version of the Bindu core.
+
+## Testing
+
+### Test Pyramid
+
+```
+ ┌─────────────┐
+ │ E2E Tests │ tests/integration/grpc/ — real servers, real ports
+ │ (5 tests) │ Run: uv run pytest tests/integration/grpc/ -v -m e2e
+ ├──────────────┤
+ │ Unit Tests │ tests/unit/grpc/ — mocked, hermetic, fast
+ │ (40+ tests) │ Run: uv run pytest tests/unit/grpc/ -v
+ └──────────────┘
+```
+
+### Unit Tests (in pre-commit, every commit)
+
+Fast, hermetic, no network ports. All gRPC calls are mocked.
+
+```bash
+uv run pytest tests/unit/grpc/ -v
+```
+
+| Test file | What it covers |
+|-----------|---------------|
+| `test_client.py` | GrpcAgentClient — unary, streaming, health check, capabilities, connection lifecycle |
+| `test_registry.py` | AgentRegistry — register, unregister, heartbeat, thread safety |
+| `test_service.py` | BinduServiceImpl — RegisterAgent, config conversion, error handling |
+
+### E2E Integration Tests (in CI, every PR)
+
+Full round-trip with real gRPC and HTTP servers on non-standard ports (13773, 13774, 13999) to avoid conflicts.
+
+```bash
+uv run pytest tests/integration/grpc/ -v -m e2e
+```
+
+| Test | What it proves |
+|------|---------------|
+| `test_heartbeat_unregistered` | gRPC server starts, accepts requests |
+| `test_register_agent` | Full bindufy flow over gRPC — DID, manifest, HTTP server |
+| `test_heartbeat_registered` | Heartbeat acknowledged for registered agents |
+| `test_agent_card_available` | A2A agent card served with DID extension after registration |
+| `test_send_message_and_get_response` | **Full round-trip**: A2A HTTP → TaskManager → Scheduler → Worker → GrpcAgentClient → Mock AgentHandler → response with DID signature |
+| `test_health_endpoint` | /health endpoint works on registered agent's server |
+
+The E2E tests use a `MockAgentHandler` that simulates what a TypeScript or Kotlin SDK does — receives `HandleMessages` calls and returns echo responses.
+
+### CI Pipeline
+
+The CI workflow (`.github/workflows/ci.yml`) runs on every PR to main:
+
+```
+┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐
+│ Unit Tests │ │ E2E gRPC Tests │ │ TypeScript SDK │
+│ Python 3.12+ │────►│ Real servers │ │ Build verify │
+│ Pre-commit │ │ Full round-trip │ │ npm install │
+│ Coverage ≥60% │ │ │ │ npm run build │
+└──────────────────┘ └──────────────────┘ └──────────────────┘
+```
+
+- **Unit tests** run on Python 3.12 and 3.13 in parallel
+- **E2E tests** run after unit tests pass (real gRPC + HTTP servers)
+- **TypeScript SDK** build verification runs in parallel
+
+### Running All Tests Locally
+
+```bash
+# Unit tests only (fast, ~7s)
+uv run pytest tests/unit/ -v
+
+# E2E tests only (needs ports, ~10s)
+uv run pytest tests/integration/grpc/ -v -m e2e
+
+# Everything
+uv run pytest tests/ -v
+
+# With coverage
+uv run pytest tests/ --cov=bindu --cov-report=term-missing
+```
+
+## Backward Compatibility
+
+- **Python agents are unaffected.** `bindufy(config, handler)` works exactly as before — no gRPC, no second process, direct in-process handler call.
+- **gRPC is opt-in.** The gRPC server only starts when `bindu serve --grpc` is called or when a language SDK spawns the core.
+- **Proto evolution.** The proto uses proto3 with optional fields. New fields can be added without breaking existing SDKs. Field numbers are never reused.
diff --git a/docs/grpc/README.md b/docs/grpc/README.md
new file mode 100644
index 00000000..4489b1d5
--- /dev/null
+++ b/docs/grpc/README.md
@@ -0,0 +1,105 @@
+# Language-Agnostic Agents
+
+## The Problem
+
+You built a great agent in TypeScript. It uses the OpenAI SDK, calls GPT-4o, handles multi-turn conversations. But to make it a **real microservice** — with identity, authentication, payments, task scheduling, and an interoperable protocol — you'd need to rewrite all of that infrastructure from scratch. In TypeScript. Again.
+
+That's months of work. And then someone wants a Kotlin agent. Start over.
+
+## The Solution
+
+Bindu's gRPC adapter lets any language call `bindufy()` and get the **exact same microservice** a Python agent gets. DID identity, A2A protocol, x402 payments, OAuth2 auth, Redis scheduling, PostgreSQL storage — all of it. No reimplementation.
+
+**Python** (direct, in-process):
+```python
+bindufy(config, handler) # handler runs in the same process
+```
+
+**TypeScript** (via gRPC):
+```typescript
+bindufy(config, handler) // handler runs here, infrastructure runs in Python
+```
+
+Same function name. Same config. Same result. Different language.
+
+The gRPC layer is invisible to the developer. They never write proto files, start gRPC servers, or think about serialization. They call `bindufy()`, write a handler, and get a microservice.
+
+## How It Actually Works
+
+When a TypeScript developer calls `bindufy()`, three things happen:
+
+**1. The SDK starts the Bindu core as a child process.**
+The Python core handles all the infrastructure — DID, auth, x402, scheduling, storage, the HTTP server. The TypeScript developer doesn't install Python manually; the SDK detects it and spawns it.
+
+**2. The SDK registers the agent over gRPC.**
+It sends the config (author, name, skills, payment settings) to the core. The core runs the full bindufy logic — the same code path a Python agent takes — and starts an A2A HTTP server.
+
+**3. When messages arrive, the core calls the SDK's handler over gRPC.**
+A client sends an A2A message to `:3773`. The core's worker picks it up and calls `manifest.run(messages)`. For a gRPC agent, that's a `HandleMessages` call to the TypeScript process. The handler runs, returns a response, and the core sends it back to the client.
+
+```
+Client ──HTTP──► Bindu Core ──gRPC──► TypeScript Handler ──► OpenAI
+ :3773 (Python) :3774 (your code)
+
+ DID, Auth, x402 Just the handler.
+ Scheduler, Storage That's all you write.
+ A2A protocol
+```
+
+The developer writes the handler. Bindu writes everything else.
+
+## Documentation
+
+| Page | What you'll learn |
+|------|------------------|
+| [Architecture](./overview.md) | How the pieces fit together — diagrams, message flow, component breakdown |
+| [API Reference](./api-reference.md) | Every gRPC method, every field, every response code |
+| [GrpcAgentClient](./client.md) | How the core calls remote agents — the bridge between Python and everything else |
+| [TypeScript SDK](./sdk-typescript.md) | Building TypeScript agents — installation, config, handler patterns, debugging |
+| [Building New SDKs](./sdk-development.md) | Adding support for Rust, Go, Swift, or any language with gRPC |
+| [Limitations](./limitations.md) | What doesn't work yet — streaming, TLS, connection pooling |
+
+## Real Examples
+
+- [TypeScript + OpenAI](../../examples/typescript-openai-agent/) — GPT-4o agent with one `bindufy()` call
+- [TypeScript + LangChain](../../examples/typescript-langchain-agent/) — LangChain.js research assistant
+- [Kotlin + OpenAI](../../examples/kotlin-openai-agent/) — Kotlin agent with the same pattern
+
+## Quick Test
+
+Start the gRPC server and verify it's alive:
+
+```bash
+uv run bindu serve --grpc
+
+# In another terminal:
+grpcurl -plaintext localhost:3774 list
+# → bindu.grpc.AgentHandler
+# → bindu.grpc.BinduService
+```
+
+Register an agent from grpcurl:
+
+```bash
+grpcurl -plaintext -emit-defaults \
+ -proto proto/agent_handler.proto \
+ -import-path proto \
+ -d '{
+ "config_json": "{\"author\":\"test@example.com\",\"name\":\"test-agent\",\"description\":\"Test\",\"deployment\":{\"url\":\"http://localhost:3773\",\"expose\":true}}",
+ "skills": [],
+ "grpc_callback_address": "localhost:50052"
+ }' \
+ localhost:3774 bindu.grpc.BinduService.RegisterAgent
+
+# → {"success": true, "agentId": "...", "did": "did:bindu:...", "agentUrl": "http://localhost:3773"}
+```
+
+That response means the full bindufy pipeline ran: config validation, DID key generation, manifest creation, HTTP server started. Over gRPC. From the command line.
+
+## Ports
+
+```
+:3773 HTTP — A2A protocol (clients connect here)
+:3774 gRPC — Agent registration (SDKs connect here)
+:XXXXX gRPC — Handler execution (core calls SDKs here, dynamic port)
+```
diff --git a/docs/grpc/SUMMARY.md b/docs/grpc/SUMMARY.md
new file mode 100644
index 00000000..e368989f
--- /dev/null
+++ b/docs/grpc/SUMMARY.md
@@ -0,0 +1,21 @@
+# Documentation Summary
+
+## What's Covered
+
+| Page | Content | Status |
+|------|---------|--------|
+| [README](./README.md) | The problem, the solution, how it works, quick test | Complete |
+| [Architecture](./overview.md) | Two-process design, two services, message flow, component breakdown | Complete |
+| [API Reference](./api-reference.md) | Every gRPC method, message type, config variable, grpcurl examples | Complete |
+| [GrpcAgentClient](./client.md) | How the core calls remote agents, response contract, connection lifecycle | Complete |
+| [TypeScript SDK](./sdk-typescript.md) | Installation, handler patterns, config, types, debugging | Complete |
+| [Building New SDKs](./sdk-development.md) | Step-by-step guide for adding Rust/Go/Swift support | Complete |
+| [Limitations](./limitations.md) | Streaming gap, no TLS, no reconnection, feature comparison | Complete |
+
+## Reading Order
+
+**If you're using the TypeScript SDK:** README -> TypeScript SDK -> examples
+
+**If you're building a new SDK:** README -> Architecture -> API Reference -> Building New SDKs
+
+**If you're a core contributor:** Architecture -> GrpcAgentClient -> API Reference -> Limitations
diff --git a/docs/grpc/api-reference.md b/docs/grpc/api-reference.md
new file mode 100644
index 00000000..ea6e6578
--- /dev/null
+++ b/docs/grpc/api-reference.md
@@ -0,0 +1,207 @@
+# API Reference
+
+The complete gRPC contract between SDKs and the Bindu core. Defined in `proto/agent_handler.proto`.
+
+## Services
+
+### BinduService (port 3774)
+
+Lives in the Bindu core. SDKs call this to register and manage agents.
+
+#### `RegisterAgent`
+
+The main entry point. SDK sends config + skills, core runs the full bindufy pipeline and returns the agent's identity.
+
+**Request:**
+```protobuf
+message RegisterAgentRequest {
+ string config_json = 1; // Full config as JSON string
+ repeated SkillDefinition skills = 2; // Skills with file content
+ string grpc_callback_address = 3; // SDK's AgentHandler address
+}
+```
+
+`config_json` matches the Python `bindufy()` config format:
+```json
+{
+ "author": "dev@example.com",
+ "name": "my-agent",
+ "description": "What it does",
+ "deployment": {"url": "http://localhost:3773", "expose": true},
+ "execution_cost": {"amount": "1000000", "token": "USDC"}
+}
+```
+
+**Response:**
+```protobuf
+message RegisterAgentResponse {
+ bool success = 1;
+  string agent_id = 2;   // Deterministic UUID-format ID (SHA256 of author + name)
+ string did = 3; // "did:bindu:author:name:id"
+ string agent_url = 4; // "http://localhost:3773"
+ string error = 5; // Error message if success=false
+}
+```
+
+**What the core does:** validates config, generates agent ID (SHA256 of author+name), creates Ed25519 DID keys, sets up x402 payments, creates manifest with `GrpcAgentClient` as handler, starts HTTP/A2A server on the configured URL.
+
+#### `Heartbeat`
+
+Keep-alive signal. SDKs send this every 30 seconds.
+
+**Request:**
+```protobuf
+message HeartbeatRequest {
+ string agent_id = 1;
+ int64 timestamp = 2; // Unix timestamp in milliseconds
+}
+```
+
+**Response:**
+```protobuf
+message HeartbeatResponse {
+ bool acknowledged = 1; // true if agent_id is registered
+ int64 server_timestamp = 2;
+}
+```
+
+#### `UnregisterAgent`
+
+Clean shutdown. SDK calls this before exiting.
+
+**Request/Response:**
+```protobuf
+message UnregisterAgentRequest { string agent_id = 1; }
+message UnregisterAgentResponse { bool success = 1; string error = 2; }
+```
+
+---
+
+### AgentHandler (dynamic port)
+
+Lives in the SDK. The core calls this when work arrives.
+
+#### `HandleMessages`
+
+The core sends conversation history, the SDK runs the developer's handler and returns the response.
+
+**Request:**
+```protobuf
+message HandleRequest {
+ repeated ChatMessage messages = 1; // Conversation history
+ string task_id = 2;
+ string context_id = 3;
+}
+
+message ChatMessage {
+ string role = 1; // "user", "assistant", or "system"
+ string content = 2;
+}
+```
+
+**Response:**
+```protobuf
+message HandleResponse {
+ string content = 1; // The response text
+ string state = 2; // "" = completed, "input-required", "auth-required"
+ string prompt = 3; // Follow-up prompt (when state is set)
+ bool is_final = 4; // Always true (streaming not implemented)
+  map&lt;string, string&gt; metadata = 5;
+}
+```
+
+**Response rules:**
+- **Normal response:** `{content: "answer", state: ""}` -> task completes
+- **Need more info:** `{state: "input-required", prompt: "Can you clarify?"}` -> task stays open
+- **Need auth:** `{state: "auth-required"}` -> task stays open
+- **Error:** Return gRPC `INTERNAL` status -> task fails
+
+#### `HandleMessagesStream`
+
+Server-side streaming variant. **Defined in proto but not implemented** in `GrpcAgentClient`. See [limitations](./limitations.md).
+
+#### `GetCapabilities`
+
+Core queries what the SDK agent supports.
+
+**Response:**
+```protobuf
+message GetCapabilitiesResponse {
+ string name = 1;
+ string description = 2;
+ string version = 3;
+ bool supports_streaming = 4;
+ repeated SkillDefinition skills = 5;
+}
+```
+
+#### `HealthCheck`
+
+Core verifies the SDK is responsive.
+
+**Response:**
+```protobuf
+message HealthCheckResponse {
+ bool healthy = 1;
+ string message = 2; // "OK" or diagnostic info
+}
+```
+
+---
+
+## Shared Message Types
+
+#### `SkillDefinition`
+
+Sent during registration. Carries the skill file content so the core doesn't need filesystem access.
+
+```protobuf
+message SkillDefinition {
+ string name = 1;
+ string description = 2;
+ repeated string tags = 3;
+ repeated string input_modes = 4;
+ repeated string output_modes = 5;
+ string version = 6;
+ string author = 7;
+ string raw_content = 8; // Full skill.yaml or SKILL.md content
+ string format = 9; // "yaml" or "markdown"
+}
+```
+
+---
+
+## Configuration
+
+Environment variables for the gRPC server:
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `GRPC__ENABLED` | `false` | Enable gRPC server |
+| `GRPC__HOST` | `0.0.0.0` | Bind address |
+| `GRPC__PORT` | `3774` | Server port |
+| `GRPC__MAX_WORKERS` | `10` | Thread pool size |
+| `GRPC__MAX_MESSAGE_LENGTH` | `4194304` | Max message size (4MB) |
+| `GRPC__HANDLER_TIMEOUT` | `30.0` | HandleMessages timeout (seconds) |
+| `GRPC__HEALTH_CHECK_INTERVAL` | `30` | Health check interval (seconds) |
+
+---
+
+## Testing with grpcurl
+
+```bash
+# List services
+grpcurl -plaintext -import-path proto -proto agent_handler.proto localhost:3774 list
+
+# Heartbeat
+grpcurl -plaintext -emit-defaults \
+ -proto proto/agent_handler.proto -import-path proto \
+ -d '{"agent_id": "test", "timestamp": 1711234567890}' \
+ localhost:3774 bindu.grpc.BinduService.Heartbeat
+
+# RegisterAgent
+grpcurl -plaintext -emit-defaults \
+ -proto proto/agent_handler.proto -import-path proto \
+ -d '{"config_json": "{\"author\":\"test@example.com\",\"name\":\"test\",\"deployment\":{\"url\":\"http://localhost:3773\",\"expose\":true}}", "skills": [], "grpc_callback_address": "localhost:50052"}' \
+ localhost:3774 bindu.grpc.BinduService.RegisterAgent
+```
diff --git a/docs/grpc/client.md b/docs/grpc/client.md
new file mode 100644
index 00000000..36426b56
--- /dev/null
+++ b/docs/grpc/client.md
@@ -0,0 +1,115 @@
+# GrpcAgentClient
+
+## What It Is
+
+`GrpcAgentClient` is a Python class that looks like a function. You call it with messages, it returns a string or dict. Internally, it makes a gRPC call to a remote process in another language. But the caller doesn't know that.
+
+This is the trick that makes the entire language-agnostic system work without changing a single line in ManifestWorker.
+
+## The Problem It Solves
+
+ManifestWorker has this line:
+
+```python
+raw_results = self.manifest.run(message_history or [])
+```
+
+For Python agents, `manifest.run` is a wrapper around the developer's handler function. It takes a list of message dicts, returns a string or dict.
+
+For TypeScript/Kotlin agents, we need that same call to go over the network. But we can't change ManifestWorker — it handles task state transitions, error handling, tracing, payment settlement. Touching it risks breaking everything.
+
+Solution: make `GrpcAgentClient` a callable that quacks like a handler function.
+
+## How It Works
+
+```python
+class GrpcAgentClient:
+ def __init__(self, callback_address: str, timeout: float = 30.0):
+ self._address = callback_address # e.g., "localhost:50052"
+ self._timeout = timeout
+
+ def __call__(self, messages, **kwargs):
+ # 1. Convert Python dicts to protobuf
+ proto_msgs = [ChatMessage(role=m["role"], content=m["content"]) for m in messages]
+ request = HandleRequest(messages=proto_msgs)
+
+ # 2. Call the SDK's AgentHandler over gRPC
+ response = self._stub.HandleMessages(request, timeout=self._timeout)
+
+ # 3. Convert back to what ManifestWorker expects
+ if response.state:
+ return {"state": response.state, "prompt": response.prompt}
+ else:
+ return response.content
+```
+
+Three steps: convert, call, convert back. That's the entire bridge.
+
+## The Response Contract
+
+ManifestWorker doesn't care how the response was produced. It only cares about the type:
+
+| Handler returns | ManifestWorker does | Task state |
+|----------------|---------------------|------------|
+| `"The capital of France is Paris."` | Creates message + artifact | `completed` |
+| `{"state": "input-required", "prompt": "Can you clarify?"}` | Creates message, keeps task open | `input-required` |
+| `{"state": "auth-required"}` | Creates message, keeps task open | `auth-required` |
+
+GrpcAgentClient returns exactly these types. The downstream code — `ResultProcessor`, `ResponseDetector`, `ArtifactBuilder` — processes them identically to a local Python handler's output.
+
+## Real Example: What Happens When a User Asks a Question
+
+A user sends "What is quantum computing?" to a TypeScript agent:
+
+```
+ManifestWorker calls manifest.run(messages)
+ → GrpcAgentClient.__call__([{"role": "user", "content": "What is quantum computing?"}])
+ → Converts to protobuf: ChatMessage(role="user", content="What is quantum computing?")
+ → gRPC call: AgentHandler.HandleMessages(HandleRequest{messages: [...]})
+ → TypeScript SDK receives the call
+ → Developer's handler runs: await openai.chat.completions.create(...)
+ → OpenAI returns: "Quantum computing is a type of computation..."
+ → SDK returns: HandleResponse{content: "Quantum computing is...", state: ""}
+ → GrpcAgentClient sees state is empty, returns the string
+→ ManifestWorker receives "Quantum computing is..." (same as a local handler)
+→ ResultProcessor normalizes → ResponseDetector says "completed"
+→ ArtifactBuilder creates DID-signed artifact
+→ User gets the response
+```
+
+The GrpcAgentClient is the only component that knows gRPC exists. Everything above and below it is oblivious.
+
+## When It's Created
+
+During `RegisterAgent`, the gRPC service creates a `GrpcAgentClient` and attaches it to the manifest:
+
+```python
+# In BinduServiceImpl.RegisterAgent():
+grpc_client = GrpcAgentClient(request.grpc_callback_address)
+
+# In create_manifest():
+manifest.run = grpc_client # GrpcAgentClient IS the handler now
+```
+
+From this point on, every task for this agent flows through the client.
+
+## Connection Lifecycle
+
+The client connects lazily — the gRPC channel is created on the first call, not during initialization. This avoids connection errors during registration if the SDK's server isn't fully ready yet.
+
+When the SDK disconnects (Ctrl+C, crash), the next `HandleMessages` call fails with `grpc.StatusCode.UNAVAILABLE`. ManifestWorker's existing error handling catches this and marks the task as failed. No special handling needed.
+
+## Health Checks and Capabilities
+
+```python
+grpc_client.health_check() # Is the SDK still running? Returns True/False
+grpc_client.get_capabilities() # What can the SDK do? Returns name, version, etc.
+```
+
+Used during heartbeat processing and capability discovery.
+
+## What It Doesn't Do Yet
+
+- **Streaming** — proto defines `HandleMessagesStream` but the client doesn't implement it. Remote agents can only return complete responses. See [limitations](./limitations.md).
+- **Reconnection** — if the SDK crashes, the client doesn't retry. The agent must be re-registered.
+- **TLS** — uses insecure channels. Only safe on localhost or trusted networks.
diff --git a/docs/grpc/limitations.md b/docs/grpc/limitations.md
new file mode 100644
index 00000000..595b0c82
--- /dev/null
+++ b/docs/grpc/limitations.md
@@ -0,0 +1,75 @@
+# Limitations
+
+Honest accounting of what doesn't work yet and what trade-offs we made.
+
+## Streaming Responses
+
+**Status: Not implemented**
+
+The proto defines `HandleMessagesStream` — a server-side streaming RPC where the SDK yields response chunks incrementally. But `GrpcAgentClient` doesn't call it. Remote agents can only return complete responses.
+
+**What this means in practice:**
+
+You're building a TypeScript agent with GPT-4o. In a Python agent, you could stream tokens back to the user as they're generated — they see the response forming word by word. With a gRPC agent, the user waits for the entire response, then sees it all at once.
+
+For short answers (< 2 seconds), this doesn't matter. For long responses (analysis, code generation, research), the UX is noticeably worse.
+
+**Workaround:** Return complete responses. Most agents do this anyway — the streaming gap only matters for chat-like interfaces where perceived latency matters.
+
+**What needs to happen:**
+1. Add `stream_messages()` method to `GrpcAgentClient`
+2. Wire it into `ManifestWorker` for streaming task execution
+3. Update SDK `AgentHandler` to support streaming handlers
+4. Add E2E tests for streaming round-trips
+
+## No TLS
+
+gRPC connections use `grpc.insecure_channel`. Traffic between the core and SDK is unencrypted.
+
+**Why it's okay for now:** The core and SDK run on the same machine (localhost). The SDK spawns the core as a child process. There's no network exposure.
+
+**When it matters:** If you deploy the core and SDK on different machines, or in a zero-trust network environment. TLS/mTLS support is planned.
+
+## No Automatic Reconnection
+
+If the SDK process crashes mid-execution, the `GrpcAgentClient` doesn't retry. The task fails, and the agent must be re-registered.
+
+**What happens:** ManifestWorker catches the gRPC `UNAVAILABLE` error and marks the task as failed. The user gets an error response. On restart, the SDK calls `RegisterAgent` again and the agent is back.
+
+**What would be better:** Automatic reconnection with exponential backoff, so transient failures (SDK restart, brief network blip) recover without re-registration.
+
+## No Connection Pooling
+
+Each `GrpcAgentClient` creates a single gRPC channel. Under high concurrency (many simultaneous tasks), all calls share one channel.
+
+For most agents this is fine — gRPC channels handle multiplexing well. But for agents processing hundreds of concurrent requests, a connection pool would reduce contention.
+
+## No gRPC-Specific Metrics
+
+The `/metrics` endpoint (Prometheus) reports HTTP request metrics but not gRPC call metrics. You can't see HandleMessages latency, error rates, or call counts in the dashboard.
+
+**Workaround:** Check the core's log output, which includes timing information for each handler call.
+
+## No Load Balancing
+
+If you run two instances of the same TypeScript agent, each one registers separately with a different callback address. There's no built-in routing to spread load across instances.
+
+**Workaround:** Use a reverse proxy (like Envoy) in front of the SDK instances, and register the proxy address as the callback.
+
+## Feature Comparison
+
+| Feature | Python Agents | gRPC Agents |
+|---------|--------------|-------------|
+| Unary responses | works | works |
+| Streaming responses | works | **not implemented** |
+| DID identity | works | works |
+| x402 payments | works | works |
+| Skills | works | works |
+| State transitions (input-required) | works | works |
+| Health checks | works | works |
+| Multi-language | Python only | any language |
+| Latency overhead | 0ms | 1-5ms |
+| TLS | N/A (in-process) | **not implemented** |
+| Auto-reconnection | N/A (in-process) | **not implemented** |
+
+The bottom line: gRPC agents have **full feature parity** with Python agents for the core functionality (DID, auth, payments, skills, A2A protocol). The gaps are in streaming, security, and resilience — all planned for future releases.
diff --git a/docs/grpc/overview.md b/docs/grpc/overview.md
new file mode 100644
index 00000000..b4118d34
--- /dev/null
+++ b/docs/grpc/overview.md
@@ -0,0 +1,152 @@
+# Architecture
+
+## The Big Picture
+
+A TypeScript developer writes an agent. They call `bindufy()`. Here's what happens:
+
+```
+Their TypeScript code Bindu Core (Python, auto-started)
+┌─────────────────────┐ ┌────────────────────────────┐
+│ │ │ │
+│ OpenAI SDK │ 1. Register │ Config validation │
+│ LangChain │ ──────gRPC────► │ DID key generation │
+│ Any framework │ │ Auth (Hydra OAuth2) │
+│ │ │ x402 payment setup │
+│ handler(messages) │ 2. Execute │ Manifest creation │
+│ ◄──────gRPC────────│────────────── │ Scheduler + Storage │
+│ │ │ HTTP/A2A server (:3773) │
+└─────────────────────┘ └────────────────────────────┘
+ SDK process Core process
+ (developer's language) (Python, invisible)
+```
+
+Two processes. One terminal. The developer only sees their code. The Python process is a hidden child process that the SDK manages automatically.
+
+## Why Two Processes?
+
+**Because the alternative is worse.**
+
+Option A: Rewrite Bindu's core in every language. DID, auth, x402, scheduler, storage, A2A protocol — in TypeScript, then Kotlin, then Rust. Thousands of lines, each time. Every bug fixed three times.
+
+Option B: Keep one core. Connect to it over a wire. The handler runs in the developer's language. Everything else runs in Python. One codebase for infrastructure. Thin SDKs for each language.
+
+We chose B. The wire is gRPC.
+
+## Two Services, Two Directions
+
+gRPC isn't one-way. Both sides are servers AND clients:
+
+**BinduService** — lives in the Python core on `:3774`
+
+The SDK calls this to register and manage its agent:
+
+| Method | What it does |
+|--------|-------------|
+| `RegisterAgent` | "Here's my config, skills, and callback address. Make me a microservice." |
+| `Heartbeat` | "I'm still alive." (every 30 seconds) |
+| `UnregisterAgent` | "I'm shutting down. Clean up." |
+
+**AgentHandler** — lives in the SDK on a dynamic port
+
+The core calls this when work arrives:
+
+| Method | What it does |
+|--------|-------------|
+| `HandleMessages` | "A user sent this message. Run your handler and give me the response." |
+| `GetCapabilities` | "What can you do?" |
+| `HealthCheck` | "Are you still there?" |
+
+This bidirectional design is why gRPC was chosen over REST. Both sides initiate calls. REST can't do that without polling or websockets.
+
+## Message Flow: What Happens When a User Sends a Message
+
+A user sends "What is the capital of France?" to a TypeScript agent that's been bindufied:
+
+```
+1. User sends HTTP POST to :3773
+ {"method": "message/send", "params": {"message": {"text": "What is the capital of France?"}}}
+
+2. Bindu Core receives the request
+ TaskManager creates a task, Scheduler queues it
+
+3. ManifestWorker picks up the task
+ Builds conversation history from storage
+ Calls manifest.run(messages)
+
+4. manifest.run is a GrpcAgentClient
+ Converts messages to protobuf
+ Calls HandleMessages on the SDK's gRPC server
+
+5. TypeScript SDK receives the call
+ Deserializes messages: [{role: "user", content: "What is the capital of France?"}]
+ Calls the developer's handler function
+
+6. Developer's handler runs
+ const response = await openai.chat.completions.create({model: "gpt-4o", messages})
+ Returns "The capital of France is Paris."
+
+7. SDK sends the response back over gRPC
+ HandleResponse {content: "The capital of France is Paris."}
+
+8. GrpcAgentClient receives the response
+ Returns the string to ManifestWorker
+
+9. ManifestWorker processes the result
+ ResultProcessor normalizes it
+ ResponseDetector determines task state → "completed"
+ ArtifactBuilder creates a DID-signed artifact
+
+10. Core sends the A2A response back to the user
+ Task completed, with DID signature on the artifact
+```
+
+The entire round trip: ~2-5 seconds. The gRPC overhead is ~1-5ms. The rest is the LLM call.
+
+## GrpcAgentClient: The Invisible Bridge
+
+This is the component that makes everything work. It's a Python class that pretends to be a handler function.
+
+In `ManifestWorker`, line 171:
+
+```python
+raw_results = self.manifest.run(message_history or [])
+```
+
+For a Python agent, `manifest.run` is a local function. For a gRPC agent, it's a `GrpcAgentClient` instance. The worker can't tell the difference. It calls it the same way, gets the same types back, and processes the result identically.
+
+This is why we didn't change ManifestWorker, ResultProcessor, ResponseDetector, or any downstream code. The abstraction holds. A callable is a callable.
+
+## What the SDK Does When You Call `bindufy()`
+
+Step by step, from the developer typing `npx tsx index.ts` to seeing "Waiting for messages...":
+
+1. **SDK reads skill files** from the project directory (yaml or markdown)
+2. **SDK starts an AgentHandler gRPC server** on a random available port
+3. **SDK detects how to run Python** — checks for `bindu` CLI, `uv`, or `python3`
+4. **SDK spawns the Bindu core** as a child process: `bindu serve --grpc --grpc-port 3774`
+5. **SDK waits for `:3774` to be ready** (polls with TCP connect, 30s timeout)
+6. **SDK calls `RegisterAgent`** with config JSON, skill data, and its callback address
+7. **Core validates config**, generates agent ID, creates DID keys, sets up x402/auth
+8. **Core creates manifest** with `manifest.run = GrpcAgentClient(callback_address)`
+9. **Core starts uvicorn** on `:3773` in a background thread
+10. **Core returns** `{agent_id, did, agent_url}` to the SDK
+11. **SDK starts a heartbeat loop** — pings the core every 30 seconds
+12. **SDK prints** "Agent registered!" and waits for HandleMessages calls
+
+When the developer presses `Ctrl+C`, the SDK kills the Python child process and exits cleanly.
+
+## Python vs gRPC Agents: What's Different?
+
+| | Python Agent | gRPC Agent |
+|---|---|---|
+| **Developer calls** | `bindufy(config, handler)` | `bindufy(config, handler)` (identical) |
+| **Handler runs in** | Same process as core | Separate process |
+| **Core started by** | `bindufy()` directly | SDK spawns as child process |
+| **Communication** | In-process function call | gRPC over localhost |
+| **Latency overhead** | 0ms | 1-5ms |
+| **Language** | Python only | Any language with gRPC |
+| **DID, auth, x402** | Full support | Full support (identical) |
+| **Skills** | Loaded from filesystem | Sent as data during registration |
+| **Streaming** | Supported | Not yet implemented |
+
+The key insight: from the outside (A2A clients, other agents, the frontend), there is **no visible difference**. The agent card looks the same. The DID is generated the same way. The A2A responses have the same structure. The artifacts carry the same DID signatures. A client cannot tell whether the agent behind `:3773` is Python, TypeScript, or Kotlin.
diff --git a/docs/grpc/sdk-development.md b/docs/grpc/sdk-development.md
new file mode 100644
index 00000000..0a40f355
--- /dev/null
+++ b/docs/grpc/sdk-development.md
@@ -0,0 +1,151 @@
+# Building SDKs for New Languages
+
+You want to add Bindu support for Rust, Go, Swift, or another language. Here's what's involved.
+
+## What an SDK Does
+
+An SDK is a thin wrapper — typically 200-400 lines — that hides gRPC from the developer. From their perspective, they call `bindufy(config, handler)` and get a microservice. The SDK handles everything in between.
+
+Concretely, an SDK does four things:
+
+1. **Implements `AgentHandler`** — a gRPC server that receives `HandleMessages` calls from the core and invokes the developer's handler
+2. **Calls `BinduService.RegisterAgent`** — a gRPC client that registers the agent with the core
+3. **Launches the Python core** — spawns `bindu serve --grpc` as a child process
+4. **Exposes `bindufy(config, handler)`** — the developer-facing API that orchestrates all of the above
+
+The proto contract at `proto/agent_handler.proto` is the single source of truth. As long as your SDK speaks the same proto, it works with any version of the core.
+
+## Step 1: Generate gRPC Stubs
+
+Every language has a protoc plugin. Generate client and server stubs from the proto:
+
+| Language | Tool | Command |
+|----------|------|---------|
+| Rust | `tonic-build` | Add `tonic-build` to `build.rs`, it compiles the proto at build time |
+| Go | `protoc-gen-go-grpc` | `protoc --go_out=. --go-grpc_out=. proto/agent_handler.proto` |
+| Swift | `grpc-swift` | `protoc --swift_out=. --grpc-swift_out=. proto/agent_handler.proto` |
+| C# | `Grpc.Tools` | NuGet package auto-generates from `.proto` in the project |
+
+The generated code gives you typed message classes and service interfaces.
+
+## Step 2: Implement AgentHandler (Server)
+
+The core calls three methods on your SDK. You need to implement them:
+
+**HandleMessages** — the critical one. Receives conversation history, calls the developer's handler, returns the response.
+
+```
+Input: HandleRequest { messages: [ChatMessage{role, content}, ...] }
+Output: HandleResponse { content: string, state: string, prompt: string, is_final: bool }
+```
+
+Rules:
+- If the handler returns a plain string, set `content` to the string and leave `state` empty
+- If the handler returns a state transition, set `state` to `"input-required"` or `"auth-required"` and `prompt` to the follow-up question
+- If the handler throws, return a gRPC `INTERNAL` error with the error message
+- Always set `is_final` to `true` (streaming not yet supported)
+
+**GetCapabilities** — return static info about the SDK.
+
+```
+Output: GetCapabilitiesResponse { name, description, version, supports_streaming }
+```
+
+**HealthCheck** — return `{healthy: true, message: "OK"}`.
+
+## Step 3: Implement BinduService Client
+
+Your SDK needs to call two methods on the core:
+
+**RegisterAgent** — sends config, skills, and the SDK's callback address.
+
+```
+Input: RegisterAgentRequest {
+ config_json: string, // Full config as JSON
+ skills: [SkillDefinition], // Skills with raw file content
+ grpc_callback_address: string // e.g., "localhost:50052"
+}
+Output: RegisterAgentResponse { success, agent_id, did, agent_url, error }
+```
+
+The `config_json` is a JSON string matching the Python `bindufy()` config format. This is intentional — the config schema lives in one place (Python), and SDKs just serialize to JSON.
+
+**Heartbeat** — call every 30 seconds to signal liveness.
+
+```
+Input: HeartbeatRequest { agent_id, timestamp }
+```
+
+## Step 4: Implement Core Launcher
+
+The SDK needs to start the Python core as a child process. The logic:
+
+1. Check if `bindu` CLI is available (pip-installed)
+2. If not, check if `uv` is available
+3. If not, fall back to `python3 -m bindu.cli`
+4. Spawn: `<runner> serve --grpc --grpc-port 3774` (using whichever runner was found above)
+5. Wait for `:3774` to accept TCP connections (poll every 500ms, timeout 30s)
+6. On parent exit (Ctrl+C), kill the child process
+
+## Step 5: Implement `bindufy()`
+
+Wire everything together in a single function:
+
+```
+function bindufy(config, handler):
+ skills = read_skill_files(config.skills)
+ callback_port = start_agent_handler_server(handler)
+ launch_python_core(grpc_port=3774)
+ wait_for_port(3774)
+ result = register_agent(config, skills, callback_address="localhost:{callback_port}")
+ start_heartbeat_loop(result.agent_id)
+ print("Agent registered! A2A URL: {result.agent_url}")
+```
+
+That's the entire SDK. Everything else is type definitions and error handling.
+
+## Skill Loading
+
+Skills are files in the developer's project. The SDK reads them and sends the content in the `RegisterAgent` call:
+
+1. For each skill path in `config.skills`, look for `skill.yaml` or `SKILL.md`
+2. Read the file content
+3. Parse the name and description (from YAML fields or Markdown frontmatter)
+4. Send as `SkillDefinition { name, description, tags, raw_content, format }`
+
+The core processes the skill content without needing filesystem access to the SDK's project.
+
+## Testing Your SDK
+
+**Unit test:** Mock the gRPC channel and verify `HandleMessages` correctly invokes the handler and serializes the response.
+
+**Integration test:** Start a real Bindu core with `bindu serve --grpc`, register an agent from your SDK, send an A2A message, and verify the response. The Python E2E tests in `tests/integration/grpc/test_grpc_e2e.py` show exactly this pattern.
+
+**Smoke test:** Run one of the examples end-to-end and `curl` the agent.
+
+## Reference: TypeScript SDK
+
+The TypeScript SDK at `sdks/typescript/` is the reference implementation. Study these files:
+
+| File | What it does | Lines |
+|------|-------------|-------|
+| `src/index.ts` | `bindufy()` function + skill loader | ~220 |
+| `src/server.ts` | AgentHandler gRPC server | ~130 |
+| `src/client.ts` | BinduService gRPC client | ~105 |
+| `src/core-launcher.ts` | Spawns Python core | ~170 |
+| `src/types.ts` | TypeScript interfaces | ~120 |
+
+Total: ~745 lines. That's the entire SDK. Most of that is type definitions and error handling. The core logic is under 300 lines.
+
+## Publishing
+
+Publish to your language's package registry:
+
+| Language | Registry | Package name convention |
+|----------|----------|----------------------|
+| Rust | crates.io | `bindu-sdk` |
+| Go | Go modules | `github.com/getbindu/bindu-sdk-go` |
+| Swift | Swift Package Manager | `bindu-sdk` |
+| C# | NuGet | `Bindu.Sdk` |
+
+Include the proto file in the package so users don't need to download it separately.
diff --git a/docs/grpc/sdk-typescript.md b/docs/grpc/sdk-typescript.md
new file mode 100644
index 00000000..792c642b
--- /dev/null
+++ b/docs/grpc/sdk-typescript.md
@@ -0,0 +1,269 @@
+# TypeScript SDK
+
+## The Idea
+
+You have a TypeScript agent. Maybe it uses the OpenAI SDK, LangChain.js, or just raw `fetch` calls. You want it to be a real microservice — with identity, authentication, payments, and a standard protocol. But you don't want to rewrite infrastructure.
+
+```typescript
+import { bindufy } from "@bindu/sdk";
+
+bindufy({
+ author: "dev@example.com",
+ name: "my-agent",
+ deployment: { url: "http://localhost:3773", expose: true },
+}, async (messages) => {
+ // Your agent logic — any framework, any LLM
+ return "Hello from TypeScript!";
+});
+```
+
+One function call. One terminal. Full microservice.
+
+## Installation
+
+```bash
+npm install @bindu/sdk
+```
+
+The SDK also needs the Bindu Python core installed on the machine:
+
+```bash
+pip install bindu
+```
+
+The SDK finds and launches the Python core automatically. You don't start it manually.
+
+## What Happens When You Call `bindufy()`
+
+1. SDK reads your skill files (yaml/markdown) from disk
+2. SDK starts a gRPC server on a random port — this is where the core will call your handler
+3. SDK spawns `bindu serve --grpc` as a child process
+4. SDK waits for the core's gRPC server to be ready on `:3774`
+5. SDK calls `RegisterAgent` with your config, skills, and callback address
+6. Core runs the full bindufy pipeline — DID, auth, x402, manifest, HTTP server
+7. SDK receives the agent ID, DID, and A2A URL
+8. SDK starts a heartbeat loop (every 30 seconds)
+9. You see "Waiting for messages..."
+
+When a message arrives via A2A HTTP, the core calls your handler over gRPC. You process it, return a string, and the core sends it back to the client with a DID signature.
+
+When you press `Ctrl+C`, the SDK kills the Python core and exits.
+
+## Handler Patterns
+
+### Simple response
+
+```typescript
+async (messages) => {
+ return "The answer is 42.";
+}
+```
+
+Task completes immediately with this response.
+
+### OpenAI SDK
+
+```typescript
+import OpenAI from "openai";
+const openai = new OpenAI();
+
+async (messages) => {
+ const response = await openai.chat.completions.create({
+ model: "gpt-4o",
+ messages: messages.map(m => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ })),
+ });
+ return response.choices[0].message.content || "";
+}
+```
+
+### LangChain.js
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+const llm = new ChatOpenAI({ model: "gpt-4o" });
+
+async (messages) => {
+ const response = await llm.invoke(
+ messages.map(m => ({ role: m.role, content: m.content }))
+ );
+ return typeof response.content === "string"
+ ? response.content
+ : JSON.stringify(response.content);
+}
+```
+
+### Multi-turn conversation
+
+Sometimes your agent needs more information before it can answer. Return a state transition:
+
+```typescript
+async (messages) => {
+ if (messages.length === 1) {
+ // First message — ask for clarification
+ return {
+ state: "input-required",
+ prompt: "Could you be more specific about what you're looking for?"
+ };
+ }
+
+ // Second message — now answer
+ const lastMessage = messages[messages.length - 1].content;
+ return `Based on your clarification: here's the detailed answer about "${lastMessage}"...`;
+}
+```
+
+The task stays open after `input-required`. The user sends a follow-up. The core calls your handler again with the full conversation history.
+
+### Error handling
+
+If your handler throws, the SDK catches it and returns a gRPC error. ManifestWorker marks the task as failed. The user gets an error response.
+
+```typescript
+async (messages) => {
+ try {
+ return await myLlmCall(messages);
+ } catch (err) {
+ // Option A: Let it throw — task fails with error message
+ throw err;
+
+ // Option B: Return a graceful message
+ return "Sorry, I'm having trouble processing your request right now.";
+ }
+}
+```
+
+## Configuration
+
+```typescript
+bindufy({
+ // Required
+ author: "dev@example.com", // Used for DID generation
+ name: "my-agent", // Agent name
+ deployment: {
+ url: "http://localhost:3773", // A2A HTTP server address
+ expose: true, // Enable CORS
+ cors_origins: ["http://localhost:5173"],
+ },
+
+ // Optional
+ description: "What my agent does",
+ version: "1.0.0",
+ skills: ["skills/question-answering"],
+ execution_cost: { // x402 payments
+ amount: "1000000",
+ token: "USDC",
+ network: "base-sepolia",
+ },
+ capabilities: {
+ streaming: false,
+ push_notifications: false,
+ },
+
+ // Advanced
+ coreAddress: "localhost:3774", // Override core gRPC address
+ callbackPort: 0, // 0 = auto-assign
+ debug_mode: false,
+ telemetry: true,
+ num_history_sessions: 10,
+}, handler);
+```
+
+## Skills
+
+Define what your agent can do. Two options:
+
+**File-based** (recommended) — create `skills/my-skill/skill.yaml` or `skills/my-skill/SKILL.md`:
+
+```typescript
+bindufy({
+ skills: ["skills/question-answering", "skills/code-review"],
+}, handler);
+```
+
+The SDK reads the files and sends the content to the core during registration.
+
+**Inline** — define skills directly in code:
+
+```typescript
+bindufy({
+ skills: [{
+ name: "question-answering",
+ description: "Answer questions using GPT-4o",
+ tags: ["qa", "assistant"],
+ }],
+}, handler);
+```
+
+## Types
+
+The SDK exports these types for your handler:
+
+```typescript
+interface ChatMessage {
+ role: string; // "user", "assistant", or "system"
+ content: string;
+}
+
+// Your handler signature
+type MessageHandler = (messages: ChatMessage[]) => Promise<string | HandlerResponse>;
+
+interface HandlerResponse {
+ content?: string;
+ state?: "input-required" | "auth-required";
+ prompt?: string;
+  metadata?: Record<string, unknown>;
+}
+
+// Returned by bindufy()
+interface RegistrationResult {
+ agentId: string;
+ did: string;
+ agentUrl: string;
+}
+```
+
+## Debugging
+
+**Check core logs:** The Python core's output is prefixed with `[bindu-core]` in your terminal:
+
+```
+[bindu-core] INFO gRPC server started on 0.0.0.0:3774
+[bindu-core] INFO Agent registered: openai-assistant-agent
+[bindu-core] INFO HTTP server started on 0.0.0.0:3773
+```
+
+**Test the agent manually:**
+
+```bash
+# Is the A2A server running?
+curl http://localhost:3773/health
+
+# What does the agent card look like?
+curl http://localhost:3773/.well-known/agent.json | python3 -m json.tool
+
+# Send a test message
+curl -X POST http://localhost:3773 -H "Content-Type: application/json" \
+ -d '{"jsonrpc":"2.0","method":"message/send","params":{"message":{"role":"user","parts":[{"kind":"text","text":"Hello"}],"messageId":"test-1","contextId":"test-2","taskId":"test-3","kind":"message"}},"id":"1"}'
+```
+
+**Port conflicts:**
+
+```bash
+lsof -ti:3773 -ti:3774 | xargs kill 2>/dev/null
+```
+
+## Limitations
+
+- **No streaming** — handler must return complete responses, can't yield chunks
+- **Requires Python** — the Bindu core must be installed (`pip install bindu`)
+- **Single agent per port** — each `bindufy()` call uses `:3773` for HTTP
+
+See [full limitations](./limitations.md) for details.
+
+## Examples
+
+- [OpenAI Agent](../../examples/typescript-openai-agent/) — direct OpenAI SDK usage
+- [LangChain Agent](../../examples/typescript-langchain-agent/) — LangChain.js with ChatOpenAI
diff --git a/examples/README.md b/examples/README.md
index 88413f09..2c079001 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -63,6 +63,12 @@ For full URL override, use `BINDU_DEPLOYMENT_URL` (e.g. `http://127.0.0.1:5001`)
- `ai-data-analysis-agent/` - Autonomous data analyst with CSV profiling and visualization
- `cybersecurity-newsletter/` - Security news aggregator with CVE tracking
+### TypeScript (Language-Agnostic via gRPC)
+- `typescript-openai-agent/` - OpenAI SDK agent bindufied with TypeScript SDK
+- `typescript-langchain-agent/` - LangChain.js agent bindufied with TypeScript SDK
+
+> TypeScript agents use `@bindu/sdk` which automatically launches the Bindu Python core in the background. Same A2A protocol, same DID, same everything — just a different language. See the [gRPC documentation](../docs/GRPC_LANGUAGE_AGNOSTIC.md) for details.
+
### Advanced
- `agent_swarm/` - Multi-agent collaboration system
- `cerina_bindu/cbt/` - CBT therapy protocol generator
@@ -133,6 +139,8 @@ if __name__ == "__main__":
## Documentation
- [Bindu Docs](https://docs.getbindu.com)
+- [gRPC Language-Agnostic Guide](../docs/GRPC_LANGUAGE_AGNOSTIC.md)
+- [TypeScript SDK](../sdks/typescript/README.md)
- [Payment Guide](../docs/PAYMENT.md)
- [DID Guide](../docs/DID.md)
- [Skills Guide](../docs/SKILLS.md)
diff --git a/examples/kotlin-openai-agent/README.md b/examples/kotlin-openai-agent/README.md
new file mode 100644
index 00000000..2147dfeb
--- /dev/null
+++ b/examples/kotlin-openai-agent/README.md
@@ -0,0 +1,42 @@
+# Kotlin OpenAI Agent
+
+An assistant built with Kotlin and the [Bindu Kotlin SDK](../../sdks/kotlin/).
+
+## Prerequisites
+
+- JDK 17+
+- Python >= 3.12 with Bindu installed (`pip install "bindu[grpc]"`)
+- OpenAI API key
+
+## Setup
+
+```bash
+export OPENAI_API_KEY=sk-your-api-key-here
+```
+
+## Run
+
+```bash
+./gradlew run
+```
+
+## Send a message
+
+```bash
+curl -X POST http://localhost:3773 \
+ -H "Content-Type: application/json" \
+ -d '{
+ "jsonrpc": "2.0",
+ "method": "message/send",
+ "params": {
+ "message": {
+ "role": "user",
+ "parts": [{"kind": "text", "text": "Explain coroutines in Kotlin"}],
+ "messageId": "msg-1",
+ "contextId": "ctx-1",
+ "taskId": "task-1"
+ }
+ },
+ "id": "1"
+ }'
+```
diff --git a/examples/kotlin-openai-agent/build.gradle.kts b/examples/kotlin-openai-agent/build.gradle.kts
new file mode 100644
index 00000000..5f8d222a
--- /dev/null
+++ b/examples/kotlin-openai-agent/build.gradle.kts
@@ -0,0 +1,24 @@
+plugins {
+ kotlin("jvm") version "1.9.22"
+ application
+}
+
+group = "com.getbindu.examples"
+version = "1.0.0"
+
+repositories {
+ mavenCentral()
+}
+
+dependencies {
+ implementation(project(":")) // bindu-sdk
+ implementation("com.google.code.gson:gson:2.10.1")
+}
+
+application {
+ mainClass.set("MainKt")
+}
+
+kotlin {
+ jvmToolchain(17)
+}
diff --git a/examples/kotlin-openai-agent/skills/question-answering/skill.yaml b/examples/kotlin-openai-agent/skills/question-answering/skill.yaml
new file mode 100644
index 00000000..a09e83f4
--- /dev/null
+++ b/examples/kotlin-openai-agent/skills/question-answering/skill.yaml
@@ -0,0 +1,23 @@
+id: question-answering-v1
+name: question-answering
+description: |
+ General question answering capability using OpenAI GPT-4o.
+ Built with Kotlin and the Bindu SDK.
+version: 1.0.0
+author: dev@example.com
+
+tags:
+ - question-answering
+ - conversation
+ - assistant
+
+input_modes:
+ - text/plain
+
+output_modes:
+ - text/plain
+
+examples:
+ - "Explain coroutines in Kotlin"
+ - "What is the JVM garbage collector?"
+ - "Compare Kotlin and Java for backend development"
diff --git a/examples/kotlin-openai-agent/src/main/kotlin/Main.kt b/examples/kotlin-openai-agent/src/main/kotlin/Main.kt
new file mode 100644
index 00000000..e0bb29c8
--- /dev/null
+++ b/examples/kotlin-openai-agent/src/main/kotlin/Main.kt
@@ -0,0 +1,73 @@
+/**
+ * Kotlin OpenAI Agent — Bindufied
+ *
+ * Demonstrates using the Bindu Kotlin SDK with an OpenAI-compatible API.
+ * The developer writes their agent logic in Kotlin — Bindu handles
+ * the conversion to a microservice with DID, auth, x402, and A2A protocol.
+ *
+ * Usage:
+ * 1. Set OPENAI_API_KEY in environment
+ * 2. ./gradlew run
+ */
+
+import com.getbindu.sdk.ChatMessage
+import com.getbindu.sdk.bindufy
+import java.net.URI
+import java.net.http.HttpClient
+import java.net.http.HttpRequest
+import java.net.http.HttpResponse
+import com.google.gson.Gson
+import com.google.gson.JsonObject
+
+val httpClient: HttpClient = HttpClient.newHttpClient()
+val gson = Gson()
+
+suspend fun callOpenAI(messages: List<ChatMessage>): String {
+ val apiKey = System.getenv("OPENAI_API_KEY")
+ ?: throw RuntimeException("OPENAI_API_KEY not set")
+
+ val messagesJson = messages.map { msg ->
+ mapOf("role" to msg.role, "content" to msg.content)
+ }
+
+ val body = gson.toJson(mapOf(
+ "model" to "gpt-4o",
+ "messages" to messagesJson
+ ))
+
+ val request = HttpRequest.newBuilder()
+ .uri(URI.create("https://api.openai.com/v1/chat/completions"))
+ .header("Content-Type", "application/json")
+ .header("Authorization", "Bearer $apiKey")
+ .POST(HttpRequest.BodyPublishers.ofString(body))
+ .build()
+
+ val response = httpClient.send(request, HttpResponse.BodyHandlers.ofString())
+ val json = gson.fromJson(response.body(), JsonObject::class.java)
+
+ return json
+ .getAsJsonArray("choices")
+ .get(0).asJsonObject
+ .getAsJsonObject("message")
+ .get("content").asString
+}
+
+fun main() {
+ bindufy(
+ config = mapOf(
+ "author" to "dev@example.com",
+ "name" to "kotlin-openai-agent",
+ "description" to "An assistant built with Kotlin and Bindu",
+ "version" to "1.0.0",
+ "deployment" to mapOf(
+ "url" to "http://localhost:3773",
+ "expose" to true,
+ "cors_origins" to listOf("http://localhost:5173")
+ ),
+ ),
+ skills = listOf("skills/question-answering")
+ ) { messages ->
+ // Call OpenAI and return the response
+ callOpenAI(messages)
+ }
+}
diff --git a/examples/typescript-langchain-agent/.env.example b/examples/typescript-langchain-agent/.env.example
new file mode 100644
index 00000000..96ab95d7
--- /dev/null
+++ b/examples/typescript-langchain-agent/.env.example
@@ -0,0 +1,19 @@
+# ----------------------------------------------------------------------------
+# OpenAI Configuration (used by LangChain.js)
+# ----------------------------------------------------------------------------
+# Get your API key at https://platform.openai.com/api-keys
+OPENAI_API_KEY=sk-your-openai-api-key
+
+# ----------------------------------------------------------------------------
+# Storage Configuration (optional)
+# ----------------------------------------------------------------------------
+# Type: "postgres" for PostgreSQL or "memory" for in-memory storage
+# STORAGE_TYPE=postgres
+# DATABASE_URL=postgresql://user:pass@host/db # pragma: allowlist secret
+
+# ----------------------------------------------------------------------------
+# Scheduler Configuration (optional)
+# ----------------------------------------------------------------------------
+# Type: "redis" for distributed scheduling or "memory" for single-process
+# SCHEDULER_TYPE=redis
+# REDIS_URL=redis://localhost:6379
diff --git a/examples/typescript-langchain-agent/README.md b/examples/typescript-langchain-agent/README.md
new file mode 100644
index 00000000..c47d2f70
--- /dev/null
+++ b/examples/typescript-langchain-agent/README.md
@@ -0,0 +1,264 @@
+# TypeScript LangChain Agent
+
+A research assistant built with [LangChain.js](https://js.langchain.com/) and bindufied using the [Bindu TypeScript SDK](../../sdks/typescript/). One `bindufy()` call transforms the LangChain agent into a full A2A-compliant microservice with DID identity, authentication, x402 payments, and task scheduling.
+
+## What This Example Demonstrates
+
+- Writing an agent in TypeScript using LangChain.js (`ChatOpenAI`)
+- Calling `bindufy()` to convert it into a networked microservice
+- The Bindu core (Python) starts automatically in the background
+- The agent registers over gRPC and receives task execution calls
+- External clients interact via standard A2A HTTP protocol
+
+## Architecture
+
+```
+Developer runs: npx tsx index.ts
+
+ TypeScript Process Python Process (auto-started)
+ ┌─────────────────────┐ ┌──────────────────────────────┐
+ │ LangChain.js │ │ Bindu Core │
+ │ ChatOpenAI │◄── gRPC ────►│ DID, Auth, x402, A2A │
+ │ handler(messages) │ :50052 │ Scheduler, Storage │
+ │ │ │ HTTP Server :3773 │
+ │ @bindu/sdk │ │ │
+ └─────────────────────┘ └──────────────────────────────┘
+ ▲
+ │ A2A Protocol
+ │ (HTTP/JSON-RPC)
+ External Clients
+```
+
+## Prerequisites
+
+- **Node.js** >= 18
+- **Python** >= 3.12 with Bindu installed:
+ ```bash
+ pip install bindu
+ # or with uv:
+ uv pip install bindu
+ ```
+- **OpenAI API key** from [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+
+## Setup
+
+### 1. Clone and navigate
+
+```bash
+cd examples/typescript-langchain-agent
+```
+
+### 2. Create your `.env` file
+
+```bash
+cp .env.example .env
+```
+
+Edit `.env` and add your OpenAI API key:
+
+```env
+OPENAI_API_KEY=sk-your-openai-api-key
+```
+
+### 3. Install dependencies
+
+```bash
+npm install
+```
+
+This installs:
+- `@bindu/sdk` — the Bindu TypeScript SDK (linked from `../../sdks/typescript`)
+- `@langchain/openai` — LangChain.js OpenAI integration
+- `dotenv` — loads `.env` variables
+
+## Run
+
+```bash
+npm start
+# or directly:
+npx tsx index.ts
+```
+
+You should see output like:
+
+```
+Starting Bindu core: uv run bindu serve --grpc ...
+Bindu core is ready on :3774
+AgentHandler gRPC server on :50052
+Registering with Bindu core...
+
+Agent registered successfully!
+ Agent ID: ...
+ DID: did:bindu:dev_at_example_com:langchain-research-agent:...
+ A2A URL: http://localhost:3773
+
+Waiting for messages...
+```
+
+## Test the Agent
+
+### Send a message
+
+Open a **new terminal** and run:
+
+```bash
+curl -s -X POST http://localhost:3773 \
+ -H "Content-Type: application/json" \
+ -d '{
+ "jsonrpc": "2.0",
+ "method": "message/send",
+ "params": {
+ "message": {
+ "role": "user",
+ "parts": [{"kind": "text", "text": "Explain the A2A protocol in simple terms"}],
+ "messageId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
+ "contextId": "b2c3d4e5-f6a7-8901-bcde-f12345678901",
+ "taskId": "c3d4e5f6-a7b8-9012-cdef-123456789012",
+ "kind": "message"
+ },
+ "configuration": {
+ "acceptedOutputModes": ["text/plain"],
+ "blocking": true
+ }
+ },
+ "id": "test-1"
+ }' | python3 -m json.tool
+```
+
+### Get the completed task
+
+Wait a few seconds for GPT-4o to respond, then:
+
+```bash
+curl -s -X POST http://localhost:3773 \
+ -H "Content-Type: application/json" \
+ -d '{
+ "jsonrpc": "2.0",
+ "method": "tasks/get",
+ "params": {
+ "taskId": "c3d4e5f6-a7b8-9012-cdef-123456789012"
+ },
+ "id": "test-2"
+ }' | python3 -m json.tool
+```
+
+### Check the agent card
+
+```bash
+curl -s http://localhost:3773/.well-known/agent.json | python3 -m json.tool
+```
+
+### Check health
+
+```bash
+curl -s http://localhost:3773/health
+```
+
+## How the Code Works
+
+```typescript
+import { bindufy, ChatMessage } from "@bindu/sdk";
+import { ChatOpenAI } from "@langchain/openai";
+
+// Create LangChain model — developer's choice
+const llm = new ChatOpenAI({ model: "gpt-4o", temperature: 0.7 });
+
+bindufy(
+ {
+ author: "dev@example.com",
+ name: "langchain-research-agent",
+ description: "A research assistant built with LangChain.js",
+ deployment: { url: "http://localhost:3773", expose: true },
+ skills: ["skills/research"],
+ },
+ async (messages: ChatMessage[]) => {
+ // Convert Bindu messages to LangChain format and invoke
+ const response = await llm.invoke(
+ messages.map((m) => ({ role: m.role, content: m.content }))
+ );
+
+ // Return content — Bindu handles the rest
+ return typeof response.content === "string"
+ ? response.content
+ : JSON.stringify(response.content);
+ }
+);
+```
+
+## Message Flow
+
+```
+1. Client sends A2A HTTP POST to :3773
+2. Bindu Core receives request
+3. TaskManager creates task, Scheduler queues it
+4. Worker picks up task, builds message history
+5. Worker calls manifest.run(messages)
+ └── GrpcAgentClient — makes gRPC call to TypeScript process
+6. TypeScript SDK receives HandleMessages on :50052
+7. SDK calls your handler(messages)
+8. Your handler calls LangChain ChatOpenAI.invoke()
+9. LangChain calls OpenAI GPT-4o API
+10. Response flows back: LangChain → handler → gRPC → Worker → A2A → Client
+```
+
+## Project Structure
+
+```
+typescript-langchain-agent/
+ index.ts # Agent code — LangChain.js + bindufy()
+ package.json # Dependencies (@bindu/sdk, @langchain/openai)
+ tsconfig.json # TypeScript configuration
+ .env.example # Environment variable template
+ .env # Your actual keys (git-ignored)
+ README.md # This file
+ skills/
+ research/
+ skill.yaml # Skill definition (YAML format)
+ SKILL.md # Skill documentation (Markdown format)
+```
+
+## Ports Used
+
+| Port | Protocol | Purpose |
+|------|----------|---------|
+| 3773 | HTTP | A2A server (external clients connect here) |
+| 3774 | gRPC | Bindu core registration (SDK connects here) |
+| 50052 | gRPC | AgentHandler (core calls SDK handler here) |
+
+## Troubleshooting
+
+### "Bindu not found"
+
+Install the Python package:
+
+```bash
+pip install bindu
+```
+
+### "Port 3773 already in use"
+
+Kill existing processes:
+
+```bash
+lsof -ti:3773 -ti:3774 | xargs kill 2>/dev/null
+```
+
+### "OPENAI_API_KEY not set"
+
+Make sure your `.env` file exists and has a valid key:
+
+```bash
+cat .env
+# Should show: OPENAI_API_KEY=sk-...
+```
+
+## Stop the Agent
+
+Press `Ctrl+C` in the terminal. This kills both the TypeScript process and the Python core.
+
+## Next Steps
+
+- Try the [TypeScript OpenAI Agent](../typescript-openai-agent/) for a direct OpenAI SDK example
+- Read the [gRPC Documentation](../../docs/GRPC_LANGUAGE_AGNOSTIC.md) for architecture details
+- Check the [SDK README](../../sdks/typescript/README.md) for full API reference
+- Build your own agent: copy this folder, change the handler, run `bindufy()`
diff --git a/examples/typescript-langchain-agent/index.ts b/examples/typescript-langchain-agent/index.ts
new file mode 100644
index 00000000..9c30ee8b
--- /dev/null
+++ b/examples/typescript-langchain-agent/index.ts
@@ -0,0 +1,63 @@
+/**
+ * TypeScript LangChain Agent — Bindufied
+ *
+ * Demonstrates using the Bindu TypeScript SDK with LangChain.js.
+ * The developer writes their agent using any TS framework — Bindu handles
+ * the conversion to a microservice with DID, auth, x402, and A2A protocol.
+ *
+ * Usage:
+ * 1. Set OPENAI_API_KEY in .env or environment
+ * 2. npx tsx index.ts
+ *
+ * The SDK will:
+ * - Start the Bindu Python core in the background
+ * - Register this agent with DID identity and A2A endpoints
+ * - Listen for tasks via gRPC and execute them with LangChain
+ */
+
+import { bindufy, ChatMessage } from "@bindu/sdk";
+import { ChatOpenAI } from "@langchain/openai";
+import * as dotenv from "dotenv";
+
+dotenv.config();
+
+// Create LangChain agent — this is the developer's choice
+const llm = new ChatOpenAI({
+ model: "gpt-4o",
+ temperature: 0.7,
+});
+
+// bindufy — one call, full microservice
+bindufy(
+ {
+ author: "dev@example.com",
+ name: "langchain-research-agent",
+ description: "A research assistant built with LangChain.js and Bindu",
+ version: "1.0.0",
+ deployment: {
+ url: "http://localhost:3773",
+ expose: true,
+ cors_origins: ["http://localhost:5173"],
+ },
+ skills: ["skills/research"],
+ capabilities: {
+ streaming: false,
+ push_notifications: false,
+ },
+ },
+ async (messages: ChatMessage[]) => {
+ // Convert Bindu messages to LangChain format
+ const langchainMessages = messages.map((m) => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ }));
+
+ // Invoke LangChain
+ const response = await llm.invoke(langchainMessages);
+
+ // Return the content — Bindu handles the rest
+ return typeof response.content === "string"
+ ? response.content
+ : JSON.stringify(response.content);
+ }
+);
diff --git a/examples/typescript-langchain-agent/package.json b/examples/typescript-langchain-agent/package.json
new file mode 100644
index 00000000..aec6cde5
--- /dev/null
+++ b/examples/typescript-langchain-agent/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "typescript-langchain-agent",
+ "version": "1.0.0",
+ "private": true,
+ "description": "LangChain.js agent bindufied with Bindu TypeScript SDK",
+ "scripts": {
+ "start": "npx tsx index.ts"
+ },
+ "dependencies": {
+ "@bindu/sdk": "file:../../sdks/typescript",
+ "@langchain/openai": "^0.4.0",
+ "dotenv": "^16.4.0"
+ },
+ "devDependencies": {
+ "tsx": "^4.7.0",
+ "typescript": "^5.4.0"
+ }
+}
diff --git a/examples/typescript-langchain-agent/skills/research/SKILL.md b/examples/typescript-langchain-agent/skills/research/SKILL.md
new file mode 100644
index 00000000..52535080
--- /dev/null
+++ b/examples/typescript-langchain-agent/skills/research/SKILL.md
@@ -0,0 +1,95 @@
+---
+id: research-v1
+name: research
+version: 1.0.0
+author: dev@example.com
+tags:
+ - research
+ - question-answering
+ - analysis
+ - summarization
+ - langchain
+input_modes:
+ - text/plain
+ - application/json
+output_modes:
+ - text/plain
+ - application/json
+---
+
+# Research Skill
+
+Research and information retrieval capability powered by LangChain.js.
+Uses GPT-4o to answer questions, summarize information, provide detailed
+analysis, and generate structured research outputs.
+
+## Capabilities
+
+### Question Answering
+- Direct question answering with contextual understanding
+- Multi-turn conversation with history awareness
+- Follow-up questions and clarification handling
+
+### Summarization
+- Summarize complex topics into clear outputs
+- Bullet points, paragraphs, or structured JSON
+- Adjustable depth and detail level
+
+### Analysis
+- Comparative analysis (pros/cons, trade-offs)
+- Technical evaluation of tools and frameworks
+- Architectural decision support
+
+### Code Explanation
+- Explain code concepts and patterns
+- TypeScript, Python, Rust, Go, Java support
+- Architecture and design pattern suggestions
+
+## Examples
+
+- "What is the current state of quantum computing?"
+- "Summarize the key points of machine learning"
+- "Explain the A2A protocol in simple terms"
+- "Compare React vs Vue for a new project"
+- "What are the best practices for API design?"
+- "Analyze the pros and cons of microservices architecture"
+
+## Performance
+
+| Metric | Value |
+|--------|-------|
+| Average response time | 1-5s (model dependent) |
+| Max concurrent requests | 10 |
+| Context window | Up to 128k tokens |
+
+## Requirements
+
+- OpenAI API key (used by LangChain.js `ChatOpenAI`)
+- Internet connection for API calls
+
+## Integration
+
+This skill is used by the TypeScript LangChain agent example:
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+
+const llm = new ChatOpenAI({ model: "gpt-4o", temperature: 0.7 });
+
+bindufy({
+ skills: ["skills/research"],
+}, async (messages) => {
+ const response = await llm.invoke(messages);
+ return response.content;
+});
+```
+
+## Assessment
+
+### Keywords
+research, explain, summarize, analyze, compare, question, answer, what, how, why
+
+### Specializations
+- domain: research (confidence_boost: 0.3)
+- domain: analysis (confidence_boost: 0.2)
+- domain: summarization (confidence_boost: 0.2)
diff --git a/examples/typescript-langchain-agent/skills/research/skill.yaml b/examples/typescript-langchain-agent/skills/research/skill.yaml
new file mode 100644
index 00000000..31dad1f7
--- /dev/null
+++ b/examples/typescript-langchain-agent/skills/research/skill.yaml
@@ -0,0 +1,131 @@
+# Research Skill
+# Research and information retrieval powered by LangChain.js + GPT-4o
+
+# Basic Metadata
+id: research-v1
+name: research
+version: 1.0.0
+author: dev@example.com
+
+# Description
+description: |
+ Research and information retrieval capability powered by LangChain.js.
+ Uses GPT-4o to answer questions, summarize information, provide detailed
+ analysis, and generate structured research outputs. Supports multi-turn
+ conversations with context awareness.
+
+# Tags and Modes
+tags:
+ - research
+ - question-answering
+ - analysis
+ - summarization
+ - langchain
+
+input_modes:
+ - text/plain
+ - application/json
+
+output_modes:
+ - text/plain
+ - application/json
+
+# Example Queries
+examples:
+ - "What is the current state of quantum computing?"
+ - "Summarize the key points of machine learning"
+ - "Explain the A2A protocol in simple terms"
+ - "Compare React vs Vue for a new project"
+ - "What are the best practices for API design?"
+ - "Analyze the pros and cons of microservices architecture"
+
+# Detailed Capabilities
+capabilities_detail:
+ question_answering:
+ supported: true
+ description: "Direct question answering with contextual understanding"
+ features:
+ - multi_turn_conversation
+ - context_awareness
+ - follow_up_handling
+
+ summarization:
+ supported: true
+ description: "Summarize complex topics into clear, structured outputs"
+ output_formats:
+ - bullet_points
+ - paragraphs
+ - structured_json
+
+ analysis:
+ supported: true
+ description: "Comparative analysis, pros/cons, trade-off evaluation"
+
+ code_explanation:
+ supported: true
+ description: "Explain code concepts and architectural patterns"
+ languages:
+ - typescript
+ - python
+ - rust
+ - go
+ - java
+
+# Requirements
+requirements:
+ packages:
+ - "@langchain/openai>=0.4.0"
+ system:
+ - internet_connection
+ api_keys:
+ - OPENAI_API_KEY
+
+# Performance Metrics
+performance:
+ avg_processing_time_ms: 2000
+ max_concurrent_requests: 10
+ context_window_tokens: 128000
+ scalability: horizontal
+
+# Assessment fields for skill negotiation
+assessment:
+ keywords:
+ - research
+ - explain
+ - summarize
+ - analyze
+ - compare
+ - question
+ - answer
+ - what
+ - how
+ - why
+
+ specializations:
+ - domain: research
+ confidence_boost: 0.3
+ - domain: analysis
+ confidence_boost: 0.2
+ - domain: summarization
+ confidence_boost: 0.2
+
+ anti_patterns:
+ - "real-time data"
+ - "image generation"
+ - "pdf processing"
+ - "database query"
+ - "file upload"
+
+ complexity_indicators:
+ simple:
+ - "what is"
+ - "explain"
+ - "define"
+ medium:
+ - "compare"
+ - "summarize"
+ - "analyze"
+ complex:
+ - "research paper"
+ - "comprehensive analysis"
+ - "multi-domain synthesis"
diff --git a/examples/typescript-langchain-agent/tsconfig.json b/examples/typescript-langchain-agent/tsconfig.json
new file mode 100644
index 00000000..11aab691
--- /dev/null
+++ b/examples/typescript-langchain-agent/tsconfig.json
@@ -0,0 +1,9 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "commonjs",
+ "esModuleInterop": true,
+ "strict": true,
+ "skipLibCheck": true
+ }
+}
diff --git a/examples/typescript-openai-agent/.env.example b/examples/typescript-openai-agent/.env.example
new file mode 100644
index 00000000..1d1b32b7
--- /dev/null
+++ b/examples/typescript-openai-agent/.env.example
@@ -0,0 +1,24 @@
+# ----------------------------------------------------------------------------
+# OpenAI Configuration
+# ----------------------------------------------------------------------------
+# Get your API key at https://platform.openai.com/api-keys
+OPENAI_API_KEY=sk-your-openai-key-here
+
+# Model to use (optional, defaults to gpt-4o)
+# OPENAI_MODEL=gpt-4o
+# OPENAI_MODEL=gpt-4o-mini
+# OPENAI_MODEL=gpt-4-turbo
+
+# ----------------------------------------------------------------------------
+# Storage Configuration (optional)
+# ----------------------------------------------------------------------------
+# Type: "postgres" for PostgreSQL or "memory" for in-memory storage
+# STORAGE_TYPE=postgres
+# DATABASE_URL=postgresql://user:pass@host/db # pragma: allowlist secret
+
+# ----------------------------------------------------------------------------
+# Scheduler Configuration (optional)
+# ----------------------------------------------------------------------------
+# Type: "redis" for distributed scheduling or "memory" for single-process
+# SCHEDULER_TYPE=redis
+# REDIS_URL=redis://localhost:6379
diff --git a/examples/typescript-openai-agent/README.md b/examples/typescript-openai-agent/README.md
new file mode 100644
index 00000000..2d946f39
--- /dev/null
+++ b/examples/typescript-openai-agent/README.md
@@ -0,0 +1,298 @@
+# TypeScript OpenAI Agent
+
+A general-purpose assistant built with the [OpenAI SDK](https://github.com/openai/openai-node) and bindufied using the [Bindu TypeScript SDK](../../sdks/typescript/). One `bindufy()` call transforms the OpenAI agent into a full A2A-compliant microservice with DID identity, authentication, x402 payments, and task scheduling.
+
+## What This Example Demonstrates
+
+- Writing an agent in TypeScript using the OpenAI SDK
+- Calling `bindufy()` to convert it into a networked microservice
+- The Bindu core (Python) starts automatically in the background
+- The agent registers over gRPC and receives task execution calls
+- External clients interact via standard A2A HTTP protocol
+
+## Architecture
+
+```
+Developer runs: npx tsx index.ts
+
+ TypeScript Process Python Process (auto-started)
+ ┌─────────────────────┐ ┌──────────────────────────────┐
+ │ OpenAI SDK │ │ Bindu Core │
+ │ handler(messages) │◄── gRPC ────►│ DID, Auth, x402, A2A │
+ │ │ :50052 │ Scheduler, Storage │
+ │ @bindu/sdk │ │ HTTP Server :3773 │
+ └─────────────────────┘ └──────────────────────────────┘
+ ▲
+ │ A2A Protocol
+ │ (HTTP/JSON-RPC)
+ External Clients
+```
+
+## Prerequisites
+
+- **Node.js** >= 18
+- **Python** >= 3.12 with Bindu installed:
+ ```bash
+ pip install bindu
+ # or with uv:
+ uv pip install bindu
+ ```
+- **OpenAI API key** from [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+
+## Setup
+
+### 1. Clone and navigate
+
+```bash
+cd examples/typescript-openai-agent
+```
+
+### 2. Create your `.env` file
+
+```bash
+cp .env.example .env
+```
+
+Edit `.env` and add your OpenAI API key:
+
+```env
+OPENAI_API_KEY=sk-your-openai-api-key
+```
+
+Optionally set a different model:
+
+```env
+OPENAI_MODEL=gpt-4o-mini
+```
+
+### 3. Install dependencies
+
+```bash
+npm install
+```
+
+This installs:
+- `@bindu/sdk` — the Bindu TypeScript SDK (linked from `../../sdks/typescript`)
+- `openai` — the OpenAI Node.js SDK
+- `dotenv` — loads `.env` variables
+
+## Run
+
+```bash
+npm start
+# or directly:
+npx tsx index.ts
+```
+
+You should see output like:
+
+```
+[Bindu SDK] Starting Bindu core...
+[Bindu SDK] Bindu core is ready on :3774
+[Bindu SDK] AgentHandler gRPC server started on :50052
+[Bindu SDK] Registering agent with Bindu core...
+[Bindu SDK]
+[Bindu SDK] Agent registered successfully!
+[Bindu SDK] Agent ID: 91547067-c183-e0fd-c150-27a3ca4135ed
+[Bindu SDK] DID: did:bindu:opnai_sample_ts_at_getbindu_com:openai-assistant-agent:91547067...
+[Bindu SDK] A2A URL: http://localhost:3773
+[Bindu SDK]
+[Bindu SDK] Waiting for messages...
+```
+
+**What happened behind the scenes:**
+1. The SDK started the Python Bindu core as a child process
+2. The core started a gRPC server on `:3774`
+3. The SDK started an AgentHandler gRPC server on `:50052`
+4. The SDK called `RegisterAgent` on the core with your config
+5. The core ran the full bindufy logic: generated DID, set up auth, created manifest
+6. The core started an HTTP/A2A server on `:3773`
+7. The agent is now a fully functional A2A microservice
+
+## Test the Agent
+
+### Send a message
+
+Open a **new terminal** and run:
+
+```bash
+curl -s -X POST http://localhost:3773 \
+ -H "Content-Type: application/json" \
+ -d '{
+ "jsonrpc": "2.0",
+ "method": "message/send",
+ "params": {
+ "message": {
+ "role": "user",
+ "parts": [{"kind": "text", "text": "What is the capital of France?"}],
+ "messageId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
+ "contextId": "b2c3d4e5-f6a7-8901-bcde-f12345678901",
+ "taskId": "c3d4e5f6-a7b8-9012-cdef-123456789012",
+ "kind": "message"
+ },
+ "configuration": {
+ "acceptedOutputModes": ["text/plain"],
+ "blocking": true
+ }
+ },
+ "id": "test-1"
+ }' | python3 -m json.tool
+```
+
+### Get the completed task
+
+Wait a few seconds for GPT-4o to respond, then:
+
+```bash
+curl -s -X POST http://localhost:3773 \
+ -H "Content-Type: application/json" \
+ -d '{
+ "jsonrpc": "2.0",
+ "method": "tasks/get",
+ "params": {
+ "taskId": "c3d4e5f6-a7b8-9012-cdef-123456789012"
+ },
+ "id": "test-2"
+ }' | python3 -m json.tool
+```
+
+You should see GPT-4o's answer in the task history.
+
+### Check the agent card
+
+```bash
+curl -s http://localhost:3773/.well-known/agent.json | python3 -m json.tool
+```
+
+This returns the full A2A agent card with DID, skills, and capabilities.
+
+### Check health
+
+```bash
+curl -s http://localhost:3773/health
+```
+
+## How the Code Works
+
+```typescript
+import { bindufy, ChatMessage } from "@bindu/sdk";
+import OpenAI from "openai";
+
+const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+
+bindufy(
+ {
+ author: "opnai-sample-ts@getbindu.com", // Your identity
+ name: "openai-assistant-agent", // Agent name
+ description: "An assistant powered by GPT-4o",
+ deployment: {
+ url: "http://localhost:3773", // A2A HTTP server URL
+ expose: true,
+ },
+ skills: ["skills/question-answering"], // Skill definitions
+ },
+ async (messages: ChatMessage[]) => {
+ // This handler is called every time a message arrives via A2A.
+ // messages = [{role: "user", content: "..."}, ...]
+ // Return a string for normal responses.
+ // Return {state: "input-required", prompt: "..."} for multi-turn.
+
+ const response = await openai.chat.completions.create({
+      model: process.env.OPENAI_MODEL || "gpt-4o",
+ messages: messages.map((m) => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ })),
+ });
+
+ return response.choices[0].message.content || "";
+ }
+);
+```
+
+## Message Flow
+
+```
+1. Client sends A2A HTTP POST to :3773
+2. Bindu Core receives request
+3. TaskManager creates task, Scheduler queues it
+4. Worker picks up task, builds message history
+5. Worker calls manifest.run(messages)
+ └── This is GrpcAgentClient — makes gRPC call to TypeScript process
+6. TypeScript SDK receives HandleMessages on :50052
+7. SDK calls your handler(messages) — the async function above
+8. Your handler calls OpenAI GPT-4o API
+9. OpenAI returns response
+10. SDK sends response back over gRPC
+11. Worker processes result (ResultProcessor, ResponseDetector)
+12. Worker updates storage, creates artifacts with DID signature
+13. Client receives A2A JSON-RPC response
+```
+
+## Project Structure
+
+```
+typescript-openai-agent/
+ index.ts # Agent code — OpenAI SDK + bindufy()
+ package.json # Dependencies (@bindu/sdk, openai, dotenv)
+ tsconfig.json # TypeScript configuration
+ .env.example # Environment variable template
+ .env # Your actual keys (git-ignored)
+ README.md # This file
+ skills/
+ question-answering/
+ skill.yaml # Skill definition (YAML format)
+ SKILL.md # Skill documentation (Markdown format)
+```
+
+## Ports Used
+
+| Port | Protocol | Purpose |
+|------|----------|---------|
+| 3773 | HTTP | A2A server (external clients connect here) |
+| 3774 | gRPC | Bindu core registration (SDK connects here) |
+| 50052 | gRPC | AgentHandler (core calls SDK handler here) |
+
+## Troubleshooting
+
+### "Bindu not found"
+
+Install the Python package:
+
+```bash
+pip install bindu[grpc]
+```
+
+### "Port 3773 already in use"
+
+Kill existing processes:
+
+```bash
+lsof -ti:3773 -ti:3774 | xargs kill 2>/dev/null
+```
+
+### "OPENAI_API_KEY not set"
+
+Make sure your `.env` file exists and has a valid key:
+
+```bash
+cat .env
+# Should show: OPENAI_API_KEY=sk-...
+```
+
+### Agent starts but no response to messages
+
+Check the first terminal for error logs. Common issues:
+- Invalid API key
+- Model not available on your OpenAI plan
+- Rate limiting
+
+## Stop the Agent
+
+Press `Ctrl+C` in the terminal. This kills both the TypeScript process and the Python core.
+
+## Next Steps
+
+- Try the [TypeScript LangChain Agent](../typescript-langchain-agent/) for a framework-based example
+- Read the [gRPC Documentation](../../docs/GRPC_LANGUAGE_AGNOSTIC.md) for architecture details
+- Build your own agent: copy this folder, change the handler, run `bindufy()`
diff --git a/examples/typescript-openai-agent/index.ts b/examples/typescript-openai-agent/index.ts
new file mode 100644
index 00000000..7eae8e17
--- /dev/null
+++ b/examples/typescript-openai-agent/index.ts
@@ -0,0 +1,49 @@
+/**
+ * TypeScript OpenAI Agent — Bindufied
+ *
+ * Demonstrates using the Bindu TypeScript SDK with the OpenAI SDK.
+ * Uses GPT-4o to answer questions and assist users.
+ *
+ * Usage:
+ * 1. Set OPENAI_API_KEY in .env
+ * 2. npx tsx index.ts
+ */
+
+import { bindufy, ChatMessage } from "@bindu/sdk";
+import OpenAI from "openai";
+import * as dotenv from "dotenv";
+
+dotenv.config();
+
+const openai = new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY,
+});
+
+// bindufy — one call, full microservice
+bindufy(
+ {
+ author: "opnai-sample-ts@getbindu.com",
+ name: "openai-assistant-agent",
+ description:
+ "An assistant built with the OpenAI SDK and Bindu. Powered by GPT-4o.",
+ version: "1.0.0",
+ deployment: {
+ url: "http://localhost:3773",
+ expose: true,
+ cors_origins: ["http://localhost:5173"],
+ },
+ skills: ["skills/question-answering"],
+ },
+ async (messages: ChatMessage[]) => {
+ // Call OpenAI GPT-4o
+ const response = await openai.chat.completions.create({
+ model: process.env.OPENAI_MODEL || "gpt-4o",
+ messages: messages.map((m) => ({
+ role: m.role as "user" | "assistant" | "system",
+ content: m.content,
+ })),
+ });
+
+ return response.choices[0].message.content || "";
+ }
+);
diff --git a/examples/typescript-openai-agent/nul b/examples/typescript-openai-agent/nul
new file mode 100644
index 00000000..a783ed13
--- /dev/null
+++ b/examples/typescript-openai-agent/nul
@@ -0,0 +1 @@
+/bin/sh: where: command not found
diff --git a/examples/typescript-openai-agent/package-lock.json b/examples/typescript-openai-agent/package-lock.json
new file mode 100644
index 00000000..2979a66b
--- /dev/null
+++ b/examples/typescript-openai-agent/package-lock.json
@@ -0,0 +1,1045 @@
+{
+ "name": "typescript-openai-agent",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "typescript-openai-agent",
+ "version": "1.0.0",
+ "dependencies": {
+ "@bindu/sdk": "file:../../sdks/typescript",
+ "dotenv": "^16.4.0",
+ "openai": "^4.0.0"
+ },
+ "devDependencies": {
+ "tsx": "^4.7.0",
+ "typescript": "^5.4.0"
+ }
+ },
+ "../../sdks/typescript": {
+ "name": "@bindu/sdk",
+ "version": "0.1.0",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@grpc/grpc-js": "^1.10.0",
+ "@grpc/proto-loader": "^0.7.0",
+ "yaml": "^2.4.0"
+ },
+ "devDependencies": {
+ "@types/node": "^20.0.0",
+ "typescript": "^5.4.0"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ }
+ },
+ "node_modules/@bindu/sdk": {
+ "resolved": "../../sdks/typescript",
+ "link": true
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz",
+ "integrity": "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.4.tgz",
+ "integrity": "sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz",
+ "integrity": "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.4.tgz",
+ "integrity": "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz",
+ "integrity": "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz",
+ "integrity": "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz",
+ "integrity": "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz",
+ "integrity": "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz",
+ "integrity": "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz",
+ "integrity": "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz",
+ "integrity": "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz",
+ "integrity": "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz",
+ "integrity": "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz",
+ "integrity": "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz",
+ "integrity": "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz",
+ "integrity": "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz",
+ "integrity": "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz",
+ "integrity": "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz",
+ "integrity": "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz",
+ "integrity": "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz",
+ "integrity": "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz",
+ "integrity": "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz",
+ "integrity": "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz",
+ "integrity": "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz",
+ "integrity": "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz",
+ "integrity": "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "18.19.130",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
+ "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.13",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
+ "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.4"
+ }
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/dotenv": {
+ "version": "16.6.1",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz",
+ "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==",
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://dotenvx.com"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.27.4",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.4.tgz",
+ "integrity": "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.27.4",
+ "@esbuild/android-arm": "0.27.4",
+ "@esbuild/android-arm64": "0.27.4",
+ "@esbuild/android-x64": "0.27.4",
+ "@esbuild/darwin-arm64": "0.27.4",
+ "@esbuild/darwin-x64": "0.27.4",
+ "@esbuild/freebsd-arm64": "0.27.4",
+ "@esbuild/freebsd-x64": "0.27.4",
+ "@esbuild/linux-arm": "0.27.4",
+ "@esbuild/linux-arm64": "0.27.4",
+ "@esbuild/linux-ia32": "0.27.4",
+ "@esbuild/linux-loong64": "0.27.4",
+ "@esbuild/linux-mips64el": "0.27.4",
+ "@esbuild/linux-ppc64": "0.27.4",
+ "@esbuild/linux-riscv64": "0.27.4",
+ "@esbuild/linux-s390x": "0.27.4",
+ "@esbuild/linux-x64": "0.27.4",
+ "@esbuild/netbsd-arm64": "0.27.4",
+ "@esbuild/netbsd-x64": "0.27.4",
+ "@esbuild/openbsd-arm64": "0.27.4",
+ "@esbuild/openbsd-x64": "0.27.4",
+ "@esbuild/openharmony-arm64": "0.27.4",
+ "@esbuild/sunos-x64": "0.27.4",
+ "@esbuild/win32-arm64": "0.27.4",
+ "@esbuild/win32-ia32": "0.27.4",
+ "@esbuild/win32-x64": "0.27.4"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-tsconfig": {
+ "version": "4.13.7",
+ "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.7.tgz",
+ "integrity": "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "resolve-pkg-maps": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "deprecated": "Use your platform's native DOMException instead",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/openai": {
+ "version": "4.104.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.104.0.tgz",
+ "integrity": "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/resolve-pkg-maps": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
+ "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/tsx": {
+ "version": "4.21.0",
+ "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz",
+ "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "~0.27.0",
+ "get-tsconfig": "^4.7.5"
+ },
+ "bin": {
+ "tsx": "dist/cli.mjs"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ }
+ }
+}
diff --git a/examples/typescript-openai-agent/package.json b/examples/typescript-openai-agent/package.json
new file mode 100644
index 00000000..a018f298
--- /dev/null
+++ b/examples/typescript-openai-agent/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "typescript-openai-agent",
+ "version": "1.0.0",
+ "private": true,
+ "description": "OpenAI SDK agent bindufied with Bindu TypeScript SDK",
+ "scripts": {
+ "start": "npx tsx index.ts"
+ },
+ "dependencies": {
+ "@bindu/sdk": "file:../../sdks/typescript",
+ "openai": "^4.0.0",
+ "dotenv": "^16.4.0"
+ },
+ "devDependencies": {
+ "tsx": "^4.7.0",
+ "typescript": "^5.4.0"
+ }
+}
diff --git a/examples/typescript-openai-agent/skills/question-answering/SKILL.md b/examples/typescript-openai-agent/skills/question-answering/SKILL.md
new file mode 100644
index 00000000..e5baff48
--- /dev/null
+++ b/examples/typescript-openai-agent/skills/question-answering/SKILL.md
@@ -0,0 +1,125 @@
+---
+id: question-answering-v1
+name: question-answering
+version: 1.0.0
+author: dev@example.com
+tags:
+ - question-answering
+ - conversation
+ - assistant
+ - general-purpose
+ - multi-model
+input_modes:
+ - text/plain
+ - application/json
+output_modes:
+ - text/plain
+ - application/json
+---
+
+# Question Answering Skill
+
+General-purpose question answering capability powered by OpenRouter.
+Access 300+ LLM models (GPT-4o, Claude, Llama, Gemini, Mistral, etc.) through a single unified API.
+Handles conversational queries, explanations, code generation, analysis, and creative writing.
+
+## Capabilities
+
+### Conversational Q&A
+- Direct question answering with contextual understanding
+- Multi-turn conversation with history awareness
+- Follow-up questions and clarification handling
+
+### Code Assistance
+- Code generation in multiple languages
+- Code explanation and debugging
+- Architecture and design pattern suggestions
+
+### Analysis and Reasoning
+- Data interpretation and summarization
+- Comparative analysis
+- Logical reasoning and problem solving
+
+### Creative Writing
+- Content generation (articles, emails, documentation)
+- Tone adaptation (formal, casual, technical)
+- Multi-language support
+
+## Supported Models (via OpenRouter)
+
+| Provider | Models | Strengths |
+|----------|--------|-----------|
+| OpenAI | GPT-4o, GPT-4o-mini | General purpose, fast |
+| Anthropic | Claude Sonnet, Claude Haiku | Analysis, safety, long context |
+| Meta | Llama 3.1 70B/405B | Open source, multilingual |
+| Google | Gemini 2.0 Flash | Multimodal, fast |
+| Mistral | Mistral Large, Codestral | European, code-focused |
+
+## Examples
+
+- "Explain how microservices work"
+- "What are the pros and cons of GraphQL vs REST?"
+- "Help me understand async/await in TypeScript"
+- "Write a Python function to parse CSV files"
+- "Compare PostgreSQL and MongoDB for my use case"
+- "Summarize the key points of this document"
+
+## Performance
+
+| Metric | Value |
+|--------|-------|
+| Average response time | 1-5s (model dependent) |
+| Max concurrent requests | 10 |
+| Context window | Up to 128k tokens (model dependent) |
+| Supported languages | 50+ natural languages |
+
+## Requirements
+
+- OpenRouter API key (get one at https://openrouter.ai/keys)
+- Internet connection for API calls
+
+## When to Use
+
+- General knowledge questions
+- Code assistance and review
+- Content generation and editing
+- Data analysis and interpretation
+- Conversational AI applications
+
+## When NOT to Use
+
+- Real-time data (stock prices, live sports) - use a web search agent
+- Image generation - use a DALL-E or Stable Diffusion agent
+- File processing (PDF, Excel) - use a document processing agent
+- Database queries - use a data agent with direct DB access
+
+## Integration
+
+This skill is used by the TypeScript OpenAI agent example (OpenAI SDK pointed at OpenRouter):
+
+```typescript
+bindufy({
+ skills: ["skills/question-answering"],
+}, async (messages) => {
+ const response = await openrouter.chat.completions.create({
+ model: "openai/gpt-4o",
+ messages: messages,
+ });
+ return response.choices[0].message.content;
+});
+```
+
+## Assessment
+
+### Keywords
+question, answer, explain, help, how, what, why, write, generate, analyze, summarize, compare, code, debug
+
+### Specializations
+- domain: general_knowledge (confidence_boost: 0.2)
+- domain: code_assistance (confidence_boost: 0.3)
+- domain: content_generation (confidence_boost: 0.2)
+
+### Complexity Indicators
+- Simple: "what is", "explain", "define", single-topic questions
+- Medium: "compare", "analyze", multi-step reasoning
+- Complex: "design a system", "debug this code", multi-domain synthesis
diff --git a/examples/typescript-openai-agent/skills/question-answering/skill.yaml b/examples/typescript-openai-agent/skills/question-answering/skill.yaml
new file mode 100644
index 00000000..ce48a684
--- /dev/null
+++ b/examples/typescript-openai-agent/skills/question-answering/skill.yaml
@@ -0,0 +1,138 @@
+# Question Answering Skill
+# General-purpose Q&A powered by OpenRouter (300+ models)
+
+# Basic Metadata
+id: question-answering-v1
+name: question-answering
+version: 1.0.0
+author: dev@example.com
+
+# Description
+description: |
+ General-purpose question answering capability powered by OpenRouter.
+ Access 300+ LLM models (GPT-4o, Claude, Llama, Gemini, Mistral, etc.)
+ through a single unified API. Handles conversational queries, explanations,
+ code generation, analysis, and creative writing.
+
+# Tags and Modes
+tags:
+ - question-answering
+ - conversation
+ - assistant
+ - general-purpose
+ - multi-model
+
+input_modes:
+ - text/plain
+ - application/json
+
+output_modes:
+ - text/plain
+ - application/json
+
+# Example Queries
+examples:
+ - "Explain how microservices work"
+ - "What are the pros and cons of GraphQL vs REST?"
+ - "Help me understand async/await in TypeScript"
+ - "Write a Python function to parse CSV files"
+ - "Compare PostgreSQL and MongoDB for my use case"
+ - "Summarize the key points of this document"
+
+# Detailed Capabilities
+capabilities_detail:
+ conversational_qa:
+ supported: true
+ description: "Direct question answering with contextual understanding"
+ features:
+ - multi_turn_conversation
+ - follow_up_handling
+ - context_awareness
+
+ code_assistance:
+ supported: true
+ description: "Code generation, explanation, and debugging"
+ languages:
+ - typescript
+ - python
+ - rust
+ - kotlin
+ - go
+ - java
+
+ analysis_reasoning:
+ supported: true
+ description: "Data interpretation, comparative analysis, logical reasoning"
+
+ creative_writing:
+ supported: true
+ description: "Content generation with tone adaptation"
+ formats:
+ - articles
+ - emails
+ - documentation
+ - summaries
+
+# Requirements
+requirements:
+ packages:
+    - "openai>=4.0.0"
+ system:
+ - internet_connection
+ api_keys:
+ - OPENROUTER_API_KEY
+
+# Performance Metrics
+performance:
+ avg_processing_time_ms: 2000
+ max_concurrent_requests: 10
+ context_window_tokens: 128000
+ supported_natural_languages: 50
+ scalability: horizontal
+
+# Assessment fields for skill negotiation
+assessment:
+ keywords:
+ - question
+ - answer
+ - explain
+ - help
+ - how
+ - what
+ - why
+ - write
+ - generate
+ - analyze
+ - summarize
+ - compare
+ - code
+ - debug
+
+ specializations:
+ - domain: general_knowledge
+ confidence_boost: 0.2
+ - domain: code_assistance
+ confidence_boost: 0.3
+ - domain: content_generation
+ confidence_boost: 0.2
+
+ anti_patterns:
+ - "real-time data"
+ - "stock prices"
+ - "image generation"
+ - "pdf processing"
+ - "database query"
+
+ complexity_indicators:
+ simple:
+ - "what is"
+ - "explain"
+ - "define"
+ medium:
+ - "compare"
+ - "analyze"
+ - "multi-step"
+ complex:
+ - "design a system"
+ - "debug this code"
+ - "multi-domain synthesis"
diff --git a/examples/typescript-openai-agent/tsconfig.json b/examples/typescript-openai-agent/tsconfig.json
new file mode 100644
index 00000000..11aab691
--- /dev/null
+++ b/examples/typescript-openai-agent/tsconfig.json
@@ -0,0 +1,9 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "commonjs",
+ "esModuleInterop": true,
+ "strict": true,
+ "skipLibCheck": true
+ }
+}
diff --git a/proto/agent_handler.proto b/proto/agent_handler.proto
new file mode 100644
index 00000000..62d3874a
--- /dev/null
+++ b/proto/agent_handler.proto
@@ -0,0 +1,197 @@
+// Bindu gRPC Protocol Definition
+//
+// This proto defines the contract between the Bindu core (Python) and
+// language-agnostic agent SDKs (TypeScript, Kotlin, Rust, etc.).
+//
+// Two services exist:
+//
+// BinduService (runs on core, port 3774)
+// - SDKs call RegisterAgent to register themselves with the core
+// - Core then handles DID, auth, x402, A2A, scheduler, storage
+//
+// AgentHandler (runs on SDK side, dynamic port)
+// - Core calls HandleMessages when a task arrives
+// - SDK executes the developer's handler function and returns the result
+//
+// The config is sent as a JSON string to keep this proto decoupled from
+// the Python config schema. This means adding new config fields to bindufy()
+// does NOT require proto changes (DRY principle).
+
+syntax = "proto3";
+
+package bindu.grpc;
+
+option java_package = "com.getbindu.grpc";
+option java_multiple_files = true;
+option go_package = "github.com/getbindu/bindu/proto";
+
+// =============================================================================
+// BinduService — SDK calls this on the Core to register and manage agents
+// =============================================================================
+
+service BinduService {
+ // Register an agent with the Bindu core.
+ // Core runs the full bindufy logic: DID, auth, x402, manifest, HTTP server.
+ // Returns agent identity and the A2A endpoint URL.
+ rpc RegisterAgent(RegisterAgentRequest) returns (RegisterAgentResponse);
+
+ // Periodic heartbeat to signal the SDK is still alive.
+ rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse);
+
+ // Unregister an agent and shut down its A2A server.
+ rpc UnregisterAgent(UnregisterAgentRequest) returns (UnregisterAgentResponse);
+}
+
+// =============================================================================
+// AgentHandler — Core calls this on the SDK to execute tasks
+// =============================================================================
+
+service AgentHandler {
+ // Execute a handler with conversation history (unary).
+ // Core sends messages, SDK runs the developer's handler, returns response.
+ rpc HandleMessages(HandleRequest) returns (HandleResponse);
+
+ // Execute a handler with streaming response (server-side streaming).
+ // SDK yields chunks; core collects them via ResultProcessor.
+ rpc HandleMessagesStream(HandleRequest) returns (stream HandleResponse);
+
+ // Query agent capabilities (skills, supported modes).
+ rpc GetCapabilities(GetCapabilitiesRequest) returns (GetCapabilitiesResponse);
+
+ // Health check to verify the SDK process is responsive.
+ rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
+}
+
+// =============================================================================
+// Registration Messages
+// =============================================================================
+
+message RegisterAgentRequest {
+ // Full agent config as JSON string. Parsed and validated by the core using
+ // the same ConfigValidator as Python bindufy(). This keeps the proto
+ // decoupled from config schema evolution.
+ string config_json = 1;
+
+ // Skills with their content pre-loaded from the SDK filesystem.
+ // The SDK reads skill.yaml/SKILL.md files and sends the content here
+ // so the core doesn't need filesystem access to the SDK's project.
+ repeated SkillDefinition skills = 2;
+
+ // The SDK's AgentHandler gRPC server address (e.g., "localhost:50052").
+ // Core will connect to this address to call HandleMessages.
+ string grpc_callback_address = 3;
+}
+
+message RegisterAgentResponse {
+ bool success = 1;
+ string agent_id = 2; // UUID of the registered agent
+ string did = 3; // W3C Decentralized Identifier assigned to the agent
+ string agent_url = 4; // A2A HTTP endpoint URL (e.g., "http://localhost:3773")
+ string error = 5; // Error message if success=false
+}
+
+message HeartbeatRequest {
+ string agent_id = 1;
+ int64 timestamp = 2; // Unix timestamp in milliseconds
+}
+
+message HeartbeatResponse {
+ bool acknowledged = 1;
+ int64 server_timestamp = 2;
+}
+
+message UnregisterAgentRequest {
+ string agent_id = 1;
+}
+
+message UnregisterAgentResponse {
+ bool success = 1;
+ string error = 2;
+}
+
+// =============================================================================
+// Handler Messages — Used for task execution between Core and SDK
+// =============================================================================
+
+message ChatMessage {
+ // Conversation message in chat format.
+ // Maps directly to {"role": "user", "content": "..."} dicts in Python.
+ string role = 1; // "user", "assistant", or "system"
+ string content = 2;
+}
+
+message HandleRequest {
+ // Conversation history sent by the core worker.
+ // This is the same list[dict[str, str]] that Python handlers receive.
+ repeated ChatMessage messages = 1;
+
+ // Task metadata for context (optional, informational).
+ string task_id = 2;
+ string context_id = 3;
+}
+
+message HandleResponse {
+  // Agent's response content.
+  string content = 1;
+
+  // Task state transition (empty string = completed normally).
+  // Supported values: "", "input-required", "auth-required"
+  // Maps to ResponseDetector.determine_task_state() in the core.
+  string state = 2;
+
+  // Prompt text when state is "input-required" or "auth-required".
+  string prompt = 3;
+
+  // Whether this is the final chunk in a streaming response.
+  bool is_final = 4;
+
+  // Additional key-value metadata to include in the response.
+  map<string, string> metadata = 5;
+}
+
+// =============================================================================
+// Skill Definition — Sent during registration with pre-loaded content
+// =============================================================================
+
+message SkillDefinition {
+ string name = 1;
+ string description = 2;
+ repeated string tags = 3;
+ repeated string input_modes = 4;
+ repeated string output_modes = 5;
+ string version = 6;
+ string author = 7;
+
+ // Raw content of the skill file (skill.yaml or SKILL.md).
+ // The SDK reads this from disk and sends it so the core
+ // can process skills without filesystem access to the SDK project.
+ string raw_content = 8;
+
+ // File format hint: "yaml" or "markdown"
+ string format = 9;
+}
+
+// =============================================================================
+// Capabilities Messages
+// =============================================================================
+
+message GetCapabilitiesRequest {}
+
+message GetCapabilitiesResponse {
+ string name = 1;
+ string description = 2;
+ string version = 3;
+ bool supports_streaming = 4;
+ repeated SkillDefinition skills = 5;
+}
+
+// =============================================================================
+// Health Check Messages
+// =============================================================================
+
+message HealthCheckRequest {}
+
+message HealthCheckResponse {
+ bool healthy = 1;
+ string message = 2;
+}
diff --git a/pyproject.toml b/pyproject.toml
index 88ae8b24..0fa5dc1f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,6 +57,9 @@ dependencies = [
# Security
"detect-secrets==1.5.0",
"python-dotenv>=1.1.0",
+ "grpcio>=1.78.0",
+ "grpcio-tools>=1.78.0",
+ "protobuf>=6.33.5",
]
[project.optional-dependencies]
@@ -79,6 +82,13 @@ agents = [
"eth-utils>=5.0.0",
]
+# gRPC adapter for language-agnostic agent support (use: pip install bindu[grpc])
+grpc = [
+ "grpcio>=1.62.0",
+ "grpcio-tools>=1.62.0",
+ "protobuf>=4.25.0",
+]
+
# Minimal core only (use: pip install bindu[core] --only-deps)
core = [
"uvicorn>=0.35",
@@ -98,6 +108,9 @@ core = [
"pynacl==1.5.0",
]
+[project.scripts]
+bindu = "bindu.cli:main"
+
[tool.hatch.build.targets.wheel]
packages = ["bindu"]
diff --git a/scripts/generate_protos.sh b/scripts/generate_protos.sh
new file mode 100755
index 00000000..20d6a829
--- /dev/null
+++ b/scripts/generate_protos.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Generate protobuf stubs for all supported languages.
+#
+# Usage:
+# bash scripts/generate_protos.sh [language]
+#
+# Languages: python (default), typescript, all
+#
+# Prerequisites:
+# Python: pip install grpcio-tools protobuf
+# TypeScript: npm install -g grpc_tools_node_protoc_ts @grpc/grpc-js
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+PROTO_DIR="$PROJECT_ROOT/proto"
+PROTO_FILE="$PROTO_DIR/agent_handler.proto"
+
+LANGUAGE="${1:-python}"
+
+if [ ! -f "$PROTO_FILE" ]; then
+ echo "Error: Proto file not found at $PROTO_FILE"
+ exit 1
+fi
+
+generate_python() {
+ echo "Generating Python stubs..."
+ local OUT_DIR="$PROJECT_ROOT/bindu/grpc/generated"
+ mkdir -p "$OUT_DIR"
+
+ uv run python -m grpc_tools.protoc \
+ -I"$PROTO_DIR" \
+ --python_out="$OUT_DIR" \
+ --grpc_python_out="$OUT_DIR" \
+ --pyi_out="$OUT_DIR" \
+ "$PROTO_FILE"
+
+ # Fix imports in generated grpc file (grpcio-tools generates absolute imports)
+ local GRPC_FILE="$OUT_DIR/agent_handler_pb2_grpc.py"
+ if [ -f "$GRPC_FILE" ]; then
+ sed -i.bak 's/^import agent_handler_pb2/from bindu.grpc.generated import agent_handler_pb2/' "$GRPC_FILE"
+ rm -f "$GRPC_FILE.bak"
+ fi
+
+ echo "Python stubs generated in $OUT_DIR"
+}
+
+generate_typescript() {
+ echo "Generating TypeScript stubs..."
+ local OUT_DIR="$PROJECT_ROOT/sdks/typescript/src/generated"
+ mkdir -p "$OUT_DIR"
+
+ # Using @grpc/proto-loader compatible generation
+ npx grpc_tools_node_protoc \
+ --ts_out=grpc_js:"$OUT_DIR" \
+ --grpc_out=grpc_js:"$OUT_DIR" \
+ -I"$PROTO_DIR" \
+ "$PROTO_FILE" 2>/dev/null || {
+ echo "Warning: TypeScript generation requires grpc_tools_node_protoc_ts"
+ echo "Install with: npm install -g grpc_tools_node_protoc_ts"
+ }
+
+ echo "TypeScript stubs generated in $OUT_DIR"
+}
+
+case "$LANGUAGE" in
+ python)
+ generate_python
+ ;;
+ typescript|ts)
+ generate_typescript
+ ;;
+ all)
+ generate_python
+ generate_typescript
+ ;;
+ *)
+ echo "Unknown language: $LANGUAGE"
+ echo "Supported: python, typescript, all"
+ exit 1
+ ;;
+esac
+
+echo "Done."
diff --git a/sdks/kotlin/build.gradle.kts b/sdks/kotlin/build.gradle.kts
new file mode 100644
index 00000000..3d76937e
--- /dev/null
+++ b/sdks/kotlin/build.gradle.kts
@@ -0,0 +1,71 @@
+import com.google.protobuf.gradle.*
+
+plugins {
+ kotlin("jvm") version "1.9.22"
+ id("com.google.protobuf") version "0.9.4"
+ application
+}
+
+group = "com.getbindu"
+version = "0.1.0"
+
+repositories {
+ mavenCentral()
+}
+
+dependencies {
+ // gRPC
+ implementation("io.grpc:grpc-netty-shaded:1.62.2")
+ implementation("io.grpc:grpc-protobuf:1.62.2")
+ implementation("io.grpc:grpc-stub:1.62.2")
+ implementation("io.grpc:grpc-kotlin-stub:1.4.1")
+ implementation("com.google.protobuf:protobuf-kotlin:3.25.3")
+
+ // Coroutines
+ implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.8.0")
+
+ // JSON
+ implementation("com.google.code.gson:gson:2.10.1")
+
+ // YAML parsing for skills
+ implementation("org.yaml:snakeyaml:2.2")
+
+ // Annotations
+ compileOnly("javax.annotation:javax.annotation-api:1.3.2")
+
+ // Testing
+ testImplementation(kotlin("test"))
+}
+
+protobuf {
+ protoc {
+ artifact = "com.google.protobuf:protoc:3.25.3"
+ }
+ plugins {
+ id("grpc") {
+ artifact = "io.grpc:protoc-gen-grpc-java:1.62.2"
+ }
+ id("grpckt") {
+ artifact = "io.grpc:protoc-gen-grpc-kotlin:1.4.1:jdk8@jar"
+ }
+ }
+ generateProtoTasks {
+ all().forEach {
+ it.plugins {
+ id("grpc")
+ id("grpckt")
+ }
+ it.builtins {
+ id("kotlin")
+ }
+ }
+ }
+}
+
+tasks.test {
+ useJUnitPlatform()
+}
+
+kotlin {
+ jvmToolchain(17)
+}
diff --git a/sdks/kotlin/settings.gradle.kts b/sdks/kotlin/settings.gradle.kts
new file mode 100644
index 00000000..f31fd0a3
--- /dev/null
+++ b/sdks/kotlin/settings.gradle.kts
@@ -0,0 +1 @@
+rootProject.name = "bindu-sdk"
diff --git a/sdks/kotlin/src/main/kotlin/com/getbindu/sdk/BinduAgent.kt b/sdks/kotlin/src/main/kotlin/com/getbindu/sdk/BinduAgent.kt
new file mode 100644
index 00000000..73010350
--- /dev/null
+++ b/sdks/kotlin/src/main/kotlin/com/getbindu/sdk/BinduAgent.kt
@@ -0,0 +1,275 @@
+/**
+ * Bindu SDK for Kotlin — transform any Kotlin agent into a microservice.
+ *
+ * This is the main entry point. Developers call bindufy() with their
+ * config and handler — the SDK handles gRPC, core launching, and registration.
+ *
+ * Example:
+ * bindufy(
+ * config = mapOf(
+ * "author" to "dev@example.com",
+ * "name" to "my-agent",
+ * "deployment" to mapOf("url" to "http://localhost:3773", "expose" to true),
+ * )
+ * ) { messages ->
+ * "Echo: ${messages.last().content}"
+ * }
+ */
+
+package com.getbindu.sdk
+
+import com.google.gson.Gson
+import io.grpc.ManagedChannelBuilder
+import io.grpc.ServerBuilder
+import io.grpc.stub.StreamObserver
+import kotlinx.coroutines.runBlocking
+import java.io.File
+import java.net.ServerSocket
+import java.util.concurrent.TimeUnit
+
+/** A single message in conversation history. */
+data class ChatMessage(
+ val role: String,
+ val content: String
+)
+
+/** Response from the handler. */
+data class HandlerResponse(
+ val content: String = "",
+ val state: String = "",
+ val prompt: String = "",
+ val metadata: Map<String, String> = emptyMap()
+)
+
+/** Registration result from the Bindu core. */
+data class RegistrationResult(
+ val agentId: String,
+ val did: String,
+ val agentUrl: String
+)
+
+/** Handler function type. */
+typealias MessageHandler = suspend (List<ChatMessage>) -> Any
+
+/**
+ * Transform a Kotlin agent into a Bindu microservice.
+ *
+ * This function:
+ * 1. Launches the Bindu Python core as a child process
+ * 2. Starts a gRPC server for receiving HandleMessages calls
+ * 3. Registers the agent with the core via RegisterAgent
+ * 4. Blocks, handling tasks via gRPC
+ *
+ * @param config Agent configuration as a map (matches Python bindufy config).
+ * @param skills List of skill directory paths (relative to CWD).
+ * @param coreAddress Bindu core gRPC address (default: "localhost:3774").
+ * @param handler The handler function that processes messages.
+ * @return Registration result with agent_id, DID, and A2A URL.
+ */
+fun bindufy(
+ config: Map<String, Any>,
+ skills: List<String> = emptyList(),
+ coreAddress: String = "localhost:3774",
+ handler: MessageHandler
+): RegistrationResult = runBlocking {
+ val agentName = config["name"] as? String ?: "kotlin-agent"
+ println("\n Bindufy: $agentName")
+ println(" Author: ${config["author"]}")
+ println()
+
+ // Step 1: Launch Bindu Python core
+ val grpcPort = coreAddress.split(":").last().toInt()
+ val core = CoreLauncher.launch(grpcPort = grpcPort)
+
+ // Step 2: Start AgentHandler gRPC server
+ val callbackPort = findFreePort()
+ val agentServer = startAgentHandlerServer(handler, callbackPort)
+ println(" AgentHandler gRPC server on :$callbackPort")
+
+ // Step 3: Load skills from filesystem
+ val loadedSkills = loadSkills(skills)
+
+ // Step 4: Register with Bindu core
+ println(" Registering with Bindu core at $coreAddress...")
+ val configJson = Gson().toJson(config)
+ val result = registerWithCore(coreAddress, configJson, loadedSkills, "localhost:$callbackPort")
+
+ println()
+ println(" Agent registered successfully!")
+ println(" Agent ID: ${result.agentId}")
+ println(" DID: ${result.did}")
+ println(" A2A URL: ${result.agentUrl}")
+ println()
+ println(" Waiting for messages...\n")
+
+ // Step 5: Handle shutdown
+ Runtime.getRuntime().addShutdownHook(Thread {
+ agentServer.shutdown()
+ core?.destroy()
+ })
+
+ // Block until interrupted
+ agentServer.awaitTermination()
+
+ result
+}
+
+/** Find a free port for the AgentHandler server. */
+private fun findFreePort(): Int {
+ ServerSocket(0).use { return it.localPort }
+}
+
+/** Load skill files from directories. */
+private fun loadSkills(skillPaths: List<String>): List<SkillDefinition>