diff --git a/multimodal-gpu_marketplace-bot/.env.example b/multimodal-gpu_marketplace-bot/.env.example
new file mode 100644
index 0000000..aa71ec2
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/.env.example
@@ -0,0 +1,8 @@
+# You may need to replace these with Hyperbolic's API keys / credentials
+
+# Daily.co API credentials
+DAILY_API_KEY=your_daily_api_key_here
+DAILY_SAMPLE_ROOM_URL=your_daily_room_url_here # Optional: URL of an existing Daily room
+
+# Google Gemini API credentials
+GOOGLE_API_KEY=your_gemini_api_key_here
\ No newline at end of file
diff --git a/multimodal-gpu_marketplace-bot/Dockerfile b/multimodal-gpu_marketplace-bot/Dockerfile
new file mode 100644
index 0000000..d38414d
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/Dockerfile
@@ -0,0 +1,31 @@
# syntax=docker/dockerfile:1

# Use an official Python runtime as a parent image
FROM python:3.12-slim

# Prevent Python from writing .pyc files and force unbuffered stdout/stderr.
# key=value form: the legacy space-separated ENV format is deprecated.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set the working directory in the container
WORKDIR /app

# Install system dependencies needed for audio processing (for pipecat's Silero VAD)
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# Copy only the requirements file first so the dependency layer stays cached
# until requirements.txt itself changes.
COPY requirements.txt .

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of your application code into the container
COPY . .

# Run as an unprivileged user instead of root (stable UID for k8s runAsNonRoot).
RUN useradd --create-home --uid 10001 appuser \
    && chown -R appuser:appuser /app
USER appuser

# Expose the port your healthcheck server runs on (documentation only; does not publish)
EXPOSE 8080

# The command to run your application (exec form so it receives SIGTERM as PID 1)
CMD ["python", "launcher.py"]
\ No newline at end of file
diff --git a/multimodal-gpu_marketplace-bot/README.md b/multimodal-gpu_marketplace-bot/README.md
new file mode 100644
index 0000000..9acd579
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/README.md
@@ -0,0 +1,77 @@
+# GPU Marketplace Voice Assistant Bot
+
+An interactive voice assistant bot that helps users explore and find GPU instances on the Hyperbolic GPU Marketplace.
+
+## Features
+
+- **Real-time GPU Availability**: Live access to Hyperbolic's GPU marketplace
+- **Voice Interaction**: Natural conversation about GPU options and pricing
+- **Smart Filtering**:
+ - Price ranges (budget to high-end)
+ - GPU quantities (1X to 8X+)
+ - Storage capacity
+ - Availability status
+- **Dynamic Price Display**:
+ - Under $1: Shows in cents (e.g., "13¢/hr")
+ - $1 and above: Shows in dollars (e.g., "$1.50/hr")
+
+## Available GPU Types
+
+- Consumer GPUs (RTX 3070, 3080, 4090)
+- Data Center GPUs (H100 SXM, NVIDIA H200)
+- Various configurations (1X to 8X+)
+
+## Requirements
+
+- Python 3.12+
+- Google API key (for Gemini)
+- Daily.co API key
+- Access to Hyperbolic Marketplace API
+
+## Environment Setup
+
+Create a `.env` file with:
+
+```
+GOOGLE_API_KEY=your_google_api_key
+DAILY_API_KEY=your_daily_api_key
+DAILY_SAMPLE_ROOM_URL=your_daily_room_url
+```
+
+## Installation
+
+```bash
+pip install -r requirements.txt
+```
+
+## Usage
+
+Start the bot directly:
+
+```bash
+python main.py
+```
+
+Or run the web launcher (this is what the Docker image starts) and control the
+bot from your browser:
+
+```bash
+python launcher.py
+```
+
+Join the Daily.co room to interact with the bot. You can:
+
+- Ask about available GPUs
+- Filter by price range
+- Sort by price (low to high or high to low)
+- Filter by GPU quantity
+- Check storage options
+- Get real-time availability updates
+
+## Example Queries
+
+- "What GPUs are available?"
+- "Show me budget options under 50 cents per hour"
+- "What are your high-end GPUs?"
+- "Do you have any 8X GPU configurations?"
+- "Show me GPUs with over 500GB storage"
+- "What's the price range for H100s?"
+
+## Notes
+
+- All GPU instances are located in US, North America
+- Prices are always displayed per hour
+- The bot automatically refreshes data for the most current availability
diff --git a/multimodal-gpu_marketplace-bot/config.py b/multimodal-gpu_marketplace-bot/config.py
new file mode 100644
index 0000000..82d3621
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/config.py
@@ -0,0 +1,47 @@
+# System prompt sent to the Gemini Live service; defines the assistant persona,
+# the get_available_gpus tool usage, and the response style.
+SYSTEM_INSTRUCTION = """
+You are a helpful assistant for Hyperbolic Labs' GPU Marketplace. You can help users find and understand available GPU instances for rent.
+
+You have access to the marketplace data through the get_available_gpus tool. When users ask about available GPUs, pricing, or specifications, use this tool to get the most current information.
+
+Always be professional and helpful. When listing GPUs:
+1. Mention the GPU model, memory, and hourly price
+2. Indicate if the instance is currently available
+3. Include the location/region
+
+By default, only mention GPU model, memory, price, location, and availability. If a user wants to learn more about a specific instance, invite them to ask for details using the instance's GPU model or ID. When asked, provide all available technical details (CPU, storage, RAM, network, etc) for that instance in a clear, friendly, and expert manner.
+
+Encourage users to ask about their use case (e.g., 'If you're doing XYZ, I recommend...') and offer expert advice as a pro GPU specialist. If a user describes their workload, suggest the best GPU for their needs and explain why.
+
+If users ask about specific GPU models or price ranges, filter and highlight the relevant options from the data.
+
+You can also see and analyze video feeds (screen share or camera) in real time. If a user shares their screen or camera, you can describe what is visible and help with on-screen tasks. Encourage users to share their screen for more detailed help.
+"""
+
+# Seed "user" message used by main.py to make the bot greet the caller unprompted.
+LLMCONTEXT_CONTENT = "Start by greeting me warmly and introducing me to GPU Rentals by Hyperbolic Labs and mention that you can do everything verbally. Encourage me to start by asking available GPU. Also mention that you can help me with my use case and suggest the best GPU for my needs. You can also see my screen or camera if I share it, and help with what you see!"
+
+# Gemini-style function declarations for the marketplace tool.
+# NOTE(review): this duplicates TOOL_DEFINITIONS in tools.py, and main.py uses
+# tools.get_tool_declarations() instead — confirm which list is authoritative.
+TOOLS = [
+    {
+        "function_declarations": [
+            {
+                "name": "get_available_gpus",
+                "description": "Get the list of available GPU instances in the marketplace",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "filter_type": {
+                            "type": "string",
+                            "enum": ["all", "available_only"],
+                            "description": "Filter type for GPU instances",
+                        }
+                    },
+                    "required": ["filter_type"],
+                },
+            }
+        ]
+    }
+]
+
+# Maps the API's opaque region identifiers to human-readable locations
+# (used by marketplace.extract_instance_summary; unknown regions pass through).
+REGION_MAP = {
+    "region-1": "US, North America",
+    # Add more mappings as needed
+}
diff --git a/multimodal-gpu_marketplace-bot/launcher.py b/multimodal-gpu_marketplace-bot/launcher.py
new file mode 100644
index 0000000..891fdee
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/launcher.py
@@ -0,0 +1,146 @@
import asyncio
import os
import signal
import subprocess
import sys
import time

import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse
from loguru import logger
+
+app = FastAPI()
+
+# --- Global State Management ---
+# This dictionary holds the state of our bot process.
+# This simple in-memory state is perfect for a single Render instance.
+bot_state = {
+    "process": None,  # subprocess.Popen handle for the running bot, or None
+    "status": "STOPPED",  # Can be: STOPPED, STARTING, RUNNING, STOPPING
+}
+
+# Daily room shown as the "join" target while the bot is RUNNING.
+DAILY_ROOM_URL = os.getenv(
+    "DAILY_SAMPLE_ROOM_URL", "https://your-room.daily.co/default-room"
+)
+
+
def get_page_html():
    """Generates the HTML fragment for the control page based on the bot state.

    NOTE(review): the original markup was garbled in this patch (tags were
    stripped by extraction); this reconstruction keeps the same four status
    branches and the same visible text, and adds a fallback so the function
    never returns None for an unexpected state.
    """
    status = bot_state["status"]

    if status == "RUNNING":
        return f"""
        <h1>GPU Bot is RUNNING</h1>
        <p>The bot is active. You can now join the call.</p>
        <p><a href="{DAILY_ROOM_URL}" target="_blank">Join Daily Room</a></p>
        <form action="/stop-bot" method="post"><button type="submit">Stop Bot</button></form>
        """
    elif status == "STOPPED":
        return """
        <h1>GPU Bot is STOPPED</h1>
        <p>Click the button to start a new demo session.</p>
        <form action="/start-bot" method="post"><button type="submit">Start Bot</button></form>
        """
    elif status == "STARTING":
        return """
        <h1>GPU Bot is STARTING...</h1>
        <p>Please wait, this can take up to 30 seconds.</p>
        <p><em>Starting...</em></p>
        """
    elif status == "STOPPING":
        return """
        <h1>GPU Bot is STOPPING...</h1>
        <p>Please wait while the session is terminated.</p>
        <p><em>Stopping...</em></p>
        """
    # Defensive fallback: the original implicitly returned None here.
    return f"<h1>Unknown bot state: {status}</h1>"
+
+
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serves the main control page, which changes based on the bot's status.

    NOTE(review): the original page markup was garbled in this patch; this
    reconstruction wraps get_page_html() in a minimal document and auto-refreshes
    so the transient STARTING/STOPPING states resolve without user action.
    """
    return f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Bot Control</title>
        <meta http-equiv="refresh" content="5">
    </head>
    <body>
        {get_page_html()}
    </body>
    </html>
    """
+
+
@app.post("/start-bot")
async def start_bot_endpoint():
    """Endpoint to launch the bot as a child process. No-op unless STOPPED.

    Always redirects back to "/" (303) so the browser re-renders the status page.
    """
    if bot_state["status"] != "STOPPED":
        logger.warning(f"Attempted to start bot while in state: {bot_state['status']}")
        return RedirectResponse(url="/", status_code=303)

    bot_state["status"] = "STARTING"
    logger.info("Bot state changed to STARTING.")

    try:
        script_path = os.path.join(os.path.dirname(__file__), "main.py")
        python_executable = sys.executable
        process = subprocess.Popen([python_executable, script_path])

        bot_state["process"] = process
        # Give the child a moment to stabilize before reporting RUNNING.
        # BUGFIX: the original used time.sleep(5), which blocked the asyncio
        # event loop and froze every other request for 5 seconds.
        await asyncio.sleep(5)
        if process.poll() is not None:
            # The bot died immediately (bad credentials, import error, ...).
            logger.error(f"Bot process exited early with code {process.returncode}.")
            bot_state["process"] = None
            bot_state["status"] = "STOPPED"
        else:
            bot_state["status"] = "RUNNING"
            logger.info(
                f"Bot process started with PID {process.pid}. State is now RUNNING."
            )

    except Exception as e:
        logger.error(f"Failed to launch bot process: {e}")
        bot_state["status"] = "STOPPED"

    return RedirectResponse(url="/", status_code=303)
+
+
@app.post("/stop-bot")
async def stop_bot_endpoint():
    """Endpoint to stop the bot. No-op unless the bot is currently RUNNING.

    Sends SIGTERM first, escalates to SIGKILL after a 15s grace period, then
    resets the in-memory state and redirects back to the control page.
    """
    if bot_state["status"] != "RUNNING":
        logger.warning(f"Attempted to stop bot while in state: {bot_state['status']}")
        return RedirectResponse(url="/", status_code=303)

    bot_state["status"] = "STOPPING"
    logger.info("Bot state changed to STOPPING.")

    proc = bot_state["process"]
    still_alive = proc is not None and proc.poll() is None
    if still_alive:
        logger.info(f"Sending SIGTERM to bot process with PID: {proc.pid}")
        proc.send_signal(signal.SIGTERM)
        try:
            proc.wait(timeout=15)
        except subprocess.TimeoutExpired:
            logger.warning("Bot did not terminate in time, sending SIGKILL.")
            proc.kill()
        else:
            logger.info("Bot process terminated gracefully.")

    bot_state["process"] = None
    bot_state["status"] = "STOPPED"
    logger.info("Bot state changed to STOPPED.")

    return RedirectResponse(url="/", status_code=303)
+
+
+if __name__ == "__main__":
+    # Render (and most PaaS hosts) inject the listening port via $PORT;
+    # default to 8080 for local runs (matches the Dockerfile's EXPOSE).
+    port = int(os.environ.get("PORT", 8080))
+    logger.info(f"Starting bot launcher on http://0.0.0.0:{port}")
+    uvicorn.run(app, host="0.0.0.0", port=port)
diff --git a/multimodal-gpu_marketplace-bot/main.py b/multimodal-gpu_marketplace-bot/main.py
new file mode 100644
index 0000000..96ba2fc
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/main.py
@@ -0,0 +1,128 @@
+#
+# Copyright (c) 2024–2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
+
+# Carl's note: we extended this code from the Daily SDK, which is licensed under the BSD 2-Clause License.
+# # The Daily SDK is available at https://github.com/pipecat-ai/pipecat/tree/main
+
+import asyncio
+import os
+import sys
+from datetime import datetime
+
+import aiohttp
+from dotenv import load_dotenv
+from loguru import logger
+from runner import configure
+from websockets.exceptions import ConnectionClosedError
+
+from pipecat.audio.vad.silero import SileroVADAnalyzer
+from pipecat.audio.vad.vad_analyzer import VADParams
+from pipecat.pipeline.pipeline import Pipeline
+from pipecat.pipeline.runner import PipelineRunner
+from pipecat.pipeline.task import PipelineParams, PipelineTask
+from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
+from pipecat.services.gemini_multimodal_live.gemini import (
+ GeminiMultimodalLiveLLMService,
+)
+from pipecat.transports.services.daily import DailyParams, DailyTransport
+from config import SYSTEM_INSTRUCTION, LLMCONTEXT_CONTENT
+from tools import get_tool_declarations, register_all_tools
+
+load_dotenv(override=True)
+
+logger.remove(0)
+logger.add(sys.stderr, level="DEBUG")
+
+
+async def main():
+    """Build and run the voice-bot pipeline: Daily transport -> Gemini Live -> Daily."""
+    async with aiohttp.ClientSession() as session:
+        # Resolve the Daily room URL and meeting token (CLI args or env vars).
+        (room_url, token) = await configure(session)
+
+        transport = DailyTransport(
+            room_url,
+            token,
+            "Respond bot",
+            DailyParams(
+                audio_out_enabled=True,
+                vad_enabled=True,
+                vad_audio_passthrough=True,
+                # set stop_secs to something roughly similar to the internal setting
+                # of the Multimodal Live api, just to align events. This doesn't really
+                # matter because we can only use the Multimodal Live API's phrase
+                # endpointing, for now.
+                vad_analyzer=SileroVADAnalyzer(params=VADParams(stop_secs=0.5)),
+            ),
+        )
+
+        # Gemini handles speech-to-speech itself; the transcription flags add
+        # text copies of both sides of the conversation for logging/context.
+        llm = GeminiMultimodalLiveLLMService(
+            api_key=os.getenv("GOOGLE_API_KEY"),
+            system_instruction=SYSTEM_INSTRUCTION,
+            tools=get_tool_declarations(),
+            transcribe_user_audio=True,
+            transcribe_model_audio=True,
+            inference_on_context_initialization=True,
+        )
+
+        # Attach the marketplace tool handlers (get_available_gpus, ...).
+        register_all_tools(llm)
+
+        # Seed the context with a "user" message so the bot greets unprompted.
+        context = OpenAILLMContext(
+            [
+                {
+                    "role": "user",
+                    "content": LLMCONTEXT_CONTENT,
+                }
+            ],
+        )
+        context_aggregator = llm.create_context_aggregator(context)
+
+        # Frame flow: Daily audio in -> user context -> LLM -> assistant context -> Daily out.
+        pipeline = Pipeline(
+            [
+                transport.input(),
+                context_aggregator.user(),
+                llm,
+                context_aggregator.assistant(),
+                transport.output(),
+            ]
+        )
+
+        task = PipelineTask(
+            pipeline,
+            params=PipelineParams(
+                allow_interruptions=True,
+                enable_metrics=True,
+                enable_usage_metrics=True,
+            ),
+        )
+
+        # NOTE(review): registered on "on_participant_joined", so despite the
+        # name this fires for EVERY participant who joins, re-queuing the
+        # context frame and re-capturing video each time — confirm intended.
+        @transport.event_handler("on_participant_joined")
+        async def on_first_participant_joined(transport, participant):
+            await task.queue_frames([context_aggregator.user().get_context_frame()])
+            await asyncio.sleep(3)
+            # Subscribe to both screen share and camera at 1 fps.
+            await transport.capture_participant_video(
+                participant["id"], framerate=1, video_source="screenVideo"
+            )
+            await transport.capture_participant_video(
+                participant["id"], framerate=1, video_source="camera"
+            )
+
+            logger.debug("Unpausing audio and video")
+            llm.set_audio_input_paused(False)
+            llm.set_video_input_paused(False)
+
+        runner = PipelineRunner()
+
+        try:
+            await runner.run(task)
+        except ConnectionClosedError as e:
+            logger.error(f"WebSocket connection closed unexpectedly: {e}")
+        except TimeoutError:
+            # NOTE(review): no timeout is set on the task here; the "1 hour"
+            # presumably refers to the Daily token expiry minted in runner.py.
+            logger.info("Pipeline task timed out after 1 hour.")
+        except Exception as e:
+            logger.error(f"An unexpected error occurred: {e}")
+
+
+if __name__ == "__main__":
+    # Entry point: run the bot until the pipeline finishes or errors out.
+    asyncio.run(main())
diff --git a/multimodal-gpu_marketplace-bot/marketplace.py b/multimodal-gpu_marketplace-bot/marketplace.py
new file mode 100644
index 0000000..8b2998c
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/marketplace.py
@@ -0,0 +1,51 @@
+import aiohttp
+from config import REGION_MAP
+
+
def format_memory(mb: int) -> str:
    """Render a RAM size given in megabytes as a human-readable MB/GB/TB string."""
    MB_PER_GB = 1024
    MB_PER_TB = 1024 * 1024
    if mb >= MB_PER_TB:
        return f"{mb / MB_PER_TB:.2f} TB"
    if mb >= MB_PER_GB:
        return f"{mb / MB_PER_GB:.2f} GB"
    return f"{mb} MB"
+
def format_price(amount_cents: int) -> str:
    """Format an hourly price in cents: sub-dollar as '13¢/hr', otherwise '$1.50/hr'."""
    return (
        f"{amount_cents}¢/hr"
        if amount_cents < 100
        else f"${amount_cents / 100:.2f}/hr"
    )
+
def extract_instance_summary(instance: dict) -> dict:
    """Condense a raw marketplace instance record into the fields the bot reports.

    Assumes instance["hardware"]["gpus"] is non-empty (callers filter for this);
    only the first GPU entry is summarized.
    """
    primary_gpu = instance["hardware"]["gpus"][0]
    region = instance["location"]["region"]
    cents = instance["pricing"]["price"]["amount"]
    is_available = (
        not instance["reserved"]
        and instance["gpus_reserved"] < instance["gpus_total"]
    )
    return {
        "gpu_model": primary_gpu["model"],
        "gpu_memory": format_memory(primary_gpu["ram"]),
        "price_per_hour": format_price(cents),
        "location": REGION_MAP.get(region, region),
        "available": is_available,
    }
+
+async def fetch_marketplace_data(
+    function_name, tool_call_id, args, llm, context, result_callback
+):
+    """Tool handler: fetch GPU instances from the Hyperbolic marketplace API.
+
+    The parameter list follows pipecat's function-call handler signature; only
+    ``args`` (expects args["filter_type"] of "all" or anything else meaning
+    available-only) and ``result_callback`` are used here. Results — or an
+    error dict — are delivered via ``await result_callback(...)``, not returned.
+    """
+    async with aiohttp.ClientSession() as session:
+        try:
+            url = "https://api.hyperbolic.xyz/v1/marketplace"
+            headers = {"Content-Type": "application/json"}
+            # "all" -> no filter; any other value requests available instances only.
+            filters = {} if args["filter_type"] == "all" else {"available": True}
+            data = {"filters": filters}
+
+            async with session.post(url, json=data, headers=headers) as response:
+                if response.status == 200:
+                    marketplace_data = await response.json()
+                    # Skip records without GPU hardware before summarizing.
+                    # NOTE(review): assumes the response has an "instances" key;
+                    # a KeyError here is caught by the blanket except below.
+                    available_instances = [
+                        extract_instance_summary(instance)
+                        for instance in marketplace_data["instances"]
+                        if "gpus" in instance["hardware"] and instance["hardware"]["gpus"]
+                    ]
+                    await result_callback({"instances": available_instances})
+                else:
+                    await result_callback({"error": f"API request failed with status {response.status}"})
+        except Exception as e:
+            # Report any failure back to the LLM instead of crashing the pipeline.
+            await result_callback({"error": str(e)})
diff --git a/multimodal-gpu_marketplace-bot/requirements.txt b/multimodal-gpu_marketplace-bot/requirements.txt
new file mode 100644
index 0000000..ebdfd0c
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/requirements.txt
@@ -0,0 +1,15 @@
+# Minimal requirements for main.py and core bot logic
+aiohttp
+python-dotenv
+loguru
+pydantic
+openai
+websockets
+daily-python
+google-generativeai
+pipecat-ai[google,silero]
+openpipe
+onnxruntime
+uvicorn
+fastapi
+
diff --git a/multimodal-gpu_marketplace-bot/runner.py b/multimodal-gpu_marketplace-bot/runner.py
new file mode 100644
index 0000000..2400217
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/runner.py
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2024–2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
+
+# Carl's note: we borrowed this code from the Daily SDK, which is licensed under the BSD 2-Clause License.
+# The Daily SDK is available at https://github.com/pipecat-ai/pipecat/blob/main/examples/foundational/runner.py
+
+import argparse
+import os
+from typing import Optional
+from dotenv import load_dotenv
+
+import aiohttp
+
+from pipecat.transports.services.helpers.daily_rest import DailyRESTHelper
+
+load_dotenv(override=True)
+
+
async def configure(aiohttp_session: aiohttp.ClientSession):
    """Convenience wrapper: resolve (room URL, meeting token), dropping the parsed args."""
    room_url, meeting_token, _args = await configure_with_args(aiohttp_session)
    return (room_url, meeting_token)
+
+
async def configure_with_args(
    aiohttp_session: aiohttp.ClientSession,
    parser: Optional[argparse.ArgumentParser] = None,
):
    """Resolve the Daily room URL and API key, then mint a 1-hour meeting token.

    Sources, in priority order: -u/--url and -k/--apikey CLI flags, then the
    DAILY_SAMPLE_ROOM_URL / DAILY_API_KEY environment variables.

    Returns (room_url, meeting_token, parsed_args).
    Raises Exception if no room URL or no API key can be resolved.

    BUGFIX: the original additionally required os.environ["DAILY_API_KEY"] to be
    set (and never used the value), so passing -k/--apikey alone raised anyway;
    that redundant check is removed — `key` already covers both sources.
    """
    if not parser:
        parser = argparse.ArgumentParser(description="Daily AI SDK Bot Sample")
    parser.add_argument(
        "-u", "--url", type=str, required=False, help="URL of the Daily room to join"
    )
    parser.add_argument(
        "-k",
        "--apikey",
        type=str,
        required=False,
        help="Daily API Key (needed to create an owner token for the room)",
    )

    # parse_known_args: tolerate extra flags owned by the embedding application.
    args, unknown = parser.parse_known_args()

    url = args.url or os.getenv("DAILY_SAMPLE_ROOM_URL")
    key = args.apikey or os.getenv("DAILY_API_KEY")

    if not url:
        raise Exception(
            "No Daily room specified. use the -u/--url option from the command line, or set DAILY_SAMPLE_ROOM_URL in your environment to specify a Daily room URL."
        )

    if not key:
        raise Exception(
            "No Daily API key specified. use the -k/--apikey option from the command line, or set DAILY_API_KEY in your environment to specify a Daily API key, available from https://dashboard.daily.co/developers."
        )

    daily_rest_helper = DailyRESTHelper(
        daily_api_key=key,
        daily_api_url=os.getenv("DAILY_API_URL", "https://api.daily.co/v1"),
        aiohttp_session=aiohttp_session,
    )

    # Create a meeting token for the given room with an expiration 1 hour in
    # the future.
    expiry_time: float = 60 * 60

    token = await daily_rest_helper.get_token(url, expiry_time)

    return (url, token, args)
diff --git a/multimodal-gpu_marketplace-bot/tools.py b/multimodal-gpu_marketplace-bot/tools.py
new file mode 100644
index 0000000..afec670
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/tools.py
@@ -0,0 +1,38 @@
+from typing import Callable, Dict, Any, Awaitable
+from marketplace import fetch_marketplace_data
+
+# Tool metadata definitions
+# Gemini-style function declarations for every tool the LLM may call.
+# NOTE(review): config.TOOLS duplicates this list; main.py uses this file via
+# get_tool_declarations() — confirm config.TOOLS can be retired.
+TOOL_DEFINITIONS = [
+    {
+        "name": "get_available_gpus",
+        "description": "Get the list of available GPU instances in the marketplace",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "filter_type": {
+                    "type": "string",
+                    "enum": ["all", "available_only"],
+                    "description": "Filter type for GPU instances",
+                }
+            },
+            "required": ["filter_type"],
+        },
+    },
+    # Add more tool definitions here as needed
+]
+
+# Tool registry: maps tool name to handler function
+# (each handler follows pipecat's async function-call handler signature).
+TOOL_REGISTRY: Dict[str, Callable[..., Awaitable[Any]]] = {
+    "get_available_gpus": fetch_marketplace_data,
+    # Add more tool handlers here as needed
+}
+
def get_tool_declarations() -> list:
    """Wrap TOOL_DEFINITIONS in the single-element envelope the LLM service expects."""
    declarations = {"function_declarations": TOOL_DEFINITIONS}
    return [declarations]
+
+
def register_all_tools(llm):
    """Attach every handler in TOOL_REGISTRY to the LLM service under its tool name."""
    for name, fn in sorted(TOOL_REGISTRY.items()):
        llm.register_function(name, fn)
diff --git a/multimodal-gpu_marketplace-bot/validator.ipynb b/multimodal-gpu_marketplace-bot/validator.ipynb
new file mode 100644
index 0000000..b3edc04
--- /dev/null
+++ b/multimodal-gpu_marketplace-bot/validator.ipynb
@@ -0,0 +1,793 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "68e46622",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Note: you may need to restart the kernel to use updated packages.\n"
+ ]
+ }
+ ],
+ "source": [
+ "%pip install aiohttp nest_asyncio pandas --quiet\n",
+ "\n",
+ "import aiohttp\n",
+ "import nest_asyncio\n",
+ "import asyncio\n",
+ "import json\n",
+ "\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "b8e5ddec",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "async def fetch_gpus(filter_type=\"all\"):\n",
+ " url = \"https://api.hyperbolic.xyz/v1/marketplace\"\n",
+ " headers = {\"Content-Type\": \"application/json\"}\n",
+ " filters = {} if filter_type == \"all\" else {\"available\": True}\n",
+ " data = {\"filters\": filters}\n",
+ " async with aiohttp.ClientSession() as session:\n",
+ " async with session.post(url, json=data, headers=headers) as response:\n",
+ " if response.status == 200:\n",
+ " return await response.json()\n",
+ " else:\n",
+ " print(f\"API request failed with status {response.status}\")\n",
+ " return None"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "a4c006ff",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.microsoft.datawrangler.viewer.v0+json": {
+ "columns": [
+ {
+ "name": "index",
+ "rawType": "int64",
+ "type": "integer"
+ },
+ {
+ "name": "id",
+ "rawType": "object",
+ "type": "string"
+ },
+ {
+ "name": "gpu_model",
+ "rawType": "object",
+ "type": "string"
+ },
+ {
+ "name": "gpu_memory",
+ "rawType": "int64",
+ "type": "integer"
+ },
+ {
+ "name": "price_per_hour",
+ "rawType": "int64",
+ "type": "integer"
+ },
+ {
+ "name": "location",
+ "rawType": "object",
+ "type": "string"
+ },
+ {
+ "name": "available",
+ "rawType": "bool",
+ "type": "boolean"
+ }
+ ],
+ "conversionMethod": "pd.DataFrame",
+ "ref": "3258f801-509e-4f66-9b28-56dabf5adba1",
+ "rows": [
+ [
+ "0",
+ "ceti14",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "1",
+ "l-hgx-05",
+ "NVIDIA-H200",
+ "143771",
+ "220",
+ "region-1",
+ "True"
+ ],
+ [
+ "2",
+ "sfc-016",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "3",
+ "l-hgx-01",
+ "NVIDIA-H200",
+ "143771",
+ "225",
+ "region-1",
+ "True"
+ ],
+ [
+ "4",
+ "antalpha-super-server100132",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "5",
+ "ses-a16",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "155",
+ "region-1",
+ "True"
+ ],
+ [
+ "6",
+ "antalpha-super-server100154",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "7",
+ "sfc-025",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "8",
+ "korea-amd9-17",
+ "NVIDIA-GeForce-RTX-3070",
+ "8192",
+ "16",
+ "region-1",
+ "False"
+ ],
+ [
+ "9",
+ "sfc-010",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "10",
+ "ceti16",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "160",
+ "region-1",
+ "True"
+ ],
+ [
+ "11",
+ "sfc-008",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "False"
+ ],
+ [
+ "12",
+ "antalpha-super-server100116",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "30",
+ "region-1",
+ "False"
+ ],
+ [
+ "13",
+ "sfc-026",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "14",
+ "ns-ai-server010",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "25",
+ "region-1",
+ "False"
+ ],
+ [
+ "15",
+ "sfc-007",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "16",
+ "antalpha-super-server100155",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "17",
+ "sfc-003",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "18",
+ "sfc-005",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "19",
+ "sfc-018",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "20",
+ "antalpha-super-server-100194",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "21",
+ "sfc-001",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "22",
+ "ns-ai-server018",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "30",
+ "region-1",
+ "False"
+ ],
+ [
+ "23",
+ "sfc-017",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "False"
+ ],
+ [
+ "24",
+ "antalpha-super-server100202",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "25",
+ "sfc-030",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "26",
+ "sfc-002",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "27",
+ "gpu-cluster-helsinki-hyperbolic",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "185",
+ "region-1",
+ "True"
+ ],
+ [
+ "28",
+ "l-hgx-02",
+ "NVIDIA-H200",
+ "143771",
+ "230",
+ "region-1",
+ "True"
+ ],
+ [
+ "29",
+ "antalpha-super-server100123",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "33",
+ "region-1",
+ "False"
+ ],
+ [
+ "30",
+ "sfc-011",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "31",
+ "antalpha-super-server100152",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "32",
+ "ceti13",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "170",
+ "region-1",
+ "True"
+ ],
+ [
+ "33",
+ "sfc-022",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "34",
+ "antalpha-super-server100120",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "40",
+ "region-1",
+ "True"
+ ],
+ [
+ "35",
+ "antalpha-super-server100130",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "35",
+ "region-1",
+ "False"
+ ],
+ [
+ "36",
+ "sfc-009",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "37",
+ "sfc-004",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "38",
+ "sfc-032",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "39",
+ "ns-ai-server007",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "25",
+ "region-1",
+ "False"
+ ],
+ [
+ "40",
+ "ceti12",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "170",
+ "region-1",
+ "True"
+ ],
+ [
+ "41",
+ "antalpha-super-server100115",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "30",
+ "region-1",
+ "False"
+ ],
+ [
+ "42",
+ "sfc-028",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "False"
+ ],
+ [
+ "43",
+ "ns-ai-server019",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "25",
+ "region-1",
+ "False"
+ ],
+ [
+ "44",
+ "ses-a6",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "155",
+ "region-1",
+ "True"
+ ],
+ [
+ "45",
+ "antalpha-super-server100153",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "50",
+ "region-1",
+ "True"
+ ],
+ [
+ "46",
+ "antalpha-super-server100156",
+ "NVIDIA-GeForce-RTX-4090",
+ "24564",
+ "33",
+ "region-1",
+ "False"
+ ],
+ [
+ "47",
+ "ses-a11",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "155",
+ "region-1",
+ "True"
+ ],
+ [
+ "48",
+ "sfc-023",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ],
+ [
+ "49",
+ "sfc-006",
+ "NVIDIA-H100-80GB-HBM3",
+ "81559",
+ "150",
+ "region-1",
+ "True"
+ ]
+ ],
+ "shape": {
+ "columns": 6,
+ "rows": 69
+ }
+ },
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " id \n",
+ " gpu_model \n",
+ " gpu_memory \n",
+ " price_per_hour \n",
+ " location \n",
+ " available \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " ceti14 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 150 \n",
+ " region-1 \n",
+ " True \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " l-hgx-05 \n",
+ " NVIDIA-H200 \n",
+ " 143771 \n",
+ " 220 \n",
+ " region-1 \n",
+ " True \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " sfc-016 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 150 \n",
+ " region-1 \n",
+ " True \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " l-hgx-01 \n",
+ " NVIDIA-H200 \n",
+ " 143771 \n",
+ " 225 \n",
+ " region-1 \n",
+ " True \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " antalpha-super-server100132 \n",
+ " NVIDIA-GeForce-RTX-4090 \n",
+ " 24564 \n",
+ " 35 \n",
+ " region-1 \n",
+ " False \n",
+ " \n",
+ " \n",
+ " ... \n",
+ " ... \n",
+ " ... \n",
+ " ... \n",
+ " ... \n",
+ " ... \n",
+ " ... \n",
+ " \n",
+ " \n",
+ " 64 \n",
+ " antalpha-super-server100164 \n",
+ " NVIDIA-GeForce-RTX-4090 \n",
+ " 24564 \n",
+ " 30 \n",
+ " region-1 \n",
+ " False \n",
+ " \n",
+ " \n",
+ " 65 \n",
+ " sfc-020 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 150 \n",
+ " region-1 \n",
+ " False \n",
+ " \n",
+ " \n",
+ " 66 \n",
+ " sfc-014 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 150 \n",
+ " region-1 \n",
+ " True \n",
+ " \n",
+ " \n",
+ " 67 \n",
+ " ceti15 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 150 \n",
+ " region-1 \n",
+ " False \n",
+ " \n",
+ " \n",
+ " 68 \n",
+ " ses-a5 \n",
+ " NVIDIA-H100-80GB-HBM3 \n",
+ " 81559 \n",
+ " 175 \n",
+ " region-1 \n",
+ " False \n",
+ " \n",
+ " \n",
+ "
\n",
+ "
69 rows × 6 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " id gpu_model gpu_memory \\\n",
+ "0 ceti14 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "1 l-hgx-05 NVIDIA-H200 143771 \n",
+ "2 sfc-016 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "3 l-hgx-01 NVIDIA-H200 143771 \n",
+ "4 antalpha-super-server100132 NVIDIA-GeForce-RTX-4090 24564 \n",
+ ".. ... ... ... \n",
+ "64 antalpha-super-server100164 NVIDIA-GeForce-RTX-4090 24564 \n",
+ "65 sfc-020 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "66 sfc-014 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "67 ceti15 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "68 ses-a5 NVIDIA-H100-80GB-HBM3 81559 \n",
+ "\n",
+ " price_per_hour location available \n",
+ "0 150 region-1 True \n",
+ "1 220 region-1 True \n",
+ "2 150 region-1 True \n",
+ "3 225 region-1 True \n",
+ "4 35 region-1 False \n",
+ ".. ... ... ... \n",
+ "64 30 region-1 False \n",
+ "65 150 region-1 False \n",
+ "66 150 region-1 True \n",
+ "67 150 region-1 False \n",
+ "68 175 region-1 False \n",
+ "\n",
+ "[69 rows x 6 columns]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "\n",
+ "async def show_gpus(filter_type=\"all\"):\n",
+ " data = await fetch_gpus(filter_type)\n",
+ " if data and \"instances\" in data:\n",
+ " instances = [\n",
+ " {\n",
+ " \"id\": inst[\"id\"],\n",
+ " \"gpu_model\": inst[\"hardware\"][\"gpus\"][0][\"model\"],\n",
+ " \"gpu_memory\": inst[\"hardware\"][\"gpus\"][0][\"ram\"],\n",
+ " \"price_per_hour\": inst[\"pricing\"][\"price\"][\"amount\"],\n",
+ " \"location\": inst[\"location\"][\"region\"],\n",
+ " \"available\": not inst[\"reserved\"]\n",
+ " and inst[\"gpus_reserved\"] < inst[\"gpus_total\"],\n",
+ " }\n",
+ " for inst in data[\"instances\"]\n",
+ " if \"gpus\" in inst[\"hardware\"] and inst[\"hardware\"][\"gpus\"]\n",
+ " ]\n",
+ " df = pd.DataFrame(instances)\n",
+ " display(df)\n",
+ " return df\n",
+ " else:\n",
+ " print(\"No data found or API error.\")\n",
+ "\n",
+ "\n",
+ "# Example usage:\n",
+ "df = asyncio.run(show_gpus(\"available_only\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "151d71c6",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Saved to marketplace_output.txt\n"
+ ]
+ }
+ ],
+ "source": [
+ "if df is not None:\n",
+ " df.to_csv(\"marketplace_output.txt\", index=False, sep=\"\\t\")\n",
+ " print(\"Saved to marketplace_output.txt\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..5823070
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,136 @@
+aenum==3.1.15
+aiofiles==24.1.0
+aiohappyeyeballs==2.4.4
+aiohttp==3.11.11
+aiosignal==1.3.2
+annotated-types==0.7.0
+anthropic==0.45.2
+anyio==4.8.0
+arrow==0.15.2
+attrs==24.3.0
+av==14.1.0
+azure-cognitiveservices-speech==1.42.0
+boto3==1.36.13
+botocore==1.36.13
+cachetools==5.5.1
+certifi==2025.1.31
+charset-normalizer==3.4.1
+Click==7.0
+colorama==0.4.6
+coloredlogs==15.0.1
+ctranslate2==4.5.0
+cursor==1.3.5
+daily==0.2.1
+daily-python==0.14.2
+dataclasses-json==0.6.7
+deepgram-sdk==3.9.0
+deprecation==2.1.0
+distro==1.9.0
+fal_client==0.5.8
+fastapi==0.115.8
+faster-whisper==1.1.1
+filelock==3.17.0
+flatbuffers==25.1.24
+frozenlist==1.5.0
+fsspec==2025.2.0
+future==1.0.0
+google-ai-generativelanguage==0.6.15
+google-api-core==2.24.1
+google-api-python-client==1.7.11
+google-auth==2.38.0
+google-auth-httplib2==0.0.3
+google-auth-oauthlib==0.4.1
+google-cloud-texttospeech==2.24.0
+google-generativeai==0.8.4
+googleapis-common-protos==1.66.0
+grpcio==1.70.0
+grpcio-status==1.70.0
+gspread==3.1.0
+h11==0.14.0
+halo==0.0.28
+httpcore==1.0.7
+httplib2==0.22.0
+httpx==0.27.2
+httpx-sse==0.4.0
+huggingface-hub==0.28.1
+humanfriendly==10.0
+idna==3.10
+Jinja2==3.1.5
+jiter==0.8.2
+jmespath==1.0.1
+jsonpatch==1.33
+jsonpointer==3.0.0
+langchain==0.3.17
+langchain-core==0.3.33
+langchain-text-splitters==0.3.5
+langsmith==0.3.5
+livekit==0.19.1
+lmnt==1.1.7
+log-symbols==0.0.14
+loguru==0.7.3
+Markdown==3.7
+MarkupSafe==3.0.2
+marshmallow==3.26.1
+mpmath==1.3.0
+multidict==6.1.0
+mypy-extensions==1.0.0
+networkx==3.4.2
+numpy==1.26.4
+oauthlib==3.2.2
+onnxruntime==1.20.1
+openai==1.60.2
+openpipe==4.45.0
+orjson==3.10.15
+packaging==24.2
+pillow==11.1.0
+pipecat-ai==0.0.54
+propcache==0.2.1
+proto-plus==1.26.0
+protobuf==5.29.3
+pyasn1==0.6.1
+pyasn1_modules==0.4.1
+PyAudio==0.2.14
+pydantic==2.10.6
+pydantic_core==2.27.2
+pyht==0.1.12
+pyloudnorm==0.1.1
+pyparsing==3.2.1
+python-dateutil==2.8.0
+python-dotenv==1.0.1
+PyYAML==6.0.2
+regex==2024.11.6
+requests==2.32.3
+requests-oauthlib==2.0.0
+requests-toolbelt==1.0.0
+rsa==4.9
+s3transfer==0.11.2
+safetensors==0.5.2
+scipy==1.15.1
+setuptools==75.8.0
+silero-vad==5.1.2
+six==1.17.0
+slackclient==2.2.1
+sniffio==1.3.1
+soxr==0.5.0.post1
+spinners==0.0.24
+SQLAlchemy==2.0.37
+sseclient-py==1.7.2
+starlette==0.45.3
+sympy==1.13.1
+tenacity==9.0.0
+termcolor==2.5.0
+together==0.2.4
+tokenizers==0.21.0
+torch==2.6.0
+torchaudio==2.6.0
+tqdm==4.67.1
+transformers==4.48.2
+typer==0.0.9
+types-protobuf==5.29.1.20241207
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+uritemplate==3.0.1
+urllib3==2.3.0
+websockets==13.1
+yarl==1.18.3
+zstandard==0.23.0