256 changes: 256 additions & 0 deletions examples/tracing/openai/responses_api_example.py
@@ -0,0 +1,256 @@
#!/usr/bin/env python3
"""
Example demonstrating OpenAI Responses API tracing with Openlayer.

This example shows how to use both the Chat Completions API and the new Responses API
with Openlayer tracing enabled. The same trace_openai() function supports both APIs
transparently.
"""

import asyncio
import os

# Import OpenAI and Openlayer
import openai
from openlayer.lib import trace_openai, trace_async_openai


def setup_environment():
    """Set placeholder environment variables for the example.

    setdefault ensures any keys already present in your shell are not
    overwritten by these placeholders.
    """
    # OpenAI API key
    os.environ.setdefault("OPENAI_API_KEY", "your-openai-api-key-here")

    # Openlayer configuration
    os.environ.setdefault("OPENLAYER_API_KEY", "your-openlayer-api-key-here")
    os.environ.setdefault("OPENLAYER_INFERENCE_PIPELINE_ID", "your-pipeline-id-here")


def chat_completions_example():
"""Example using the traditional Chat Completions API with tracing."""
print("=== Chat Completions API Example ===")

# Create and trace OpenAI client
client = openai.OpenAI()
traced_client = trace_openai(client)

# Use Chat Completions API normally - tracing happens automatically
response = traced_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the capital of France?"},
],
temperature=0.7,
max_tokens=100,
)

print(f"Chat Completion Response: {response.choices[0].message.content}")
print("✓ Chat Completions API call traced successfully")


def responses_api_example():
"""Example using the new Responses API with tracing."""
print("\n=== Responses API Example ===")

# Create and trace OpenAI client
client = openai.OpenAI()
traced_client = trace_openai(client)

# Check if Responses API is available
if not hasattr(traced_client, "responses"):
print("⚠️ Responses API not available in this OpenAI client version")
return

# Use Responses API with different parameter format
response = traced_client.responses.create(
model="gpt-4o-mini",
input="What is the capital of Italy?",
instructions="Provide a brief, accurate answer.",
max_output_tokens=50,
temperature=0.5,
)

    # Recent SDK versions expose the generated text via `output_text`;
    # fall back to the raw response object otherwise.
    print(f"Responses API Response: {getattr(response, 'output_text', response)}")
print("✓ Responses API call traced successfully")


def streaming_chat_completions_example():
"""Example using streaming Chat Completions API with tracing."""
print("\n=== Streaming Chat Completions Example ===")

# Create and trace OpenAI client
client = openai.OpenAI()
traced_client = trace_openai(client)

# Streaming chat completion
stream = traced_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": "Count from 1 to 5 slowly."},
],
stream=True,
temperature=0.7,
)

print("Streaming response: ", end="", flush=True)
    for chunk in stream:
        # Some chunks (e.g. a final usage chunk) may carry no choices.
        if chunk.choices and chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="", flush=True)
print()
print("✓ Streaming Chat Completions call traced successfully")


def streaming_responses_api_example():
"""Example using streaming Responses API with tracing."""
print("\n=== Streaming Responses API Example ===")

# Create and trace OpenAI client
client = openai.OpenAI()
traced_client = trace_openai(client)

# Check if Responses API is available
if not hasattr(traced_client, "responses"):
print("⚠️ Responses API not available in this OpenAI client version")
return

# Streaming responses
stream = traced_client.responses.create(
model="gpt-4o-mini",
input="Tell me a short joke about programming.",
stream=True,
max_output_tokens=100,
)

print("Streaming response: ", end="", flush=True)
    for event in stream:
        # The Responses API streams typed events; text chunks arrive as
        # "response.output_text.delta" events. getattr keeps this tolerant
        # of other event types and older SDK versions.
        if getattr(event, "type", "") == "response.output_text.delta":
            print(event.delta, end="", flush=True)
print()
print("✓ Streaming Responses API call traced successfully")


def function_calling_example():
"""Example using function calling with both APIs."""
print("\n=== Function Calling Example ===")

# Create and trace OpenAI client
client = openai.OpenAI()
traced_client = trace_openai(client)

# Define a simple function
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location",
"parameters": {
"type": "object",
"properties": {"location": {"type": "string", "description": "City name"}},
"required": ["location"],
},
},
}
]

# Chat Completions with function calling
response = traced_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": "What's the weather like in Tokyo?"},
],
tools=tools,
tool_choice="auto",
)

print(f"Function call response: {response.choices[0].message}")
print("✓ Function calling with Chat Completions traced successfully")

# Responses API with function calling (if available)
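    # Note: the Responses API expects a flattened tool schema (fields at the
    # top level rather than under a nested "function" key); a sketch of the
    # same tool in that format:
    responses_tools = [
        {
            "type": "function",
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string", "description": "City name"}},
                "required": ["location"],
            },
        }
    ]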
if hasattr(traced_client, "responses"):
try:
response = traced_client.responses.create(
model="gpt-4o-mini",
input="What's the weather like in London?",
                tools=responses_tools,
max_tool_calls=1,
)
print(f"Responses API function call: {response}")
print("✓ Function calling with Responses API traced successfully")
except Exception as e:
print(f"⚠️ Responses API function calling not yet supported: {e}")


async def async_examples():
"""Examples using async clients."""
print("\n=== Async Examples ===")

# Create and trace async OpenAI client
client = openai.AsyncOpenAI()
traced_client = trace_async_openai(client)

# Async chat completion
response = await traced_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": "What is 2 + 2?"},
],
temperature=0.1,
)

print(f"Async chat response: {response.choices[0].message.content}")
print("✓ Async Chat Completions traced successfully")

# Async responses (if available)
if hasattr(traced_client, "responses"):
try:
response = await traced_client.responses.create(
model="gpt-4o-mini",
input="What is 3 + 3?",
max_output_tokens=20,
)
print(f"Async responses: {response}")
print("✓ Async Responses API traced successfully")
except Exception as e:
print(f"⚠️ Async Responses API error: {e}")


def main():
"""Run all examples."""
print("OpenAI Chat Completions + Responses API Tracing Examples")
print("=" * 60)

# Setup (in real usage, set these in your environment)
setup_environment()

try:
# Sync examples
chat_completions_example()
responses_api_example()
streaming_chat_completions_example()
streaming_responses_api_example()
function_calling_example()

        # Async examples
        asyncio.run(async_examples())

print("\n🎉 All examples completed successfully!")
print("\nKey Benefits of the New Implementation:")
print("✓ Backward compatibility - existing Chat Completions code works unchanged")
print("✓ Responses API support - new unified API is automatically traced")
print("✓ Streaming support - both APIs support streaming with proper trace collection")
print("✓ Function calling - tool/function calls are properly captured in traces")
print("✓ Enhanced metadata - Responses API provides richer traceability information")
print("✓ Async support - both sync and async clients work seamlessly")

except Exception as e:
print(f"❌ Example failed: {e}")
print("Note: This example requires valid OpenAI API keys and Openlayer configuration")


if __name__ == "__main__":
main()