OpenAI Agents
Integrate Orq.ai with OpenAI Agents using OpenTelemetry
Getting Started
OpenAI Agents and the Assistants API enable powerful AI-driven automation through structured conversations and tool calling. Tracing these interactions with Orq.ai provides deep insights into agent performance, token usage, tool utilization, and conversation flows to optimize your AI applications.
Prerequisites
Before you begin, ensure you have:
- An Orq.ai account and API key
- OpenAI API key and access to the Assistants API
- Python 3.8+
Install Dependencies
# Core OpenTelemetry packages
pip install opentelemetry-sdk opentelemetry-exporter-otlp
# OpenAI SDK
pip install openai
# Optional: Framework-specific instrumentation
pip install openlit traceloop-sdk
Configure Orq.ai
Set up your environment variables to connect to Orq.ai's OpenTelemetry collector:
Unix/Linux/macOS:
export OTEL_EXPORTER_OTLP_ENDPOINT="https://api.orq.ai/v2/otel"
export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer <ORQ_API_KEY>"
export OTEL_RESOURCE_ATTRIBUTES="service.name=openai-agents-app,service.version=1.0.0"
export OPENAI_API_KEY="<YOUR_OPENAI_API_KEY>"
Windows (PowerShell):
$env:OTEL_EXPORTER_OTLP_ENDPOINT = "https://api.orq.ai/v2/otel"
$env:OTEL_EXPORTER_OTLP_HEADERS = "Authorization=Bearer <ORQ_API_KEY>"
$env:OTEL_RESOURCE_ATTRIBUTES = "service.name=openai-agents-app,service.version=1.0.0"
$env:OPENAI_API_KEY = "<YOUR_OPENAI_API_KEY>"
Using .env file:
OTEL_EXPORTER_OTLP_ENDPOINT=https://api.orq.ai/v2/otel
OTEL_EXPORTER_OTLP_HEADERS=Authorization=Bearer <ORQ_API_KEY>
OTEL_RESOURCE_ATTRIBUTES=service.name=openai-agents-app,service.version=1.0.0
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
Integrations
Choose your preferred OpenTelemetry framework for collecting traces:
OpenLit
Auto-instrumentation with minimal setup:
import openlit
from openai import OpenAI

# Point OpenLit's auto-instrumentation at Orq.ai's OTLP collector.
# After init(), OpenAI SDK calls are traced with no further code changes.
openlit.init(
    otlp_headers="Authorization=Bearer <ORQ_API_KEY>",
    otlp_endpoint="https://api.orq.ai/v2/otel",
)

client = OpenAI()

# Any Assistants API usage through this client is captured automatically.
assistant = client.beta.assistants.create(
    model="gpt-4",
    name="Data Analyst",
    tools=[{"type": "code_interpreter"}],
    instructions="You are a data analyst assistant.",
)
OpenLLMetry
Non-intrusive tracing with decorators:
from traceloop.sdk import Traceloop
from traceloop.sdk.decorators import workflow
from openai import OpenAI

# Traceloop picks up OTEL_EXPORTER_OTLP_* from the environment.
Traceloop.init()

client = OpenAI()


@workflow(name="openai-agents-workflow")
def create_assistant_conversation():
    """Create an assistant, open a thread with one user message, and
    start a run. Returns the run object (not yet completed)."""
    # A file-search-capable research assistant.
    research_assistant = client.beta.assistants.create(
        name="Research Assistant",
        instructions="You help with research tasks.",
        model="gpt-4",
        tools=[{"type": "file_search"}],
    )

    # One conversation thread seeded with a single user request.
    conversation = client.beta.threads.create()
    client.beta.threads.messages.create(
        thread_id=conversation.id,
        role="user",
        content="Analyze this dataset for trends",
    )

    # Kick off the assistant on the thread; the run proceeds server-side.
    return client.beta.threads.runs.create(
        thread_id=conversation.id,
        assistant_id=research_assistant.id,
    )


create_assistant_conversation()
MLflow
MLOps-focused tracing:
import mlflow
from openai import OpenAI

# Enable MLflow's automatic tracing of OpenAI SDK calls.
mlflow.openai.autolog()

client = OpenAI()

# Function-calling tool schema for the task-execution assistant.
TASK_TOOL = {
    "type": "function",
    "function": {
        "name": "execute_task",
        "description": "Execute a given task",
        "parameters": {
            "type": "object",
            "properties": {
                "task": {"type": "string", "description": "Task to execute"}
            },
        },
    },
}


@mlflow.trace
def run_assistant_task(task_description):
    """Create a tool-enabled assistant, post *task_description* to a new
    thread, start a run, and return the created resource IDs."""
    helper = client.beta.assistants.create(
        name="Task Assistant",
        instructions="You are a helpful task execution assistant.",
        model="gpt-4",
        tools=[TASK_TOOL],
    )

    conversation = client.beta.threads.create()
    client.beta.threads.messages.create(
        thread_id=conversation.id,
        role="user",
        content=task_description,
    )

    active_run = client.beta.threads.runs.create(
        thread_id=conversation.id,
        assistant_id=helper.id,
    )

    return {
        "assistant_id": helper.id,
        "thread_id": conversation.id,
        "run_id": active_run.id,
    }


run_assistant_task("Create a summary of quarterly sales data")
OpenLLMetry (Advanced Configuration)
Comprehensive OpenAI instrumentation:
import time

from traceloop.sdk import Traceloop
from openai import OpenAI

# Initialize tracing
Traceloop.init(
    app_name="openai-agents",
    disable_batch=True
)

client = OpenAI()


def create_coding_assistant():
    """Create a code-interpreter assistant, send one debugging request,
    wait for the run to leave its pending states, and return the
    thread's messages."""
    assistant = client.beta.assistants.create(
        name="Coding Assistant",
        instructions="You are a coding assistant that helps with programming tasks.",
        model="gpt-4",
        tools=[{"type": "code_interpreter"}]
    )

    thread = client.beta.threads.create()

    # Seed the thread with the user's request.
    client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="Help me debug this Python script"
    )

    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id
    )

    # Poll for completion. Sleep between retrievals: a tight busy-wait
    # loop hammers the API and can trip rate limits.
    while run.status in ['queued', 'in_progress']:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id
        )

    # Return the full conversation once the run has settled.
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    return messages


create_coding_assistant()
Examples
Basic Assistants API Usage
import openlit
from openai import OpenAI

# Route OpenLit's auto-instrumented traces to Orq.ai.
openlit.init(
    otlp_headers="Authorization=Bearer <ORQ_API_KEY>",
    otlp_endpoint="https://api.orq.ai/v2/otel",
)

client = OpenAI()


def basic_assistant_example():
    """Walk the minimal Assistants API flow (assistant -> thread ->
    message -> run) and return the thread and run IDs."""
    # A code-interpreter-enabled math tutor.
    tutor = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Help solve math problems step by step.",
        tools=[{"type": "code_interpreter"}],
        model="gpt-4",
    )

    # Fresh conversation thread seeded with one question.
    conversation = client.beta.threads.create()
    client.beta.threads.messages.create(
        thread_id=conversation.id,
        role="user",
        content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
    )

    # Start the run; per-run instructions override the assistant defaults.
    tutor_run = client.beta.threads.runs.create(
        thread_id=conversation.id,
        assistant_id=tutor.id,
        instructions="Please be concise in your explanations.",
    )

    return {"thread_id": conversation.id, "run_id": tutor_run.id}


result = basic_assistant_example()
Advanced Example with Function Calling
import json
import time

import openlit
from openai import OpenAI

openlit.init(
    otlp_endpoint="https://api.orq.ai/v2/otel",
    otlp_headers="Authorization=Bearer <ORQ_API_KEY>"
)

client = OpenAI()


def get_weather(location):
    """Mock weather function"""
    return f"The weather in {location} is sunny, 72°F"


def advanced_assistant_with_tools():
    """Run a function-calling assistant end to end: create it, ask a
    weather question, service tool calls as the run requests them, and
    return the thread's final messages.

    Raises:
        RuntimeError: if the run ends in a terminal failure state.
    """
    # JSON schema for the single tool the assistant may call.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA"
                        }
                    },
                    "required": ["location"]
                }
            }
        }
    ]

    # Create assistant with tools
    assistant = client.beta.assistants.create(
        name="Weather Assistant",
        instructions="You are a weather assistant. Use the get_weather function to provide weather information.",
        tools=tools,
        model="gpt-4"
    )

    # Create thread and message
    thread = client.beta.threads.create()
    client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="What's the weather like in Boston?"
    )

    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id
    )

    # Poll the run until it finishes, servicing tool calls on the way.
    while True:
        run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
        if run.status == 'completed':
            break
        elif run.status == 'requires_action':
            # The model wants one or more tool results before continuing.
            tool_outputs = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                if tool_call.function.name == "get_weather":
                    arguments = json.loads(tool_call.function.arguments)
                    weather_result = get_weather(arguments['location'])
                    tool_outputs.append({
                        "tool_call_id": tool_call.id,
                        "output": weather_result
                    })
            # Submit tool outputs
            run = client.beta.threads.runs.submit_tool_outputs(
                thread_id=thread.id,
                run_id=run.id,
                tool_outputs=tool_outputs
            )
        elif run.status in ('failed', 'cancelled', 'expired'):
            # Bail out instead of spinning forever on a terminal failure.
            raise RuntimeError(f"Run ended with status: {run.status}")
        # Still pending: wait before polling again so we don't busy-wait
        # against the API and hit rate limits.
        time.sleep(1)

    # Get the final messages
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    return messages


messages = advanced_assistant_with_tools()
Custom Spans for Agent Operations
from opentelemetry import trace
import openlit
from openai import OpenAI

openlit.init(
    otlp_endpoint="https://api.orq.ai/v2/otel",
    otlp_headers="Authorization=Bearer <ORQ_API_KEY>"
)

# Named tracer for hand-rolled spans alongside OpenLit's auto-instrumentation.
tracer = trace.get_tracer("openai-agents")

client = OpenAI()


def agent_workflow_with_custom_spans():
    """Wrap each phase of an assistant workflow (create, thread, run) in
    its own custom span and return the created resource IDs."""
    with tracer.start_as_current_span("agent-workflow") as workflow_span:
        workflow_span.set_attribute("workflow.type", "research_assistant")

        # Phase 1: assistant creation, annotated with its ID and model.
        with tracer.start_as_current_span("assistant-creation") as creation_span:
            researcher = client.beta.assistants.create(
                name="Research Assistant",
                instructions="You are a research assistant specialized in data analysis.",
                tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
                model="gpt-4"
            )
            creation_span.set_attribute("assistant.id", researcher.id)
            creation_span.set_attribute("assistant.model", "gpt-4")

        # Phase 2: thread setup plus the initial user message.
        with tracer.start_as_current_span("thread-management") as thread_span:
            conversation = client.beta.threads.create()
            thread_span.set_attribute("thread.id", conversation.id)
            user_message = client.beta.threads.messages.create(
                thread_id=conversation.id,
                role="user",
                content="Analyze the trends in the uploaded dataset"
            )
            thread_span.set_attribute(
                "message.content_length",
                len(user_message.content[0].text.value)
            )

        # Phase 3: kick off the run and record its initial status.
        with tracer.start_as_current_span("run-execution") as execution_span:
            active_run = client.beta.threads.runs.create(
                thread_id=conversation.id,
                assistant_id=researcher.id
            )
            execution_span.set_attribute("run.id", active_run.id)
            execution_span.set_attribute("run.status", active_run.status)

        workflow_span.set_attribute("workflow.success", True)
        return {
            "assistant": researcher.id,
            "thread": conversation.id,
            "run": active_run.id,
        }


result = agent_workflow_with_custom_spans()
Next Steps
✅ Verify traces: Check your Orq.ai dashboard to see incoming traces
✅ Add custom attributes: Enhance traces with business-specific metadata
✅ Set up alerts: Configure monitoring for performance degradation
✅ Explore metrics: Use trace data for performance optimization
Related Documentation
Support
Updated about 23 hours ago