API Reference
Traces
Get Trace
Get a trace by its id
GET /traces/{trace_id}
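For illustration, here is a raw request sketch in Python using requests. The base URL is not specified on this page, so it is read from a hypothetical OPPER_API_BASE environment variable; the trace id is the placeholder UUID used in the example response below.

import os
import requests

# OPPER_API_BASE is a hypothetical placeholder for your API base URL.
base_url = os.environ["OPPER_API_BASE"]
trace_id = "3c90c3cc-0d44-4b50-8888-8dd25736052a"

resp = requests.get(
    f"{base_url}/traces/{trace_id}",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
)
resp.raise_for_status()
trace = resp.json()
print(trace["status"], trace["duration_ms"], trace["total_tokens"])

The full example below uses the opperai Python SDK instead: it creates a function, calls it to generate a trace, and then fetches and analyzes that trace.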
from opperai import Opper
import time

opper = Opper(http_bearer="YOUR_API_KEY")

# First, create a function and call it to generate a trace
unique_name = f"research_assistant_{int(time.time())}"
created_function = opper.functions.create(
    name=unique_name,
    description="Research assistant that provides detailed analysis",
    instructions="You are a research assistant. Provide detailed, well-researched answers with multiple perspectives and cite your reasoning process.",
    model="openai/gpt-4o-mini",
)
print(f"Created function: {created_function.name}")

# Call the function to generate a trace
response = opper.functions.call(
    function_id=created_function.id,
    input={
        "question": "What are the key factors that led to the success of the Tesla Model S?",
        "context": "I'm researching electric vehicle market adoption",
    },
    tags={"research_type": "automotive", "complexity": "detailed"},
)
print(f"Function call completed. Span ID: {response.span_id}")

# Get the trace ID from the response (spans are part of traces)
span = opper.spans.get(span_id=response.span_id)
trace_id = span.trace_id
print(f"Generated trace ID: {trace_id}")


def print_span_hierarchy(spans, parent_id, depth):
    """Helper function to print span hierarchy"""
    children = [s for s in spans if s.parent_id == parent_id]
    for child in children:
        indent = "  " * depth
        print(f"{indent}└─ {child.name} ({child.duration_ms}ms)")
        print_span_hierarchy(spans, child.id, depth + 1)


# Get detailed trace with all spans
trace = opper.traces.get(trace_id=trace_id)

print(f"Trace Analysis: {trace.name}")
print(f"ID: {trace.id}")
print(f"Duration: {trace.duration_ms}ms")
print(f"Status: {trace.status}")
print(f"Total Tokens: {trace.total_tokens:,}")
if trace.input:
    print(f"Input: {trace.input[:100]}...")
if trace.output:
    print(f"Output: {trace.output[:100]}...")

print(f"\nSpan Breakdown ({len(trace.spans)} spans):")

# Group spans by type for analysis
span_types = {}
for span in trace.spans:
    span_type = span.type or "unknown"
    if span_type not in span_types:
        span_types[span_type] = []
    span_types[span_type].append(span)

for span_type, spans in span_types.items():
    print(f"\n{span_type.upper()} spans ({len(spans)}):")
    for span in spans:
        print(f"  - {span.name}")
        print(
            f"    Duration: {span.duration_ms}ms"
            if span.duration_ms
            else "    Duration: N/A"
        )
        if span.data and span.data.model:
            print(f"    Model: {span.data.model}")
        if span.data and span.data.total_tokens:
            print(f"    Tokens: {span.data.total_tokens}")
        if span.score:
            print(f"    Score: {span.score}/10")
        if span.error:
            print(f"    Error: {span.error}")
        # Show metrics if available
        if span.metrics:
            print("    Metrics:")
            for metric in span.metrics:
                print(f"      - {metric.dimension}: {metric.value}")

# Analyze execution flow
print("\nExecution Flow:")
root_spans = [s for s in trace.spans if not s.parent_id]
for root_span in root_spans:
    print(root_span.name)
    print_span_hierarchy(trace.spans, root_span.id, 1)

# Performance insights
print("\nPerformance Insights:")
if trace.duration_ms:
    total_duration = trace.duration_ms

    generation_spans = [s for s in trace.spans if s.type == "generation"]
    if generation_spans:
        generation_time = sum(s.duration_ms for s in generation_spans if s.duration_ms)
        print(
            f"LLM Generation Time: {generation_time}ms ({generation_time / total_duration * 100:.1f}%)"
        )

    call_spans = [s for s in trace.spans if s.type == "call"]
    if call_spans:
        call_durations = [s.duration_ms for s in call_spans if s.duration_ms]
        if call_durations:
            avg_call_duration = sum(call_durations) / len(call_durations)
            print(f"Average Call Duration: {avg_call_duration:.1f}ms")
{
  "id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
  "start_time": "2023-11-07T05:31:56Z",
  "end_time": "2023-11-07T05:31:56Z",
  "duration_ms": 123,
  "status": "<string>",
  "name": "<string>",
  "input": "<string>",
  "output": "<string>",
  "total_tokens": 123,
  "spans": [
    {
      "id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
      "name": "<string>",
      "start_time": "2023-11-07T05:31:56Z",
      "type": "generation",
      "parent_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
      "end_time": "2023-11-07T05:31:56Z",
      "duration_ms": 123,
      "error": "<string>",
      "meta": {},
      "data": {
        "input": "<string>",
        "output": "<string>",
        "total_tokens": 123,
        "model": "<string>",
        "instructions": "<string>",
        "function": "<string>",
        "tags": [
          "<string>"
        ],
        "score": 123,
        "generation_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
        "observations": "<string>"
      },
      "metrics": [
        {
          "id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
          "dimension": "latency",
          "value": 123,
          "created_at": "2023-11-07T05:31:56Z",
          "comment": "Expert feedback"
        }
      ],
      "score": 123
    }
  ]
}
Authorizations
Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
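In practice the token is usually read from the environment rather than hard-coded. A small sketch, assuming the key is stored in an OPPER_API_KEY environment variable (the variable name is an illustration, not a requirement):

import os
from opperai import Opper

# http_bearer supplies the token for the Bearer authentication header described above.
opper = Opper(http_bearer=os.environ["OPPER_API_KEY"])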
Path Parameters
trace_id (string, required): The id of the trace to get.
Response
200 (application/json): Successful Response
The response is of type object.
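As a sketch of working with the raw JSON object (rather than SDK models), the snippet below groups the spans of a fetched trace by type and totals their token counts and durations, guarding against missing fields since duration_ms, data, and total_tokens can be absent:

from collections import defaultdict

def summarize_spans(trace: dict) -> dict:
    """Group spans by type and total their token usage and duration."""
    summary = defaultdict(lambda: {"count": 0, "tokens": 0, "duration_ms": 0})
    for span in trace.get("spans", []):
        span_type = span.get("type") or "unknown"
        data = span.get("data") or {}
        summary[span_type]["count"] += 1
        summary[span_type]["tokens"] += data.get("total_tokens") or 0
        summary[span_type]["duration_ms"] += span.get("duration_ms") or 0
    return dict(summary)

# Example: trace = resp.json() from the raw request sketch above
# print(summarize_spans(trace))

Field names follow the example response shown above.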