API Reference
Platform APIs
- Models
- Functions
- Observability
- Knowledge base
- Datasets
- Other
traces
List Traces
List traces
GET
/
traces
Copy
Ask AI
from opperai import Opper

# Authenticate with your Opper API key (bearer token).
opper = Opper(http_bearer="YOUR_API_KEY")

# --- List all traces with pagination ---------------------------------------
# limit must be in [1, 100]; offset must be >= 0 (see Query Parameters below).
traces = opper.traces.list(limit=20, offset=0)
print(f"Found {traces.meta.total_count} traces")
print(f"Showing {len(traces.data)} traces")

for trace in traces.data:
    print(f"\nTrace: {trace.name}")
    print(f" ID: {trace.id}")
    print(f" Status: {trace.status}")
    # duration_ms is only meaningful for traces that have both timestamps.
    if trace.start_time and trace.end_time:
        print(f" Duration: {trace.duration_ms}ms")
    if trace.total_tokens:
        print(f" Tokens: {trace.total_tokens:,}")
    # Truncate potentially long payloads to the first 50 characters.
    if trace.input:
        print(f" Input: {trace.input[:50]}...")
    if trace.output:
        print(f" Output: {trace.output[:50]}...")

# --- Filter traces by name --------------------------------------------------
# `name` matches against the name of the trace's root span.
print("\nSearching for 'question' traces...")
filtered_traces = opper.traces.list(name="question", limit=10)
print(f"Found {len(filtered_traces.data)} matching traces")
for trace in filtered_traces.data:
    print(f" \u2022 {trace.name} - {trace.duration_ms}ms")

# --- Analytics example: analyze performance ---------------------------------
print("\nPerformance Analysis:")
if traces.data:
    # Skip traces with missing/zero metrics so the averages aren't skewed.
    durations = [t.duration_ms for t in traces.data if t.duration_ms]
    tokens = [t.total_tokens for t in traces.data if t.total_tokens]
    if durations:
        avg_duration = sum(durations) / len(durations)
        print(f"Average duration: {avg_duration:.0f}ms")
    if tokens:
        avg_tokens = sum(tokens) / len(tokens)
        print(f"Average tokens: {avg_tokens:.0f}")
    successful = len([t for t in traces.data if t.status == "success"])
    print(
        f"Success rate: {successful}/{len(traces.data)} "
        f"({successful / len(traces.data) * 100:.1f}%)"
    )
Copy
Ask AI
{
"meta": {
"total_count": 1
},
"data": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"start_time": "2023-11-07T05:31:56Z",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"status": "<string>",
"name": "<string>",
"input": "<string>",
"output": "<string>",
"total_tokens": 123
}
]
}
Authorizations
Bearer authentication header of the form `Bearer <token>`, where `<token>` is your auth token.
Query Parameters
The name of the trace to filter by, the name of a trace is the name of the root span of the trace
The offset to start the list from
Required range:
x >= 0
The number of traces to return
Required range:
1 <= x <= 100
Response
200
application/json
Successful Response
The response is of type `object`.
Copy
Ask AI
from opperai import Opper

# Authenticate with your Opper API key (bearer token).
opper = Opper(http_bearer="YOUR_API_KEY")

# --- List all traces with pagination ---------------------------------------
# limit must be in [1, 100]; offset must be >= 0 (see Query Parameters below).
traces = opper.traces.list(limit=20, offset=0)
print(f"Found {traces.meta.total_count} traces")
print(f"Showing {len(traces.data)} traces")

for trace in traces.data:
    print(f"\nTrace: {trace.name}")
    print(f" ID: {trace.id}")
    print(f" Status: {trace.status}")
    # duration_ms is only meaningful for traces that have both timestamps.
    if trace.start_time and trace.end_time:
        print(f" Duration: {trace.duration_ms}ms")
    if trace.total_tokens:
        print(f" Tokens: {trace.total_tokens:,}")
    # Truncate potentially long payloads to the first 50 characters.
    if trace.input:
        print(f" Input: {trace.input[:50]}...")
    if trace.output:
        print(f" Output: {trace.output[:50]}...")

# --- Filter traces by name --------------------------------------------------
# `name` matches against the name of the trace's root span.
print("\nSearching for 'question' traces...")
filtered_traces = opper.traces.list(name="question", limit=10)
print(f"Found {len(filtered_traces.data)} matching traces")
for trace in filtered_traces.data:
    print(f" \u2022 {trace.name} - {trace.duration_ms}ms")

# --- Analytics example: analyze performance ---------------------------------
print("\nPerformance Analysis:")
if traces.data:
    # Skip traces with missing/zero metrics so the averages aren't skewed.
    durations = [t.duration_ms for t in traces.data if t.duration_ms]
    tokens = [t.total_tokens for t in traces.data if t.total_tokens]
    if durations:
        avg_duration = sum(durations) / len(durations)
        print(f"Average duration: {avg_duration:.0f}ms")
    if tokens:
        avg_tokens = sum(tokens) / len(tokens)
        print(f"Average tokens: {avg_tokens:.0f}")
    successful = len([t for t in traces.data if t.status == "success"])
    print(
        f"Success rate: {successful}/{len(traces.data)} "
        f"({successful / len(traces.data) * 100:.1f}%)"
    )
Copy
Ask AI
{
"meta": {
"total_count": 1
},
"data": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"start_time": "2023-11-07T05:31:56Z",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"status": "<string>",
"name": "<string>",
"input": "<string>",
"output": "<string>",
"total_tokens": 123
}
]
}
Assistant
Responses are generated using AI and may contain mistakes.