List Metrics
List metrics for a span
GET /spans/{span_id}/metrics
from datetime import datetime, timezone

from opperai import Opper

opper = Opper(http_bearer="YOUR_API_KEY")

# First, create a span and add multiple metrics to list
created_span = opper.spans.create(
    name="model_performance_analysis",
    start_time=datetime.now(timezone.utc),
    type="evaluation",
    input="Analyze the performance of our latest model deployment",
    output="Model shows 15% improvement in accuracy, 20% reduction in latency, and high user satisfaction scores.",
    meta={"model_version": "v2.1", "deployment_id": "deploy_456"},
)

print(f"Created span with ID: {created_span.id}")

# Add multiple metrics to demonstrate the list functionality
metrics_to_add = [
    ("accuracy_score", 0.92, "Model accuracy on test dataset"),
    ("latency_ms", 340.5, "Average response latency in milliseconds"),
    ("user_satisfaction", 8.7, "User satisfaction score (1-10)"),
    ("throughput_rps", 125.3, "Requests per second throughput"),
    ("error_rate", 0.02, "Error rate percentage"),
]

for dimension, value, comment in metrics_to_add:
    opper.span_metrics.create_metric(
        span_id=created_span.id, dimension=dimension, value=value, comment=comment
    )

print("Added performance metrics to span")

# List all metrics for the span
metrics = opper.span_metrics.list(span_id=created_span.id, offset=0, limit=20)

print(f"Total metrics for span: {metrics.meta.total_count}")
print(f"Showing {len(metrics.data)} metrics:")
print("=" * 50)

for metric in metrics.data:
    print(f"Dimension: {metric.dimension}")
    print(f"Value: {metric.value}")
    if metric.comment:
        print(f"Comment: {metric.comment}")
    print(f"Created: {metric.created_at}")
    print("---")
# Calculate an average over the quality-related metrics (accuracy and satisfaction)
quality_metrics = [
    m for m in metrics.data
    if m.dimension in ("accuracy_score", "user_satisfaction")
]
if quality_metrics:
    avg_quality = sum(m.value for m in quality_metrics) / len(quality_metrics)
    print(f"\nAverage quality score: {avg_quality:.2f}")
{
  "meta": {
    "total_count": 1
  },
  "data": [
    {
      "dimension": "<string>",
      "value": 123,
      "comment": "<string>",
      "id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
      "span_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
      "created_at": "2023-11-07T05:31:56Z"
    }
  ]
}
Authorizations
Authorization: Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
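With the Python SDK, the API key is supplied once when the client is constructed, and the SDK sends it as the bearer header on every request. A minimal sketch (YOUR_API_KEY is a placeholder):

from opperai import Opper

# The client forwards the key as a Bearer authentication header,
# i.e. "Authorization: Bearer YOUR_API_KEY", on each API call.
opper = Opper(http_bearer="YOUR_API_KEY")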
Path Parameters
span_id (required): The id of the span to list metrics for.
Query Parameters
offset: The offset to start the list from. Required range: x >= 0
limit: The number of metrics to return. Required range: 1 <= x <= 100
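For callers not using the Python SDK, the path and query parameters map directly onto the raw endpoint. A minimal sketch with the requests library; the base URL below is an assumption and should be replaced with the one for your environment, and the span id is a placeholder:

import requests

BASE_URL = "https://api.opper.ai/v2"  # assumption: substitute your environment's base URL
API_KEY = "YOUR_API_KEY"              # placeholder auth token
SPAN_ID = "3c90c3cc-0d44-4b50-8888-8dd25736052a"  # placeholder span id

response = requests.get(
    f"{BASE_URL}/spans/{SPAN_ID}/metrics",
    headers={"Authorization": f"Bearer {API_KEY}"},  # Bearer <token>, as described above
    params={"offset": 0, "limit": 20},  # offset >= 0, 1 <= limit <= 100
    timeout=30,
)
response.raise_for_status()

body = response.json()
print(body["meta"]["total_count"])
for metric in body["data"]:
    print(metric["dimension"], metric["value"])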
Response
200 - application/json
Successful Response
The response is of type object.
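Because limit is capped at 100, a span with many metrics has to be read in pages. A minimal sketch of collecting every metric with the SDK, advancing offset until the total_count reported in the response meta is reached; the span id is a placeholder:

from opperai import Opper

opper = Opper(http_bearer="YOUR_API_KEY")
span_id = "3c90c3cc-0d44-4b50-8888-8dd25736052a"  # placeholder: id of the span to inspect

page_size = 100  # maximum value allowed for the limit parameter
offset = 0
all_metrics = []

while True:
    page = opper.span_metrics.list(span_id=span_id, offset=offset, limit=page_size)
    all_metrics.extend(page.data)
    offset += len(page.data)
    # Stop when a page comes back empty or everything reported by total_count is collected.
    if not page.data or offset >= page.meta.total_count:
        break

print(f"Collected {len(all_metrics)} metrics for span {span_id}")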