Conversational
Building conversational AI applications with Opper
Conversational AI applications use language models to engage in natural conversations with users. This section demonstrates how to build conversational interfaces using Opper, from simple chat responses to complex multi-turn conversations with tools and context management.
Simple conversation response
The most basic conversational task is generating a response to a single user message. Here's a simple example of a conversational AI that responds to user input:
import os

from opperai import Opper

opper = Opper(api_key=os.getenv("OPPER_API_KEY"))


def generate_response(user_message: str) -> str:
    """Generate a conversational response to a user message."""
    response, _ = opper.call(
        name="chat_response",
        instructions="You are a helpful AI assistant. Respond to the user's message in a friendly and helpful manner.",
        input={"message": user_message},
        output_type=str
    )
    return response


def main():
    while True:
        user_input = input("User: ")
        if user_input.lower() in ['quit', 'exit', 'bye']:
            print("Assistant: Goodbye! Have a great day!")
            break

        response = generate_response(user_input)
        print(f"Assistant: {response}")


if __name__ == "__main__":
    main()
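The call above returns plain text, but the same pattern works with structured outputs. The sketch below is illustrative rather than part of the example above: it reuses the opper client defined earlier and passes a hypothetical ChatReply Pydantic model as output_type, the same approach the intent example later in this section uses.

from pydantic import BaseModel


class ChatReply(BaseModel):
    # Illustrative fields -- adjust to whatever metadata you want back
    reply: str
    tone: str  # e.g. "friendly", "apologetic"


def generate_structured_response(user_message: str) -> ChatReply:
    """Generate a reply plus simple metadata as a typed object."""
    result, _ = opper.call(
        name="chat_response_structured",
        instructions="Respond helpfully to the user's message and label the tone of your reply.",
        input={"message": user_message},
        output_type=ChatReply  # returns an instance of the model instead of a plain string
    )
    return result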
Multi-turn conversation with context
For more sophisticated conversations, you need to maintain conversation history and context. Here’s an example that demonstrates how to handle multi-turn conversations:
import os
from typing import List, Dict

from opperai import Opper

opper = Opper(api_key=os.getenv("OPPER_API_KEY"))


class ConversationManager:
    def __init__(self):
        self.messages: List[Dict[str, str]] = []

    def add_message(self, role: str, content: str):
        """Add a message to the conversation history."""
        self.messages.append({"role": role, "content": content})

    def generate_response(self, user_message: str) -> str:
        """Generate a response based on the full conversation context."""
        # Add the user's message to the conversation
        self.add_message("user", user_message)

        response, _ = opper.call(
            name="conversational_response",
            instructions="You are a helpful AI assistant. Use the conversation history to provide contextually relevant responses. Be conversational and engaging.",
            input={"messages": self.messages},
            output_type=str
        )

        # Add the assistant's response to the conversation
        self.add_message("assistant", response)
        return response

    def get_conversation_summary(self) -> str:
        """Get a summary of the conversation so far."""
        if not self.messages:
            return "No conversation history yet."

        summary, _ = opper.call(
            name="conversation_summary",
            instructions="Provide a brief summary of the conversation so far, highlighting key topics and user needs.",
            input={"messages": self.messages},
            output_type=str
        )
        return summary


def main():
    conversation = ConversationManager()
    print("Assistant: Hello! I'm your AI assistant. How can I help you today?")
    conversation.add_message("assistant", "Hello! I'm your AI assistant. How can I help you today?")

    while True:
        user_input = input("User: ")
        if user_input.lower() in ['quit', 'exit', 'bye']:
            print("Assistant: Goodbye! It was nice chatting with you!")
            break
        elif user_input.lower() == 'summary':
            summary = conversation.get_conversation_summary()
            print(f"Assistant: Here's a summary of our conversation:\n{summary}")
            continue

        response = conversation.generate_response(user_input)
        print(f"Assistant: {response}")


if __name__ == "__main__":
    main()
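One practical concern with this pattern is that self.messages grows without bound as the conversation continues. The sketch below shows one way to bound the context; the max_messages limit and the drop-oldest strategy are illustrative choices, not an Opper feature. You could instead summarize older turns with get_conversation_summary and keep only the summary.

class TrimmedConversationManager(ConversationManager):
    """ConversationManager that only keeps the most recent turns for the model."""

    def __init__(self, max_messages: int = 20):
        super().__init__()
        self.max_messages = max_messages

    def add_message(self, role: str, content: str):
        super().add_message(role, content)
        # Keep only the most recent messages; older turns are discarded.
        if len(self.messages) > self.max_messages:
            self.messages = self.messages[-self.max_messages:]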
Intent-based conversation with tools
For more advanced conversational applications, you can combine intent classification with tool calling to create intelligent assistants that can perform actions. Here’s an example inspired by the customer service bot pattern:
import os
from typing import Literal, Optional

from pydantic import BaseModel

from opperai import Opper

opper = Opper(api_key=os.getenv("OPPER_API_KEY"))


# Intent classification for understanding user requests
class IntentClassification(BaseModel):
    intent: Literal["greeting", "weather_inquiry", "help_request", "goodbye", "unknown"]
    confidence: float
    requires_location: bool = False


# Weather data structure
class WeatherInfo(BaseModel):
    location: str
    temperature: str
    condition: str
    humidity: str


# Mock weather database
weather_data = {
    "New York": {"temperature": "72°F", "condition": "Sunny", "humidity": "45%"},
    "London": {"temperature": "15°C", "condition": "Cloudy", "humidity": "70%"},
    "Tokyo": {"temperature": "25°C", "condition": "Rainy", "humidity": "80%"},
}


def classify_intent(user_message: str) -> IntentClassification:
    """Classify the user's intent from their message."""
    intent, _ = opper.call(
        name="intent_classifier",
        instructions="Classify the user's intent. Supported intents: greeting, weather_inquiry, help_request, goodbye, unknown. Set requires_location=True if the intent needs a location.",
        input={"message": user_message},
        output_type=IntentClassification
    )
    return intent


def extract_location(user_message: str) -> Optional[str]:
    """Extract location from user message."""
    location, _ = opper.call(
        name="location_extractor",
        instructions="Extract a location name from the user's message. Return only the location name or None if no location is found.",
        input={"message": user_message},
        output_type=str
    )
    return location if location and location.lower() != "none" else None


def get_weather(location: str) -> Optional[WeatherInfo]:
    """Get weather information for a location."""
    if location in weather_data:
        data = weather_data[location]
        return WeatherInfo(
            location=location,
            temperature=data["temperature"],
            condition=data["condition"],
            humidity=data["humidity"]
        )
    return None


def generate_response(user_message: str, conversation_history: list = None) -> str:
    """Generate a contextual response based on intent and conversation history."""
    if conversation_history is None:
        conversation_history = []

    # Classify the user's intent
    intent = classify_intent(user_message)

    # Handle different intents
    if intent.intent == "weather_inquiry":
        location = extract_location(user_message)
        if location:
            weather = get_weather(location)
            if weather:
                context = f"Weather for {location}: {weather.temperature}, {weather.condition}, Humidity: {weather.humidity}"
            else:
                context = f"Sorry, I don't have weather data for {location}"
        else:
            context = "I can help you with weather information. Please provide a location."
    elif intent.intent == "greeting":
        context = "User greeted me"
    elif intent.intent == "help_request":
        context = "User is asking for help"
    elif intent.intent == "goodbye":
        context = "User is saying goodbye"
    else:
        context = "Unknown intent"

    # Generate response with context
    response, _ = opper.call(
        name="contextual_response",
        instructions="Generate a helpful and natural response based on the user's intent and context. Be conversational and engaging.",
        input={
            "user_message": user_message,
            "intent": intent.intent,
            "context": context,
            "conversation_history": conversation_history
        },
        output_type=str
    )
    return response


def main():
    conversation_history = []
    print("Assistant: Hello! I'm your AI assistant. I can help you with weather information and general questions. How can I assist you today?")

    while True:
        user_input = input("User: ")
        if user_input.lower() in ['quit', 'exit', 'bye']:
            print("Assistant: Goodbye! Have a wonderful day!")
            break

        response = generate_response(user_input, conversation_history)
        print(f"Assistant: {response}")

        # Update conversation history
        conversation_history.append({"role": "user", "content": user_input})
        conversation_history.append({"role": "assistant", "content": response})


if __name__ == "__main__":
    main()
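Extending the assistant with another capability mostly means adding an intent and a handler. The sketch below is hypothetical rather than part of the example above: a time_inquiry intent and a get_current_time helper are shown only to illustrate where the changes go.

from datetime import datetime


# Hypothetical extra tool: answer "what time is it?" style questions.
def get_current_time() -> str:
    return datetime.now().strftime("%H:%M")


# 1. Widen the Literal in IntentClassification:
#        intent: Literal["greeting", "weather_inquiry", "help_request",
#                        "time_inquiry", "goodbye", "unknown"]
# 2. Mention time_inquiry in the intent_classifier instructions so the model can pick it.
# 3. Branch on the new intent in generate_response:
#        elif intent.intent == "time_inquiry":
#            context = f"The current time is {get_current_time()}"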