Docs
AI Responses - Python SDK

Creating and managing AI responses with the Python SDK

Creating AI Responses

Basic Usage

from lumnisai import Client

client = Client(api_key="your-api-key")

# Invoke with a bare prompt string.
result = client.invoke("What is the meaning of life?")
print(result.output_text)

# Invoke with an explicit chat-message list instead of a string.
result = client.invoke(
    messages=[{"role": "user", "content": "Hello!"}]
)

With Conversation History

# Resume a prior conversation by targeting its existing thread id.
reply = client.invoke(
    messages=[
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "What can you help me with?"},
    ],
    user_id="user@example.com",
    thread_id="existing-thread-id",
)

With Agent Configuration

from lumnisai import Client, AgentConfig

client = Client(api_key="your-api-key")

# Tune the agent pipeline: the same model for every role, with the
# cognitive-tool, validation, and comprehensive-output features enabled.
config = AgentConfig(
    use_cognitive_tools=True,
    enable_task_validation=True,
    generate_comprehensive_output=True,
    planner_model_name="openai:gpt-4.1",
    coordinator_model_name="openai:gpt-4.1",
    orchestrator_model_name="openai:gpt-4.1",
)

response = client.invoke(
    "Analyze complex data patterns",
    user_id="user@example.com",
    agent_config=config,
)

Structured Output

Use Pydantic models for structured responses:

from pydantic import BaseModel, Field
from typing import Optional

class Address(BaseModel):
    # Postal address; postal_code is optional since not every region uses one.
    street: str
    city: str
    country: str
    postal_code: Optional[str] = None

class BusinessInfo(BaseModel):
    # Target schema the model's structured response must conform to.
    name: str = Field(description="Business name")
    category: str = Field(description="Type of business")
    address: Address
    phone: Optional[str] = None
    website: Optional[str] = None
    rating: Optional[float] = Field(None, ge=0, le=5)

# Pass the model class directly; response_format_instructions adds free-text
# guidance on how the schema should be filled in.
response = client.invoke(
    "Tell me about the Louvre Museum. Get the address and business information.",
    response_format=BusinessInfo,
    response_format_instructions="Include all available details",
    user_id="user@example.com"
)

# structured_response is expanded with **, so it is presumably a plain dict
# matching the schema — re-validate it by constructing the model from it.
if response.structured_response:
    museum = BusinessInfo(**response.structured_response)
    print(f"Museum: {museum.name}")
    print(f"Location: {museum.address.city}, {museum.address.country}")

Streaming Progress Updates

Basic Streaming

from lumnisai import Client, display_progress

client = Client(api_key="your-api-key")

# Consume the progress stream, stopping once the run reports completion.
for update in client.invoke("Analyze this dataset", stream=True):
    display_progress(update)

    # Not done yet — keep streaming.
    if update.state != "completed":
        continue
    print(f"\nFinal output: {update.output_text}")
    break

Async Streaming

from lumnisai import AsyncClient, display_progress

client = AsyncClient(api_key="your-api-key")

# NOTE(review): invoke() is awaited first, so with stream=True it presumably
# resolves to an async iterator of progress updates — confirm against the SDK.
# This snippet must run inside an async function / running event loop.
async for update in await client.invoke("Research topic", stream=True):
    display_progress(update)

    if update.state == "completed":
        print(f"\n\nFinal output:\n{update.output_text}")

Accessing Update Details

from lumnisai import Client, display_progress

client = Client(api_key="your-api-key")

for event in client.invoke("Complex task", stream=True):
    # Built-in renderer (also prints tool activity).
    display_progress(event)

    # Inspect individual tool invocations for custom handling.
    for call in (event.tool_calls or []):
        if call.get('name') == 'web_search':
            print(f"  Searching the web...")

    if event.state == "completed":
        print(f"\n{event.output_text}")

Managing Responses

List Responses

from datetime import date

# Fetch the first page of this user's succeeded responses for January 2025.
page = client.list_responses(
    user_id="user@example.com",
    status="succeeded",
    start_date=date(2025, 1, 1),
    end_date=date(2025, 1, 31),
    offset=0,
    limit=50,
)

for item in page.responses:
    print(f"{item.response_id}: {item.status}")

Get Response Details

# Fetch a single response by id.
response = client.get_response(response_id="response-id")

# Long-polling variant: block until new progress is available
# (wait is presumably a timeout in seconds — confirm against SDK docs).
response = client.get_response(response_id="response-id", wait=30)

# Top-level result fields.
print(f"Status: {response.status}")
print(f"Output: {response.output_text}")
print(f"Thread: {response.thread_id}")

# Progress entries record each state transition plus any tool calls made.
for entry in response.progress:
    print(f"{entry.state}: {entry.message}")
    if entry.tool_calls:
        # Each tool call appears to be a dict with at least a 'name' key.
        for tool in entry.tool_calls:
            print(f"  → {tool.get('name')}")

# The agent's execution plan, when one was produced.
if response.plan:
    print(f"Plan: {response.plan}")

# Work delegated to sub-agents, when any ran.
if response.sub_agent_executions:
    for sub_agent in response.sub_agent_executions:
        print(f"Sub-agent: {sub_agent}")

Cancel Response

# Abort an in-flight response; the returned record reflects the new status.
result = client.cancel_response(response_id="response-id")
print(f"Cancelled: {result.status}")

Managing Threads

Create and List Threads

# Start a fresh conversation thread for this user.
thread = client.create_thread(
    user_id="user@example.com",
    title="Customer Support Conversation"
)

# Page through the user's threads (at most 20 here).
threads = client.list_threads(
    user_id="user@example.com",
    limit=20
)

# Loop variable named `t` so it does not shadow the thread created above.
for t in threads.threads:
    print(f"{t.thread_id}: {t.title}")

Get Thread Details

# Look up a single thread and show its summary fields.
info = client.get_thread(thread_id="thread-id")
print(f"Thread: {info.title}")
print(f"Responses: {info.response_count}")

Delete Thread

# Delete a thread (presumably irreversible — confirm against SDK docs).
client.delete_thread(thread_id="thread-id")

Complete Example Workflow

from lumnisai import Client, AgentConfig, display_progress

# Initialize client
client = Client(api_key="your-api-key")

# Create user — the email doubles as the user_id in the calls below.
user = client.create_user(
    email="analyst@example.com",
    first_name="Data",
    last_name="Analyst"
)

# Configure agent behaviour for this run.
agent_config = AgentConfig(
    coordinator_model_name="openai:gpt-4.1",
    planner_model_name="openai:gpt-4.1",
    use_cognitive_tools=True,
    enable_task_validation=True,
    generate_comprehensive_output=True
)

# Create a streaming response and render each progress update as it arrives.
for update in client.invoke(
    "What are the latest trends in AI agents?",
    stream=True,
    user_id=user.email,
    agent_config=agent_config
):
    display_progress(update)

    # The final update carries the complete output text.
    if update.state == "completed":
        print(f"\n\nFinal Analysis:\n{update.output_text}")

# List user's responses (no filters: every response recorded for this user).
responses = client.list_responses(user_id=user.email)
print(f"\nUser has {len(responses.responses)} total responses")

Error Handling

from lumnisai import (
    Client,
    AuthenticationError,
    NotFoundError,
    RateLimitError,
    ValidationError,
    LumnisAPIError
)

client = Client(api_key="your-api-key")

# Handlers are ordered most-specific first; LumnisAPIError comes last since
# it is presumably the base class of the others — confirm in the SDK.
try:
    response = client.invoke("Hello!")
except AuthenticationError as e:
    # Bad or missing API key.
    print(f"Invalid API key: {e}")
except ValidationError as e:
    # Malformed request; details appears to carry field-level information.
    print(f"Invalid request: {e}")
    print(f"Details: {e.details}")
except RateLimitError as e:
    # Too many requests; retry_after suggests a back-off period (units assumed seconds).
    print(f"Rate limited. Retry after {e.retry_after} seconds")
except NotFoundError as e:
    print(f"Resource not found: {e}")
except LumnisAPIError as e:
    # Catch-all for any other API failure.
    print(f"API error: {e}")
    print(f"Status code: {e.status_code}")