Installation
pip install tracelm
Requires Python 3.8+. The SDK depends on openai and httpx.
Quick Start
from tracelm import TraceLM
# Initialize the client
tracelm = TraceLM(
    api_key="lt_your-tracelm-key",        # Your TraceLM API key
    openai_api_key="sk-your-openai-key",  # Your OpenAI API key
    base_url="https://api.tracelm.ai"     # Optional: defaults to https://api.tracelm.ai
)
# Make a traced LLM call - works exactly like OpenAI's API
response = tracelm.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"}
    ]
)
print(response.choices[0].message.content)
Environment Variables
For production, use environment variables to store your API keys:
.env
TRACELM_API_KEY=lt_your-tracelm-key
OPENAI_API_KEY=sk-your-openai-key
TRACELM_BASE_URL=https://api.tracelm.ai
import os
from tracelm import TraceLM
# SDK automatically reads from environment variables
tracelm = TraceLM()
# Or explicitly pass them
tracelm = TraceLM(
    api_key=os.environ["TRACELM_API_KEY"],
    openai_api_key=os.environ["OPENAI_API_KEY"]
)
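If you keep keys in a .env file for local development, something must load that file into the process environment before the client is constructed. A minimal sketch, assuming the third-party python-dotenv package (not a TraceLM dependency):

# pip install python-dotenv
from dotenv import load_dotenv
from tracelm import TraceLM

load_dotenv()        # reads .env from the current directory into os.environ
tracelm = TraceLM()  # now picks up TRACELM_API_KEY and OPENAI_API_KEY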
Task Tracking
Group multiple LLM calls into a task for agent observability. Tasks automatically track tool calls, detect loops, and identify failures.
from tracelm import TraceLM
tracelm = TraceLM()
# Create a task to group related LLM calls
with tracelm.task(name="booking_flow", user_id="user_123") as task:
    # First LLM call
    response1 = tracelm.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Find flights to NYC"}]
    )

    # Second LLM call - automatically grouped with the first
    response2 = tracelm.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": "Find flights to NYC"},
            {"role": "assistant", "content": response1.choices[0].message.content},
            {"role": "user", "content": "Book the cheapest one"}
        ]
    )

    # Complete the task and run detection
    result = task.complete()

# Access detection results
print(f"Task ID: {task.id}")
print(f"Trace count: {task.trace_count}")
print(f"Loops detected: {result.loops.detected}")
print(f"Tool failures: {result.failures.total}")
Detection Results
When you complete a task, TraceLM automatically runs detection analysis:
with tracelm.task(name="agent_task") as task:
    # ... your LLM calls ...
    result = task.complete()

# Loop Detection
if result.loops.detected:
    print(f"Loops found: {result.loops.patterns} patterns")
    print(f"Severity: {result.loops.highest_severity}")  # low, medium, high

# Tool Failure Detection
if result.failures.total > 0:
    print(f"Explicit failures: {result.failures.explicit}")
    print(f"Semantic failures: {result.failures.semantic}")
    print(f"Silent failures: {result.failures.silent}")

# Context Failure Detection
if result.context.total > 0:
    print(f"Forgotten preferences: {result.context.forgotten_preferences}")
    print(f"Repeated questions: {result.context.repeated_questions}")
    print(f"Contradicted facts: {result.context.contradicted_facts}")
    print(f"Violated constraints: {result.context.violated_constraints}")
    print(f"Lost context: {result.context.lost_context}")
Use task.detect() to run detection without completing the task. Useful for monitoring long-running agents.
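For example, a sketch of periodic in-flight checks, assuming detect() returns the same result object as complete(), with run_agent_step and max_steps as hypothetical stand-ins for your agent loop:

with tracelm.task(name="long_running_agent") as task:
    for step in range(max_steps):  # hypothetical step budget
        run_agent_step(task)       # hypothetical: one agent iteration of LLM/tool calls
        interim = task.detect()    # run detection without closing the task
        if interim.loops.detected:
            break                  # stop early if the agent is looping
    task.complete()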
Conversation Tracking
Group multiple tasks into a conversation for multi-turn chat applications:
from tracelm import TraceLM
tracelm = TraceLM()
# Create a conversation to group related tasks
with tracelm.conversation(user_id="user_123", session_id="session_abc") as conv:
    # First turn
    with conv.task(name="turn_1") as task:
        response = tracelm.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "What's the weather in NYC?"}]
        )
        task.complete()

    # Second turn
    with conv.task(name="turn_2") as task:
        response = tracelm.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": "What's the weather in NYC?"},
                {"role": "assistant", "content": "..."},
                {"role": "user", "content": "What about tomorrow?"}
            ]
        )
        task.complete()

print(f"Conversation ID: {conv.id}")
print(f"Total tasks: {conv.task_count}")
Working with Tool Calls
TraceLM automatically tracks function/tool calls made by the LLM:
from tracelm import TraceLM
tracelm = TraceLM()
tools = [
    {
        "type": "function",
        "function": {
            "name": "search_web",
            "description": "Search the web for information",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    },
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"}
                },
                "required": ["location"]
            }
        }
    }
]

with tracelm.task(name="tool_agent") as task:
    response = tracelm.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Search for the weather in NYC"}],
        tools=tools
    )

    # Handle tool calls as usual
    if response.choices[0].message.tool_calls:
        for tool_call in response.choices[0].message.tool_calls:
            print(f"Tool: {tool_call.function.name}")
            print(f"Args: {tool_call.function.arguments}")

    # Get tool call details from task
    tool_calls = task.get_tool_calls()
    for tc in tool_calls:
        print(f"{tc.tool_name}: {tc.status}")

    task.complete()
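Because the client mirrors OpenAI's chat API, the usual tool round trip applies: execute the call yourself, append a tool message with the result, then let the model continue. A sketch, with execute_tool as a hypothetical dispatcher over your own functions:

import json

with tracelm.task(name="tool_agent") as task:
    messages = [{"role": "user", "content": "Search for the weather in NYC"}]
    response = tracelm.chat.completions.create(
        model="gpt-4o-mini", messages=messages, tools=tools
    )
    msg = response.choices[0].message
    if msg.tool_calls:
        messages.append(msg)  # keep the assistant turn that requested the tools
        for tool_call in msg.tool_calls:
            result = execute_tool(  # hypothetical: dispatch to your own implementation
                tool_call.function.name,
                json.loads(tool_call.function.arguments)
            )
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result)
            })
        # Second call lets the model incorporate the tool results
        final = tracelm.chat.completions.create(
            model="gpt-4o-mini", messages=messages, tools=tools
        )
        print(final.choices[0].message.content)
    task.complete()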
Async Support
Use the AsyncTraceLM client for async applications:
import asyncio
from tracelm import AsyncTraceLM
async def main():
    tracelm = AsyncTraceLM(
        api_key="lt_your-tracelm-key",
        openai_api_key="sk-your-openai-key"
    )

    async with tracelm.task(name="async_task", user_id="user_123") as task:
        response = await tracelm.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)

        result = await task.complete()
        print(f"Loops: {result.loops.detected}")

asyncio.run(main())
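Since AsyncTraceLM mirrors the synchronous client, independent calls can also be fanned out concurrently with asyncio.gather. A sketch, assuming concurrent calls inside one task are supported:

import asyncio
from tracelm import AsyncTraceLM

async def fan_out():
    tracelm = AsyncTraceLM()  # reads keys from environment variables
    async with tracelm.task(name="parallel_task") as task:
        # Independent prompts issued concurrently, all grouped under one task
        responses = await asyncio.gather(*[
            tracelm.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": q}]
            )
            for q in ["Capital of France?", "Capital of Japan?"]
        ])
        for r in responses:
            print(r.choices[0].message.content)
        await task.complete()

asyncio.run(fan_out())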
Streaming Responses
TraceLM fully supports streaming responses:
from tracelm import TraceLM
tracelm = TraceLM()
with tracelm.task(name="streaming_task") as task:
    stream = tracelm.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Write a poem about AI"}],
        stream=True
    )

    for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
    print()  # Newline after streaming

    task.complete()
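If you also need the complete text afterwards (for logging, or as the assistant message in the next turn), accumulate the deltas while streaming; this is the standard OpenAI streaming pattern and should apply unchanged here:

chunks = []
with tracelm.task(name="streaming_task") as task:
    stream = tracelm.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Write a poem about AI"}],
        stream=True
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            chunks.append(delta)
    task.complete()

full_text = "".join(chunks)  # complete response, reusable as conversation history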
Query API
Query your tasks and conversations programmatically:
from tracelm import TraceLM
from datetime import datetime, timedelta
tracelm = TraceLM()
# List tasks with filters
tasks = tracelm.list_tasks(
    has_issues=True,      # Only tasks with detected issues
    user_id="user_123",   # Filter by user
    status="completed",   # active, completed, failed, timeout
    loop_detected=True,   # Has loop detection
    start_date=datetime.now() - timedelta(days=7),
    page=1,
    page_size=50,
    sort_by="started_at",
    sort_order="desc"
)

for task in tasks.tasks:
    print(f"{task.task_name}: {task.trace_count} traces, {task.tool_failure_count} failures")

# Get detailed task info
task_detail = tracelm.get_task("task-uuid-here")
print(f"Traces: {len(task_detail.traces)}")
print(f"Loops: {len(task_detail.loops)}")

# List conversations
conversations = tracelm.list_conversations(user_id="user_123")
for conv in conversations.conversations:
    print(f"Conversation {conv.id}: {conv.task_count} tasks")

# Get context failures for a conversation
failures = tracelm.get_conversation_context_failures("conv-uuid-here")
for failure in failures:
    print(f"{failure.failure_type}: {failure.description}")
Error Handling
Handle errors gracefully in your application:
from tracelm import TraceLM
from tracelm.exceptions import (
    TraceLMError,
    ConfigurationError,
    APIError,
    TaskError,
    ConversationError
)

try:
    tracelm = TraceLM()
except ConfigurationError as e:
    print(f"Configuration error: {e}")
    # Missing or invalid API keys

try:
    with tracelm.task(name="my_task") as task:
        response = tracelm.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        task.complete()
except APIError as e:
    print(f"API error ({e.status_code}): {e}")
    # Handle API failures
except TaskError as e:
    print(f"Task error: {e}")
    # Handle task operation failures

# Mark task as failed on error
with tracelm.task(name="risky_task") as task:
    try:
        response = tracelm.chat.completions.create(...)
        task.complete()
    except Exception as e:
        task.fail(reason=str(e), metadata={"error_type": type(e).__name__})
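For transient API failures, a small retry wrapper around the traced call can help. A sketch using the exceptions above with exponential backoff; treating only 429 and 5xx status codes as retryable is an assumption, not documented SDK behavior:

import time
from tracelm.exceptions import APIError

def create_with_retry(max_attempts=3, **kwargs):
    for attempt in range(1, max_attempts + 1):
        try:
            return tracelm.chat.completions.create(**kwargs)
        except APIError as e:
            transient = e.status_code == 429 or e.status_code >= 500
            if not transient or attempt == max_attempts:
                raise
            time.sleep(2 ** attempt)  # back off: 2s, 4s, ...

response = create_with_retry(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello!"}]
)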
Direct API Integration
If you prefer not to use the SDK, see the API Reference for direct HTTP integration.
import os
import requests
response = requests.post(
    f"{os.environ['TRACELM_BASE_URL']}/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "X-API-Key": os.environ["TRACELM_API_KEY"],
        "Content-Type": "application/json",
        # Agent observability headers
        "X-Task-ID": "task_abc123",
        "X-Conversation-ID": "conv_xyz789",
        "X-User-ID": "user_456"
    },
    json={
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "Hello!"}]
    }
)
data = response.json()
print(data["choices"][0]["message"]["content"])