Installation
- npm
- yarn
- pnpm
Copy
npm install tracelm
Copy
yarn add tracelm
Copy
pnpm add tracelm
Requires Node.js 16+. The SDK has a peer dependency on
openai (v4+).

Quick Start
Copy
import { TraceLM } from 'tracelm';

// Set up the client with your TraceLM and OpenAI credentials.
const tracelm = new TraceLM({
  apiKey: 'lt_your-tracelm-key', // Your TraceLM API key
  openaiApiKey: 'sk-your-openai-key', // Your OpenAI API key
  baseUrl: 'https://api.tracelm.ai' // Optional: defaults to https://api.tracelm.ai
});

// A traced LLM call — the surface mirrors OpenAI's chat completions API.
const completion = await tracelm.chat.completions.create({
  model: 'gpt-4o-mini',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'What is the capital of France?' }
  ]
});

console.log(completion.choices[0].message.content);
Environment Variables
For production, use environment variables to store your API keys. Example .env file:
Copy
TRACELM_API_KEY=lt_your-tracelm-key
OPENAI_API_KEY=sk-your-openai-key
TRACELM_BASE_URL=https://api.tracelm.ai
Copy
import { TraceLM } from 'tracelm';
// SDK reads from environment variables
// The trailing `!` asserts the variables are set; prefer validating them at
// startup and failing fast if either is missing.
const tracelm = new TraceLM({
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!
});
Task Tracking
Group multiple LLM calls into a task for agent observability. Tasks automatically track tool calls, detect loops, and identify failures.
Copy
import { TraceLM } from 'tracelm';
const tracelm = new TraceLM({
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!
});
// Create a task to group related LLM calls
const task = tracelm.startTask({
name: 'booking_flow',
userId: 'user_123'
});
try {
// First LLM call
const response1 = await tracelm.chat.completions.create({
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Find flights to NYC' }]
});
// Second LLM call - automatically grouped with the first
// (`response2` is unused below; the call is shown to demonstrate grouping.)
const response2 = await tracelm.chat.completions.create({
model: 'gpt-4o-mini',
messages: [
{ role: 'user', content: 'Find flights to NYC' },
// `!` asserts content is non-null; a refusal or tool-call turn could make it null.
{ role: 'assistant', content: response1.choices[0].message.content! },
{ role: 'user', content: 'Book the cheapest one' }
]
});
// Complete the task and run detection
const result = await task.complete();
// Access detection results (`result` may be null, hence the `?.` below)
console.log('Task ID:', task.id);
console.log('Trace count:', task.traceCount);
console.log('Loops detected:', result?.loops.detected);
console.log('Tool failures:', result?.failures.total);
} catch (error) {
// Record the failure reason on the task; the error is swallowed afterwards.
await task.fail({ reason: String(error) });
} finally {
// Always clear the active task, success or failure.
tracelm.endTask();
}
Detection Results
When you complete a task, TraceLM automatically runs detection analysis:
Copy
// Start a task; detection runs when it is completed.
const task = tracelm.startTask({ name: 'agent_task' });
try {
// ... your LLM calls ...
// complete() returns the detection results, or null if none were produced.
const result = await task.complete();
if (result) {
// Loop Detection
if (result.loops.detected) {
// NOTE(review): assumes `patterns` is a count — confirm it is not an array.
console.log('Loops found:', result.loops.patterns, 'patterns');
console.log('Severity:', result.loops.highestSeverity); // low, medium, high
}
// Tool Failure Detection
if (result.failures.total > 0) {
console.log('Explicit failures:', result.failures.explicit);
console.log('Semantic failures:', result.failures.semantic);
console.log('Silent failures:', result.failures.silent);
}
// Context Failure Detection
if (result.context.total > 0) {
console.log('Forgotten preferences:', result.context.forgottenPreferences);
console.log('Repeated questions:', result.context.repeatedQuestions);
console.log('Contradicted facts:', result.context.contradictedFacts);
console.log('Violated constraints:', result.context.violatedConstraints);
console.log('Lost context:', result.context.lostContext);
}
}
} finally {
// Always clear the active task, even if completion threw.
tracelm.endTask();
}
Use task.detect() to run detection without completing the task. Useful for monitoring long-running agents.

Conversation Tracking
Group multiple tasks into a conversation for multi-turn chat applications:
Copy
import { TraceLM } from 'tracelm';
const tracelm = new TraceLM({
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!
});
// Create a conversation to group related tasks
const conversation = tracelm.startConversation({
userId: 'user_123',
sessionId: 'session_abc'
});
// First turn
// NOTE(review): tasks started from the conversation appear to become the
// client's active task (completions go through `tracelm`, and `tracelm.endTask()`
// clears them) — confirm against the SDK reference.
const task1 = conversation.startTask({ name: 'turn_1' });
const response1 = await tracelm.chat.completions.create({
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: "What's the weather in NYC?" }]
});
await task1.complete();
tracelm.endTask();
// Second turn
const task2 = conversation.startTask({ name: 'turn_2' });
const response2 = await tracelm.chat.completions.create({
model: 'gpt-4o-mini',
messages: [
// Replay the first turn so the model has full context.
{ role: 'user', content: "What's the weather in NYC?" },
{ role: 'assistant', content: response1.choices[0].message.content! },
{ role: 'user', content: 'What about tomorrow?' }
]
});
await task2.complete();
tracelm.endTask();
console.log('Conversation ID:', conversation.id);
console.log('Total tasks:', conversation.taskCount);
// Close the conversation once all turns are done.
tracelm.endConversation();
Working with Tool Calls
TraceLM automatically tracks function/tool calls made by the LLM:
Copy
import { TraceLM } from 'tracelm';
const tracelm = new TraceLM({
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!
});
// OpenAI-style tool definitions. `as const` narrows `type` to the literal
// 'function' so the array matches the expected tool parameter type.
const tools = [
{
type: 'function' as const,
function: {
name: 'search_web',
description: 'Search the web for information',
parameters: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' }
},
required: ['query']
}
}
},
{
type: 'function' as const,
function: {
name: 'get_weather',
description: 'Get weather for a location',
parameters: {
type: 'object',
properties: {
location: { type: 'string' }
},
required: ['location']
}
}
}
];
const task = tracelm.startTask({ name: 'tool_agent' });
try {
const response = await tracelm.chat.completions.create({
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Search for the weather in NYC' }],
tools
});
// Handle tool calls as usual
const toolCalls = response.choices[0].message.tool_calls;
if (toolCalls) {
for (const toolCall of toolCalls) {
console.log('Tool:', toolCall.function.name);
console.log('Args:', toolCall.function.arguments); // JSON string, not parsed
}
}
// Get tool call details from task (tracked automatically by TraceLM)
const taskToolCalls = await task.getToolCalls();
for (const tc of taskToolCalls) {
console.log(`${tc.toolName}: ${tc.status}`);
}
await task.complete();
} finally {
tracelm.endTask();
}
Streaming Responses
TraceLM fully supports streaming responses:
Copy
import { TraceLM } from 'tracelm';

const tracelm = new TraceLM({
  apiKey: process.env.TRACELM_API_KEY!,
  openaiApiKey: process.env.OPENAI_API_KEY!
});

// Stream a completion inside a task; tracing works the same as for
// non-streaming calls.
const task = tracelm.startTask({ name: 'streaming_task' });
try {
  const stream = await tracelm.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Write a poem about AI' }],
    stream: true
  });

  // Print each content delta as it arrives.
  for await (const part of stream) {
    const delta = part.choices[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
  console.log(); // Newline after streaming

  await task.complete();
} finally {
  tracelm.endTask();
}
Next.js Integration
Example of using TraceLM in a Next.js API route: app/api/chat/route.ts
Copy
import { NextResponse } from 'next/server';
import { TraceLM } from 'tracelm';

// Module-scoped client: constructed once and reused across requests.
const tracelm = new TraceLM({
  apiKey: process.env.TRACELM_API_KEY!,
  openaiApiKey: process.env.OPENAI_API_KEY!
});

/**
 * POST /api/chat — runs one traced chat completion per request.
 * Expects a JSON body: { message, userId, sessionId }.
 */
export async function POST(request: Request) {
  // NOTE(review): sessionId is parsed but never used — presumably intended
  // for conversation grouping (startConversation); confirm or drop it.
  const { message, userId, sessionId } = await request.json();

  // Start a task for this request so its LLM calls are grouped.
  const task = tracelm.startTask({
    name: 'chat_request',
    userId
  });

  try {
    const response = await tracelm.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: message }]
    });

    const result = await task.complete();

    return NextResponse.json({
      content: response.choices[0].message.content,
      taskId: task.id,
      // Coerce to a real boolean: the previous expression could evaluate to
      // undefined (when `result` is null), and JSON.stringify silently drops
      // undefined properties, so clients would never see the key.
      hasIssues:
        (result?.loops.detected ?? false) || (result?.failures.total ?? 0) > 0
    });
  } catch (error) {
    await task.fail({ reason: String(error) });
    return NextResponse.json({ error: 'Request failed' }, { status: 500 });
  } finally {
    // Always clear the active task for this request.
    tracelm.endTask();
  }
}
Query API
Query your tasks and conversations programmatically:
Copy
import { TraceLM } from 'tracelm';
const tracelm = new TraceLM({
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!
});
// List tasks with filters
const tasks = await tracelm.listTasks({
hasIssues: true, // Only tasks with detected issues
userId: 'user_123', // Filter by user
status: 'completed', // active, completed, failed, timeout
loopDetected: true, // Has loop detection
startDate: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), // last 7 days
page: 1,
pageSize: 50,
sortBy: 'started_at',
sortOrder: 'desc'
});
// Results are paginated; this iterates only the page requested above.
for (const task of tasks.tasks) {
console.log(`${task.taskName}: ${task.traceCount} traces, ${task.toolFailureCount} failures`);
}
// Get detailed task info
const taskDetail = await tracelm.getTask('task-uuid-here');
console.log('Traces:', taskDetail.traces.length);
console.log('Loops:', taskDetail.loops.length);
// List conversations
const conversations = await tracelm.listConversations({ userId: 'user_123' });
for (const conv of conversations.conversations) {
console.log(`Conversation ${conv.id}: ${conv.taskCount} tasks`);
}
// Get context failures for a conversation
const failures = await tracelm.getConversationContextFailures('conv-uuid-here');
for (const failure of failures) {
console.log(`${failure.failureType}: ${failure.description}`);
}
TypeScript Types
The SDK exports all types for full type safety:
Copy
// All public types are exported from the package root. Consider `import type`
// for the type-only names if your build enforces isolatedModules hygiene.
import {
TraceLM,
TraceLMOptions,
TaskOptions,
ConversationOptions,
TaskStatus,
Severity,
DetectionResult,
LoopDetectionResult,
FailureDetectionResult,
ContextDetectionResult,
TaskDetail,
ConversationDetail,
ToolCallResponse,
ConfigurationError,
APIError,
TaskError
} from 'tracelm';
// Full type safety
const options: TraceLMOptions = {
apiKey: process.env.TRACELM_API_KEY!,
openaiApiKey: process.env.OPENAI_API_KEY!,
baseUrl: 'https://api.tracelm.ai'
};
const tracelm = new TraceLM(options);
const taskOptions: TaskOptions = {
name: 'my_task',
userId: 'user_123',
metadata: { custom: 'data' }
};
const task = tracelm.startTask(taskOptions);
// Detection result is fully typed (null when no detection was produced)
const result: DetectionResult | null = await task.complete();
if (result) {
const loopResult: LoopDetectionResult = result.loops;
const failureResult: FailureDetectionResult = result.failures;
const contextResult: ContextDetectionResult = result.context;
}
Error Handling
Handle errors gracefully in your application:
Copy
import {
  TraceLM,
  ConfigurationError,
  APIError,
  TaskError,
  ConversationError
} from 'tracelm';

// Construction can throw ConfigurationError (missing or invalid API keys),
// so wrap it if your keys come from an untrusted environment.
try {
  const tracelm = new TraceLM({
    apiKey: process.env.TRACELM_API_KEY!,
    openaiApiKey: process.env.OPENAI_API_KEY!
  });
} catch (error) {
  if (error instanceof ConfigurationError) {
    console.error('Configuration error:', error.message);
    // Missing or invalid API keys
  }
}

const tracelm = new TraceLM({
  apiKey: process.env.TRACELM_API_KEY!,
  openaiApiKey: process.env.OPENAI_API_KEY!
});

const task = tracelm.startTask({ name: 'my_task' });
try {
  const response = await tracelm.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Hello!' }]
  });
  await task.complete();
} catch (error) {
  if (error instanceof APIError) {
    console.error(`API error (${error.statusCode}):`, error.message);
    // Handle API failures
  } else if (error instanceof TaskError) {
    console.error('Task error:', error.message);
    // Handle task operation failures
  }
  // Mark the task as failed. Under strict TS the catch variable is `unknown`
  // (useUnknownInCatchVariables), so the old `error.constructor.name` did not
  // compile — and would throw at runtime for thrown null/undefined. Narrow
  // with instanceof before touching properties.
  await task.fail({
    reason: String(error),
    metadata: {
      errorType: error instanceof Error ? error.constructor.name : typeof error
    }
  });
} finally {
  tracelm.endTask();
}
Direct API Integration
If you prefer not to use the SDK, see the API Reference for direct HTTP integration.
Copy
// Direct HTTP call to the TraceLM OpenAI-compatible completions endpoint.
// Fall back to the documented default base URL so the request still works
// when TRACELM_BASE_URL is unset (previously the URL would begin with the
// literal string "undefined").
const baseUrl = process.env.TRACELM_BASE_URL ?? 'https://api.tracelm.ai';
const response = await fetch(`${baseUrl}/v1/chat/completions`, {
  method: 'POST',
  headers: {
    'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`, // OpenAI key for the upstream model call
    'X-API-Key': process.env.TRACELM_API_KEY!, // TraceLM key for tracing
    'Content-Type': 'application/json',
    // Agent observability headers
    'X-Task-ID': 'task_abc123',
    'X-Conversation-ID': 'conv_xyz789',
    'X-User-ID': 'user_456'
  },
  body: JSON.stringify({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Hello!' }]
  })
});

const data = await response.json();
console.log(data.choices[0].message.content);