Manual Tracing
Tool Spans Creation
Example Tool Instrumentation
The following example illustrates how to manually trace a tool function alongside a chat completion call. You'll create spans for both the tool and the LLM to capture their inputs, outputs, and key events.
Before diving into the code, ensure your tracer is properly configured:
# Python Tracer Setup (Illustrative)
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
# Set up a tracer provider
trace.set_tracer_provider(TracerProvider())
tracer_provider = trace.get_tracer_provider()
# Configure an exporter (e.g., ConsoleExporter for demonstration)
exporter = ConsoleSpanExporter()
span_processor = SimpleSpanProcessor(exporter)
tracer_provider.add_span_processor(span_processor)
# Get a tracer
tracer = trace.get_tracer(__name__)
# Assume openai_client is configured elsewhere
import openai
openai_client = openai.OpenAI()
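In production you will usually export spans to a collector or tracing backend rather than the console. The following is a minimal sketch of that swap using the standard OpenTelemetry OTLP HTTP exporter; it assumes the opentelemetry-exporter-otlp-proto-http package is installed, and the endpoint and header values are placeholders, not a specific backend.
# Illustrative: replace the ConsoleSpanExporter with an OTLP exporter.
# Assumes 'opentelemetry-exporter-otlp-proto-http' is installed; the endpoint
# and Authorization header below are placeholders for your own collector.
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

otlp_exporter = OTLPSpanExporter(
    endpoint="https://your-collector-endpoint/v1/traces",  # placeholder endpoint
    headers={"Authorization": "Bearer <YOUR_API_KEY>"},  # placeholder credentials
)
# BatchSpanProcessor buffers and exports spans asynchronously, which is
# preferable to SimpleSpanProcessor outside of demos.
tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))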
// Typescript Tracer Setup (Illustrative)
import { trace, DiagConsoleLogger, DiagLogLevel, diag } from "@opentelemetry/api";
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { ConsoleSpanExporter, SimpleSpanProcessor } from "@opentelemetry/sdk-trace-base";
// Optional: For verbose logging from OpenTelemetry
// diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);
const provider = new NodeTracerProvider();
// Configure an exporter (e.g., ConsoleExporter for demonstration)
const consoleExporter = new ConsoleSpanExporter();
const spanProcessor = new SimpleSpanProcessor(consoleExporter);
provider.addSpanProcessor(spanProcessor);
// Initialize the provider
provider.register();
// Get a tracer
const tracer = trace.getTracer("my-application-tracer");
// Assume openaiClient is configured elsewhere
import OpenAI from 'openai';
const openaiClient = new OpenAI();
import json

# Ensure 'tracer' is defined from the setup section above.
# Ensure 'openai_client' is defined, e.g., from the openai library.

# Placeholder definitions for the example
question = "What is the weather like in London?"

def example_tool_function(input_args):
    print(f"Tool received: {input_args}")
    return f"The weather in {input_args.get('city', 'default city')} is sunny."

tool_args_example = {"city": "London"}
model_version_example = "gpt-3.5-turbo"
current_user_message_example = [{"role": "user", "content": question}]
TEMPERATURE_example = 0.7

def run_tool_py(tool_function, tool_args, current_question, openai_client_instance, model, messages, temp):
    # Begin by setting the context for the current span
    with tracer.start_as_current_span(
        name="Tool - specific tool",
        attributes={
            # Set these attributes prior to invoking the tool, in case the tool raises an exception
            "fi.span.kind": "TOOL",
            "input.value": current_question,
            "message.tool_calls.0.tool_call.function.name": tool_function.__name__,
            "message.tool_calls.0.tool_call.function.arguments": json.dumps(tool_args),
        },
    ) as tool_span:
        # Run the tool; the output is a formatted prompt for chat completion
        resulting_prompt = tool_function(input_args=tool_args)
        # Optionally, set the resulting prompt as the tool span output
        tool_span.set_attribute(
            "message.tool_calls.0.tool_call.function.output", resulting_prompt
        )
        # This LLM span is nested under the tool span in the trace
        with tracer.start_as_current_span(
            name="Tool - LLM response",
            # Set these attributes before invoking the LLM
            attributes={
                "fi.span.kind": "LLM",
                "input.value": resulting_prompt,
            },
        ) as llm_span:
            # llm_response = openai_client_instance.chat.completions.create(
            #     model=model,
            #     messages=messages,
            #     temperature=temp,
            # )
            # llm_span.set_attribute("output.value", str(llm_response))  # Convert to string if necessary
            llm_response_example = "LLM response based on tool output."
            llm_span.set_attribute("output.value", llm_response_example)

# Example call (assuming tracer and openai_client are initialized from setup)
# run_tool_py(example_tool_function, tool_args_example, question, openai_client, model_version_example, current_user_message_example, TEMPERATURE_example)
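The Python example above omits error handling for brevity. If the tool can fail, you can record the failure on the tool span, similar to the error handling shown in the TypeScript example below. This is a minimal sketch using the standard OpenTelemetry status API; the helper name run_tool_with_error_handling_py is illustrative and not part of the original example.
# Illustrative sketch: record tool failures on the span explicitly.
from opentelemetry.trace import Status, StatusCode

def run_tool_with_error_handling_py(tool_function, tool_args, current_question):
    with tracer.start_as_current_span(
        name=f"Tool - {tool_function.__name__}",
        attributes={
            "fi.span.kind": "TOOL",
            "input.value": current_question,
            "message.tool_calls.0.tool_call.function.arguments": json.dumps(tool_args),
        },
        # Disable the context manager defaults so exception handling stays explicit
        record_exception=False,
        set_status_on_exception=False,
    ) as tool_span:
        try:
            result = tool_function(input_args=tool_args)
            tool_span.set_attribute("message.tool_calls.0.tool_call.function.output", result)
            tool_span.set_status(Status(StatusCode.OK))
            return result
        except Exception as exc:
            # Attach the exception event and mark the span as errored
            tool_span.record_exception(exc)
            tool_span.set_status(Status(StatusCode.ERROR, str(exc)))
            raise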
import { trace, context, Attributes, SpanStatusCode } from "@opentelemetry/api";

// Ensure 'tracer' is initialized from the setup section above.
// Assume 'openaiClient', 'model_version', 'current_user_message', 'TEMPERATURE' are defined.
// import OpenAI from 'openai';
// const openaiClient = new OpenAI(); // Example
// const model_version_ts = "gpt-4o";
// const current_user_message_ts = [{ role: "user", content: "Placeholder" }];
// const TEMPERATURE_ts = 0.7;

// Placeholder definitions for the example
const questionTs = "What is the weather like in Berlin?";

interface ToolArgs { city: string; }

const exampleToolFunctionTs = async (inputArgs: ToolArgs): Promise<string> => {
  console.log(`Tool received: ${JSON.stringify(inputArgs)}`);
  return Promise.resolve(`The weather in ${inputArgs.city} is cloudy.`);
};

const toolArgsExampleTs: ToolArgs = { city: "Berlin" };

async function runToolTs(
  toolFunction: (inputArgs: any) => Promise<any>,
  toolArgs: any,
  currentQuestion: string
  // Pass openaiClient, model, messages, temp if doing a real call
) {
  await tracer.startActiveSpan(`Tool - ${toolFunction.name}`, async (toolSpan) => {
    try {
      toolSpan.setAttributes({
        "fi.span.kind": "TOOL",
        "input.value": currentQuestion,
        "message.tool_calls.0.tool_call.function.name": toolFunction.name,
        "message.tool_calls.0.tool_call.function.arguments": JSON.stringify(toolArgs),
      } as Attributes);

      const resulting_prompt = await toolFunction(toolArgs);
      toolSpan.setAttribute("message.tool_calls.0.tool_call.function.output", resulting_prompt);

      await tracer.startActiveSpan("Tool - LLM response", async (llmSpan) => {
        try {
          llmSpan.setAttributes({
            "fi.span.kind": "LLM",
            "input.value": resulting_prompt,
          } as Attributes);

          // const llm_response = await openaiClient.chat.completions.create({
          //   model: model_version_ts,
          //   messages: current_user_message_ts,
          //   temperature: TEMPERATURE_ts,
          // });
          // llmSpan.setAttribute("output.value", llm_response.choices[0]?.message?.content || "");
          const llmResponseExample = "LLM response based on tool output for Typescript.";
          llmSpan.setAttribute("output.value", llmResponseExample);
          llmSpan.setStatus({ code: SpanStatusCode.OK });
        } catch (error) {
          llmSpan.setStatus({ code: SpanStatusCode.ERROR, message: (error as Error).message });
          if (error instanceof Error) llmSpan.recordException(error);
          else llmSpan.recordException(String(error));
          throw error;
        } finally {
          llmSpan.end();
        }
      });

      toolSpan.setStatus({ code: SpanStatusCode.OK });
    } catch (error) {
      toolSpan.setStatus({ code: SpanStatusCode.ERROR, message: (error as Error).message });
      if (error instanceof Error) toolSpan.recordException(error);
      else toolSpan.recordException(String(error));
      throw error;
    } finally {
      toolSpan.end();
    }
  });
}

// Example call (assuming tracer is initialized from setup):
// runToolTs(exampleToolFunctionTs, toolArgsExampleTs, questionTs).catch(console.error);