Example Tool Instrumentation

The following example shows how to manually trace a tool function together with the chat completion call it feeds. You'll learn how to create spans for both the tool and the LLM to capture their inputs, outputs, and key events.

Before diving into the code, ensure your tracer is properly configured.
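If you don't already have one, the following is a minimal sketch using the OpenTelemetry SDK with a console exporter; in practice you would swap in the exporter for your observability backend, and the your_tracer module referenced in the example below would hold this setup.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

# Register a provider that prints finished spans to stdout
provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# The tracer used throughout the example
tracer = trace.get_tracer(__name__)

With the tracer in place, here is the full example: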

import json

from openai import OpenAI  # official OpenAI Python client (v1+)

from your_tracer import tracer  # module holding the tracer configured above

openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment
model_version = "gpt-4o"  # placeholder model name; substitute your own
TEMPERATURE = 0.7  # placeholder sampling temperature

def run_tool(tool_function, tool_args, question):
    # `question` is the original user question that led to this tool call
    # Start a span for the tool call and make it the current span
    with tracer.start_as_current_span(
        name="Tool - specific tool",
        # Set these attributes before invoking the tool, in case the tool raises an exception
        attributes={
            "fi.span.kind": "TOOL",
            "input.value": question,
            "message.tool_calls.0.tool_call.function.name": tool_function.__name__,
            "message.tool_calls.0.tool_call.function.arguments": json.dumps(tool_args),
        },
    ) as tool_span:
        # Run the tool; the output is a formatted prompt for chat completion
        resulting_prompt = tool_function(input=tool_args)
        # Optionally, set the resulting prompt as the tool span output
        tool_span.set_attribute(
            "message.tool_calls.0.tool_call.function.output", resulting_prompt
        )

        # This LLM span is nested under the tool span in the trace
        with tracer.start_as_current_span(
            name="Tool - LLM response",
            # Set these attributes before invoking the LLM
            attributes={
                "fi.span.kind": "LLM",
                "input.value": resulting_prompt,
            },
        ) as llm_span:
            llm_response = openai_client.chat.completions.create(
                model=model_version,
                messages=[{"role": "user", "content": resulting_prompt}],
                temperature=TEMPERATURE,
            )
            # Span attribute values must be primitives, so record the response text
            llm_span.set_attribute(
                "output.value", llm_response.choices[0].message.content
            )
            return llm_response
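To exercise the instrumentation end to end, pass run_tool any function that returns a prompt string. The tool below is a hypothetical stand-in invented for illustration; everything else comes from the snippet above.

def lookup_weather(input):
    # Toy tool: turn structured arguments into a prompt for the chat completion
    return f"Answer the user's question given this forecast for {input['city']}: sunny, 22 °C."

response = run_tool(
    lookup_weather,
    tool_args={"city": "Paris"},
    question="What's the weather like in Paris?",
)
print(response.choices[0].message.content)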