# Python Tracer Setup (Illustrative)
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
# Set up a tracer provider
trace.set_tracer_provider(TracerProvider())
tracer_provider = trace.get_tracer_provider()
# Configure an exporter (e.g., ConsoleExporter for demonstration)
exporter = ConsoleSpanExporter()
span_processor = SimpleSpanProcessor(exporter)
tracer_provider.add_span_processor(span_processor)
# Get a tracer
tracer = trace.get_tracer(__name__)
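# Optional sketch (assumption, not part of the original setup): for a real backend you
# would typically swap the console exporter for an OTLP exporter behind a
# BatchSpanProcessor. Requires the 'opentelemetry-exporter-otlp' package; the endpoint
# below is illustrative.
# from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
# from opentelemetry.sdk.trace.export import BatchSpanProcessor
# otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317", insecure=True)
# tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))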
# Configure the OpenAI client (or reuse a client configured elsewhere)
import openai
openai_client = openai.OpenAI()
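# Note (assumption): openai.OpenAI() reads the OPENAI_API_KEY environment variable by
# default; pass api_key="..." explicitly if your key is stored elsewhere.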
import json
# Ensure 'tracer' is defined from the setup section above.
# Ensure 'openai_client' is defined, e.g., from openai library
# Placeholder definitions for the example
question = "What is the weather like in London?"
def example_tool_function(input_args):
    print(f"Tool received: {input_args}")
    return f"The weather in {input_args.get('city', 'default city')} is sunny."
tool_args_example = {"city": "London"}
model_version_example = "gpt-3.5-turbo"
current_user_message_example = [{"role": "user", "content": question}]
TEMPERATURE_example = 0.7
def run_tool_py(tool_function, tool_args, current_question, openai_client_instance, model, messages, temp):
    # Begin by setting the context for the current span
    with tracer.start_as_current_span(
        name="Tool - specific tool",
        attributes={
            # Set these attributes prior to invoking the tool, in case the tool raises an exception
            "fi.span.kind": "TOOL",
            "input.value": current_question,
            "message.tool_calls.0.tool_call.function.name": tool_function.__name__,
            "message.tool_calls.0.tool_call.function.arguments": json.dumps(
                tool_args
            ),
        },
    ) as tool_span:
        # Run the tool; the output is a formatted prompt for chat completion
        resulting_prompt = tool_function(input_args=tool_args)
        # Optionally, set the resulting prompt as the tool span output
        tool_span.set_attribute(
            "message.tool_calls.0.tool_call.function.output", resulting_prompt
        )
        # This LLM span is nested under the tool span in the trace
        with tracer.start_as_current_span(
            name="Tool - LLM response",
            # Set these attributes before invoking the LLM
            attributes={
                "fi.span.kind": "LLM",
                "input.value": resulting_prompt,
            },
        ) as llm_span:
            # llm_response = openai_client_instance.chat.completions.create(
            #     model=model,
            #     messages=messages,
            #     temperature=temp,
            # )
            # llm_span.set_attribute("output.value", str(llm_response))  # Convert to string if necessary
            llm_response_example = "LLM response based on tool output."
            llm_span.set_attribute("output.value", llm_response_example)
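# Optional sketch (assumption, not shown in the original): because the TOOL attributes
# are set before the tool runs, a failure can still be recorded on the same span, e.g.:
# from opentelemetry.trace import Status, StatusCode
# try:
#     resulting_prompt = tool_function(input_args=tool_args)
# except Exception as exc:
#     tool_span.record_exception(exc)
#     tool_span.set_status(Status(StatusCode.ERROR, str(exc)))
#     raise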
# Example call: with the ConsoleSpanExporter and SimpleSpanProcessor configured above,
# the TOOL span and its nested LLM span are printed to stdout as each span ends.
if __name__ == "__main__":
    run_tool_py(example_tool_function, tool_args_example, question, openai_client,
                model_version_example, current_user_message_example, TEMPERATURE_example)