OpenAI
Integrate OpenAI with Future AGI for auto-instrumented tracing. Capture chat completions, embeddings, and tool calls with traceAI-openai.
1. Installation
First, install the traceAI package to access the observability framework.
pip install traceAI-openai

npm install @traceai/openai

2. Set Environment Variables
Set up your environment variables to authenticate with both FutureAGI and OpenAI services.
import os

# Placeholder credentials for OpenAI and Future AGI — replace with real
# keys (or export them in your shell) before running.
os.environ.update({
    "OPENAI_API_KEY": "your-openai-api-key",
    "FI_API_KEY": "your-futureagi-api-key",
})
os.environ["FI_SECRET_KEY"] = "your-futureagi-secret-key"

process.env.OPENAI_API_KEY = OPENAI_API_KEY;
process.env.FI_API_KEY = FI_API_KEY;
process.env.FI_SECRET_KEY = FI_SECRET_KEY;

3. Initialize Trace Provider
Set up the trace provider to create a new project in FutureAGI and establish the telemetry data pipelines.
# Register a Future AGI trace provider: creates (or selects) the named
# project on the platform and sets up the telemetry export pipeline.
from fi_instrumentation import register
from fi_instrumentation.fi_types import ProjectType
# ProjectType.OBSERVE routes the traces to the observability dashboard.
trace_provider = register(
project_type=ProjectType.OBSERVE,
project_name="openai_project",
)

import { register, ProjectType } from "@traceai/fi-core";
// TypeScript equivalent: register a tracer provider for the same project.
const tracerProvider = register({
project_type: ProjectType.OBSERVE,
project_name: "openai_project",
});

4. Instrument your Project
Instrument your project with the OpenAI instrumentor. This step ensures that all interactions with the OpenAI SDK are tracked and monitored.
from traceai_openai import OpenAIInstrumentor
OpenAIInstrumentor().instrument(tracer_provider=trace_provider)

import { OpenAIInstrumentation } from "@traceai/openai";
import { registerInstrumentations } from "@opentelemetry/instrumentation";
// Create the OpenAI instrumentation and attach it to the tracer provider
// so every OpenAI SDK call emits a trace span.
const openaiInstrumentation = new OpenAIInstrumentation({});
registerInstrumentations({
instrumentations: [openaiInstrumentation],
tracerProvider: tracerProvider,
});

5. Interact with OpenAI
Interact with the OpenAI SDK as you normally would. Our instrumentor will automatically trace the calls and send the telemetry data to our platform.
Chat Completion
from openai import OpenAI

# Standard OpenAI client — the instrumentor registered in step 4 traces
# this call automatically; no extra wiring is needed here.
client = OpenAI()

# Public sample image; the API fetches it server-side from this URL.
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"

# Multimodal chat completion: one text part plus one image part.
# (The original snippet also downloaded and base64-encoded the image with
# httpx, but never used the encoded data — that dead code is removed, and
# the duplicated URL literal now references image_url.)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": image_url},
                },
            ],
        },
    ],
)
print(response.choices[0].message.content)

import { OpenAI } from "openai";
const client = new OpenAI();
// Plain text chat completion; the instrumentation traces the request
// transparently.
const response = await client.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "What is the capital of South Africa?" }],
});
console.log(response.choices[0].message.content);

Audio and speech
import base64

import requests
from openai import OpenAI

client = OpenAI()

# Download the sample WAV file and base64-encode it for the API payload.
audio_url = "https://cdn.openai.com/API/docs/audio/alloy.wav"
resp = requests.get(audio_url)
resp.raise_for_status()
audio_b64 = base64.b64encode(resp.content).decode("utf-8")

# Ask the audio-capable model to describe the recording; the request mixes
# a text part with an input_audio part carrying the encoded WAV data.
completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "alloy", "format": "wav"},
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this recording?"},
                {
                    "type": "input_audio",
                    "input_audio": {"data": audio_b64, "format": "wav"},
                },
            ],
        },
    ],
)
Image Generation
from openai import OpenAI

client = OpenAI()

# Request a single 1024x1024 DALL-E 3 image and print its hosted URL.
generation = client.images.generate(
    model="dall-e-3",
    prompt="a horse running through a field of flowers",
    size="1024x1024",
    n=1,
)
print(generation.data[0].url)
Chat Streaming
from openai import OpenAI

client = OpenAI()

# Stream the completion: with stream=True the call returns an iterator of
# chunks instead of a single response object.
completion = client.chat.completions.create(
    model="gpt-4o",
    stream=True,
    messages=[
        {
            "role": "user",
            "content": "What is OpenAI?",
        },
    ],
)

# Guard each chunk: some chunks arrive with an empty choices list, and the
# final chunk's delta.content is None — the original loop printed "None".
for chunk in completion:
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")