Get Simulation Metrics

Retrieve aggregated system metrics — latency, cost, and conversation analytics — for a simulation run.

Get Simulation Metrics

Returns system-level performance metrics for simulation executions. Supports three query modes based on the level of detail needed.

GET https://api.futureagi.com/sdk/api/v1/simulation/metrics/

Authentication

This endpoint uses API key authentication. Include both headers in every request:

X-Api-Key: YOUR_API_KEY
X-Secret-Key: YOUR_SECRET_KEY

Query Parameters

| Parameter | Type | Required | Description |
| --- | --- | --- | --- |
| run_test_name | string | One of these three | Name of the run test. Returns a paginated list of executions with aggregated metrics. |
| execution_id | UUID | One of these three | UUID of a test execution. Returns aggregated metrics for that execution. |
| call_execution_id | UUID | One of these three | UUID of a call execution. Returns raw per-call metrics. |
| page | integer | No | Page number for paginated results. Default: 1. |
| limit | integer | No | Number of results per page. Default: 10. |

Exactly one of run_test_name, execution_id, or call_execution_id must be provided; it selects the query mode. page and limit apply only to the run_test_name mode.

Responses

200 — By call_execution_id

Returns raw metrics for a single call.

{
  "status": true,
  "result": {
    "call_execution_id": "5af9e484-...",
    "execution_id": "2b19f6e6-...",
    "status": "completed",
    "duration_seconds": 88,
    "latency": {
      "avg_agent_latency_ms": 1234,
      "response_time_ms": null,
      "customer_latency_metrics": {
        "bot_wpm": 233.69,
        "user_wpm": 214.37,
        "talk_ratio": 0.217,
        "ai_interruption_rate": 0.67,
        "avg_agent_latency_ms": 1234
      }
    },
    "cost": {
      "total_cost_cents": 24,
      "stt_cost_cents": 0,
      "llm_cost_cents": 0,
      "tts_cost_cents": 0,
      "customer_cost_breakdown": {}
    },
    "conversation": {
      "user_wpm": 214.37,
      "bot_wpm": 233.69,
      "talk_ratio": 0.217,
      "user_interruption_count": 0,
      "user_interruption_rate": 0.0,
      "ai_interruption_count": 1,
      "ai_interruption_rate": 0.67,
      "avg_stop_time_after_interruption_ms": null
    },
    "chat_metrics": {
      "input_tokens": 12685,
      "total_tokens": 12885,
      "output_tokens": 200,
      "message_count": 15,
      "turn_count": 10
    }
  }
}

200 — By execution_id

Returns aggregated metrics across all calls in the execution.

{
  "status": true,
  "result": {
    "execution_id": "5819e158-...",
    "status": "completed",
    "started_at": "2025-11-30T06:57:38.592Z",
    "completed_at": "2025-11-30T07:17:57.583Z",
    "total_calls": 30,
    "completed_calls": 27,
    "failed_calls": 0,
    "metrics": {
      "latency": {
        "avg_agent_latency_ms": 2887.0,
        "avg_response_time_ms": 3123.0,
        "percentiles": {
          "p50": 3199.5,
          "p95": 3445.8,
          "p99": 3465.2
        }
      },
      "cost": {
        "total_duration_seconds": 69
      },
      "conversation": {
        "avg_user_wpm": 147.0,
        "avg_bot_wpm": 253.0,
        "avg_talk_ratio": 6.73,
        "avg_user_interruption_rate": 1.52,
        "avg_ai_interruption_rate": 0.0,
        "avg_stop_time_after_interruption_ms": 4770.0
      },
      "chat": {
        "avg_total_tokens": 0.0,
        "avg_input_tokens": 0.0,
        "avg_output_tokens": 0.0,
        "avg_chat_latency_ms": 0.0,
        "avg_turn_count": 0.0,
        "avg_csat_score": 0.0
      },
      "calls": {
        "total": 30,
        "completed": 27,
        "failed": 0,
        "pending": 0
      }
    }
  }
}

200 — By run_test_name

Returns a paginated list of executions, each with aggregated metrics.

{
  "status": true,
  "result": {
    "total_pages": 5,
    "current_page": 1,
    "count": 50,
    "results": [
      {
        "execution_id": "...",
        "status": "completed",
        "started_at": "...",
        "completed_at": "...",
        "total_calls": 30,
        "completed_calls": 27,
        "failed_calls": 0,
        "metrics": { ... }
      }
    ]
  }
}

400

Missing or invalid parameters.

404

The specified run test, execution, or call execution was not found.

500

Internal server error.

Code Examples

cURL

# Get metrics for a specific execution
curl "https://api.futureagi.com/sdk/api/v1/simulation/metrics/?execution_id=YOUR_EXECUTION_ID" \
  -H "X-Api-Key: YOUR_API_KEY" \
  -H "X-Secret-Key: YOUR_SECRET_KEY"

# Get metrics for all executions of a run test
curl "https://api.futureagi.com/sdk/api/v1/simulation/metrics/?run_test_name=My%20Agent%20Test&limit=5" \
  -H "X-Api-Key: YOUR_API_KEY" \
  -H "X-Secret-Key: YOUR_SECRET_KEY"

# Get raw metrics for a single call
curl "https://api.futureagi.com/sdk/api/v1/simulation/metrics/?call_execution_id=YOUR_CALL_ID" \
  -H "X-Api-Key: YOUR_API_KEY" \
  -H "X-Secret-Key: YOUR_SECRET_KEY"

Python

import requests

url = "https://api.futureagi.com/sdk/api/v1/simulation/metrics/"
headers = {
    "X-Api-Key": "YOUR_API_KEY",
    "X-Secret-Key": "YOUR_SECRET_KEY",
}

# By execution ID
response = requests.get(url, headers=headers, params={
    "execution_id": "YOUR_EXECUTION_ID"
})
data = response.json()
metrics = data["result"]["metrics"]
print(f"P95 Latency: {metrics['latency']['percentiles']['p95']}ms")

JavaScript

const response = await fetch(
  "https://api.futureagi.com/sdk/api/v1/simulation/metrics/?execution_id=YOUR_EXECUTION_ID",
  {
    headers: {
      "X-Api-Key": "YOUR_API_KEY",
      "X-Secret-Key": "YOUR_SECRET_KEY",
    },
  }
);

const data = await response.json();
console.log(data.result.metrics.latency.percentiles);
GET /
Authentication
REQUEST
 
RESPONSE