 # Copyright (C) 2025 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

+from datetime import UTC, datetime
 from unittest.mock import MagicMock
 from uuid import uuid4

@@ -11,6 +12,7 @@
 from app.api.dependencies import get_pipeline_service
 from app.main import app
 from app.schemas import Pipeline, PipelineStatus
+from app.schemas.metrics import InferenceMetrics, LatencyMetrics, PipelineMetrics, TimeWindow
 from app.services import (
     PipelineService,
     ResourceAlreadyExistsError,
@@ -199,3 +201,98 @@ def test_delete_pipeline_in_use(self, fxt_pipeline, fxt_pipeline_service, fxt_client):

         assert response.status_code == status.HTTP_409_CONFLICT
         assert str(err) == response.json()["detail"]
+
+    def test_get_pipeline_metrics_success(self, fxt_pipeline, fxt_pipeline_service, fxt_client):
+        """Test successful retrieval of pipeline metrics with default time window."""
+        mock_metrics = PipelineMetrics(
+            time_window=TimeWindow(start=datetime.now(UTC), end=datetime.now(UTC), time_window=60),
+            inference=InferenceMetrics(
+                latency=LatencyMetrics(avg_ms=100.5, min_ms=50.0, max_ms=200.0, p95_ms=180.0, latest_ms=120.0)
+            ),
+        )
+        fxt_pipeline_service.get_pipeline_metrics.return_value = mock_metrics
+
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics")
+
+        assert response.status_code == status.HTTP_200_OK
+        fxt_pipeline_service.get_pipeline_metrics.assert_called_once_with(fxt_pipeline.id, 60)
+
+    def test_get_pipeline_metrics_invalid_pipeline_id(self, fxt_pipeline_service, fxt_client):
+        """Test metrics endpoint with invalid pipeline ID format."""
+        response = fxt_client.get("/api/pipelines/invalid-id/metrics")
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        fxt_pipeline_service.get_pipeline_metrics.assert_not_called()
+
+    def test_get_pipeline_metrics_pipeline_not_found(self, fxt_pipeline, fxt_pipeline_service, fxt_client):
+        """Test metrics endpoint when pipeline doesn't exist."""
+        fxt_pipeline_service.get_pipeline_metrics.side_effect = ResourceNotFoundError(
+            ResourceType.PIPELINE, str(fxt_pipeline.id)
+        )
+
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics")
+
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+        fxt_pipeline_service.get_pipeline_metrics.assert_called_once_with(fxt_pipeline.id, 60)
+
+    def test_get_pipeline_metrics_pipeline_not_running(self, fxt_pipeline, fxt_pipeline_service, fxt_client):
+        """Test metrics endpoint when pipeline is not in running state."""
+        fxt_pipeline_service.get_pipeline_metrics.side_effect = ValueError(
+            "Cannot get metrics for a pipeline that is not running."
+        )
+
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics")
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert "Cannot get metrics for a pipeline that is not running" in response.json()["detail"]
+        fxt_pipeline_service.get_pipeline_metrics.assert_called_once_with(fxt_pipeline.id, 60)
+
+    @pytest.mark.parametrize("invalid_time_window", [0, -1, 3601, 7200])
+    def test_get_pipeline_metrics_invalid_time_window(
+        self, invalid_time_window, fxt_pipeline, fxt_pipeline_service, fxt_client
+    ):
+        """Test metrics endpoint with invalid time window values."""
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics?time_window={invalid_time_window}")
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert "Duration must be between 1 and 3600 seconds" in response.json()["detail"]
+        fxt_pipeline_service.get_pipeline_metrics.assert_not_called()
+
+    @pytest.mark.parametrize("valid_time_window", [1, 30, 300, 1800, 3600])
+    def test_get_pipeline_metrics_valid_time_windows(
+        self, valid_time_window, fxt_pipeline, fxt_pipeline_service, fxt_client
+    ):
+        """Test metrics endpoint with various valid time window values."""
+        mock_metrics = PipelineMetrics(
+            time_window=TimeWindow(start=datetime.now(UTC), end=datetime.now(UTC), time_window=valid_time_window),
+            inference=InferenceMetrics(
+                latency=LatencyMetrics(avg_ms=100.0, min_ms=50.0, max_ms=200.0, p95_ms=180.0, latest_ms=120.0)
+            ),
+        )
+        fxt_pipeline_service.get_pipeline_metrics.return_value = mock_metrics
+
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics?time_window={valid_time_window}")
+
+        assert response.status_code == status.HTTP_200_OK
+        fxt_pipeline_service.get_pipeline_metrics.assert_called_once_with(fxt_pipeline.id, valid_time_window)
+
+    def test_get_pipeline_metrics_no_data_available(self, fxt_pipeline, fxt_pipeline_service, fxt_client):
+        """Test metrics endpoint when no latency data is available."""
+        mock_metrics = PipelineMetrics(
+            time_window=TimeWindow(start=datetime.now(UTC), end=datetime.now(UTC), time_window=60),
+            inference=InferenceMetrics(
+                latency=LatencyMetrics(avg_ms=None, min_ms=None, max_ms=None, p95_ms=None, latest_ms=None)
+            ),
+        )
+        fxt_pipeline_service.get_pipeline_metrics.return_value = mock_metrics
+
+        response = fxt_client.get(f"/api/pipelines/{str(fxt_pipeline.id)}/metrics")
+
+        assert response.status_code == status.HTTP_200_OK
+        response_data = response.json()
+        assert response_data["inference"]["latency"]["avg_ms"] is None
+        assert response_data["inference"]["latency"]["min_ms"] is None
+        assert response_data["inference"]["latency"]["max_ms"] is None
+        assert response_data["inference"]["latency"]["p95_ms"] is None
+        assert response_data["inference"]["latency"]["latest_ms"] is None
+        fxt_pipeline_service.get_pipeline_metrics.assert_called_once_with(fxt_pipeline.id, 60)
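
For reference, below is a minimal sketch of the schema shapes and route behaviour these tests pin down. It is not the project's actual implementation: the class and field names are taken from the test imports, the endpoint path, the 60-second default, the 1-3600 bound, and the error mapping are inferred from the assertions above, while the router wiring, the handler-level validation, and the import path of ResourceNotFoundError are assumptions.

# Illustrative sketch only, not the real app code. Names and behaviour are
# inferred from the tests in this diff; anything not visible there is assumed.
from datetime import datetime
from uuid import UUID

from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import BaseModel

from app.api.dependencies import get_pipeline_service
from app.services import ResourceNotFoundError  # assumed import path


class LatencyMetrics(BaseModel):
    # All values are optional: the endpoint returns nulls when no data is available.
    avg_ms: float | None = None
    min_ms: float | None = None
    max_ms: float | None = None
    p95_ms: float | None = None
    latest_ms: float | None = None


class InferenceMetrics(BaseModel):
    latency: LatencyMetrics


class TimeWindow(BaseModel):
    start: datetime
    end: datetime
    time_window: int  # window length in seconds


class PipelineMetrics(BaseModel):
    time_window: TimeWindow
    inference: InferenceMetrics


router = APIRouter(prefix="/api/pipelines")


@router.get("/{pipeline_id}/metrics", response_model=PipelineMetrics)
def get_pipeline_metrics(
    pipeline_id: str,
    time_window: int = 60,
    pipeline_service=Depends(get_pipeline_service),
) -> PipelineMetrics:
    # Non-UUID ids are rejected before the service is called
    # (the tests assert get_pipeline_metrics.assert_not_called()).
    try:
        parsed_id = UUID(pipeline_id)
    except ValueError:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, detail="Invalid pipeline ID")

    # Checked in the handler so the API answers 400 with a domain message
    # rather than FastAPI's default 422 validation response.
    if not 1 <= time_window <= 3600:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, detail="Duration must be between 1 and 3600 seconds")

    try:
        return pipeline_service.get_pipeline_metrics(parsed_id, time_window)
    except ResourceNotFoundError as err:
        raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(err))
    except ValueError as err:  # e.g. "Cannot get metrics for a pipeline that is not running."
        raise HTTPException(status.HTTP_400_BAD_REQUEST, detail=str(err))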
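The fixtures referenced throughout (fxt_client, fxt_pipeline_service, fxt_pipeline) are not part of this diff; a plausible wiring, assuming the usual FastAPI dependency_overrides pattern, is sketched below. The real conftest may build a full Pipeline object rather than a MagicMock.

# Hypothetical conftest.py wiring; the actual fixtures live elsewhere in the repo.
from unittest.mock import MagicMock
from uuid import uuid4

import pytest
from fastapi.testclient import TestClient

from app.api.dependencies import get_pipeline_service
from app.main import app
from app.services import PipelineService


@pytest.fixture
def fxt_pipeline():
    # Only the .id attribute is used by the metrics tests.
    pipeline = MagicMock()
    pipeline.id = uuid4()
    return pipeline


@pytest.fixture
def fxt_pipeline_service():
    return MagicMock(spec=PipelineService)


@pytest.fixture
def fxt_client(fxt_pipeline_service):
    # Route requests to the mocked service instead of the real dependency,
    # so the tests can assert on get_pipeline_metrics calls directly.
    app.dependency_overrides[get_pipeline_service] = lambda: fxt_pipeline_service
    yield TestClient(app)
    app.dependency_overrides.clear()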