|
18 | 18 | from ddtrace.testing.internal.git import get_workspace_path |
19 | 19 | from ddtrace.testing.internal.logging import catch_and_log_exceptions |
20 | 20 | from ddtrace.testing.internal.logging import setup_logging |
| 21 | +from ddtrace.testing.internal.pytest.benchmark import BenchmarkData |
| 22 | +from ddtrace.testing.internal.pytest.benchmark import get_benchmark_tags_and_metrics |
21 | 23 | from ddtrace.testing.internal.retry_handlers import RetryHandler |
22 | 24 | from ddtrace.testing.internal.session_manager import SessionManager |
23 | 25 | from ddtrace.testing.internal.telemetry import TelemetryAPI |
@@ -139,6 +141,7 @@ def __init__(self, session_manager: SessionManager) -> None: |
139 | 141 | self.enable_ddtrace = False |
140 | 142 | self.reports_by_nodeid: t.Dict[str, _ReportGroup] = defaultdict(lambda: {}) |
141 | 143 | self.excinfo_by_report: t.Dict[pytest.TestReport, t.Optional[pytest.ExceptionInfo[t.Any]]] = {} |
| 144 | + self.benchmark_data_by_nodeid: t.Dict[str, BenchmarkData] = {} |
142 | 145 | self.tests_by_nodeid: t.Dict[str, Test] = {} |
143 | 146 | self.is_xdist_worker = False |
144 | 147 |
|
@@ -282,10 +285,7 @@ def pytest_runtest_protocol_wrapper( |
282 | 285 | ) |
283 | 286 | test_run = test.make_test_run() |
284 | 287 | test_run.start(start_ns=test.start_ns) |
285 | | - status, tags = self._get_test_outcome(item.nodeid) |
286 | | - test_run.set_status(status) |
287 | | - test_run.set_tags(tags) |
288 | | - test_run.set_context(context) |
| 288 | + self._set_test_run_data(test_run, item, context) |
289 | 289 | test_run.finish() |
290 | 290 | test.set_status(test_run.get_status()) # TODO: this should be automatic? |
291 | 291 | self.manager.writer.put_item(test_run) |
@@ -323,10 +323,7 @@ def _do_one_test_run( |
323 | 323 | TelemetryAPI.get().record_test_created(test_framework=TEST_FRAMEWORK, test_run=test_run) |
324 | 324 |
|
325 | 325 | reports = _make_reports_dict(runtestprotocol(item, nextitem=nextitem, log=False)) |
326 | | - status, tags = self._get_test_outcome(item.nodeid) |
327 | | - test_run.set_status(status) |
328 | | - test_run.set_tags(tags) |
329 | | - test_run.set_context(context) |
| 326 | + self._set_test_run_data(test_run, item, context) |
330 | 327 |
|
331 | 328 | TelemetryAPI.get().record_test_finished( |
332 | 329 | test_framework=TEST_FRAMEWORK, |
@@ -354,6 +351,17 @@ def _do_test_runs(self, item: pytest.Item, nextitem: t.Optional[pytest.Item]) -> |
354 | 351 | test.set_status(test_run.get_status()) # TODO: this should be automatic? |
355 | 352 | self.manager.writer.put_item(test_run) |
356 | 353 |
|
| 354 | + def _set_test_run_data(self, test_run: TestRun, item: pytest.Item, context: TestContext) -> None: |
| 355 | + status, tags = self._get_test_outcome(item.nodeid) |
| 356 | + test_run.set_status(status) |
| 357 | + test_run.set_tags(tags) |
| 358 | + test_run.set_context(context) |
| 359 | + |
|  | 360 | + if benchmark_data := self.benchmark_data_by_nodeid.pop(item.nodeid, None):
| 361 | + test_run.set_tags(benchmark_data.tags) |
| 362 | + test_run.set_metrics(benchmark_data.metrics) |
| 363 | + test_run.mark_benchmark() |
| 364 | + |
357 | 365 | def _do_retries( |
358 | 366 | self, |
359 | 367 | item: pytest.Item, |
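
For context, here is a minimal sketch of a container compatible with how `_set_test_run_data` consumes `benchmark_data` (it reads `.tags` and `.metrics` and relies on truthiness). The real `BenchmarkData` lives in `ddtrace.testing.internal.pytest.benchmark` and is not shown in this diff, so the fields and defaults below are assumptions, not the actual implementation.

```python
# Illustrative sketch only -- the actual BenchmarkData in
# ddtrace.testing.internal.pytest.benchmark may be defined differently.
import dataclasses
import typing as t


@dataclasses.dataclass
class BenchmarkData:
    # String tags attached to the test run (e.g. benchmark group/name).
    tags: t.Dict[str, str] = dataclasses.field(default_factory=dict)
    # Numeric measurements (e.g. mean duration) recorded as metrics.
    metrics: t.Dict[str, float] = dataclasses.field(default_factory=dict)

    def __bool__(self) -> bool:
        # Truthy only when there is something to report, matching the
        # `if benchmark_data := ...` checks in the plugin code above.
        return bool(self.tags or self.metrics)
```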
@@ -526,6 +534,11 @@ def pytest_runtest_makereport( |
526 | 534 | self.reports_by_nodeid[item.nodeid][call.when] = report |
527 | 535 | self.excinfo_by_report[report] = call.excinfo |
528 | 536 |
|
| 537 | + if call.when == TestPhase.TEARDOWN: |
| 538 | + # We need to extract pytest-benchmark data _before_ the fixture teardown. |
| 539 | + if benchmark_data := get_benchmark_tags_and_metrics(item): |
| 540 | + self.benchmark_data_by_nodeid[item.nodeid] = benchmark_data |
| 541 | + |
529 | 542 | def pytest_report_teststatus(self, report: pytest.TestReport) -> t.Optional[_ReportTestStatus]: |
530 | 543 | if retry_outcome := _get_user_property(report, "dd_retry_outcome"): |
531 | 544 | retry_reason = _get_user_property(report, "dd_retry_reason") |
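
And a rough sketch of what the teardown-time extraction might look like. The actual `get_benchmark_tags_and_metrics` is not part of this diff; the pytest-benchmark attribute layout assumed here (`item.funcargs["benchmark"]`, a `stats` object exposing aggregates) and the tag/metric names are assumptions, guarded with `getattr` so the sketch simply returns `None` for non-benchmark tests.

```python
# Hypothetical sketch of get_benchmark_tags_and_metrics -- the real helper in
# ddtrace.testing.internal.pytest.benchmark may read different attributes.
import typing as t

import pytest


def get_benchmark_tags_and_metrics(item: pytest.Item) -> t.Optional["BenchmarkData"]:
    # pytest-benchmark injects its fixture under the name `benchmark`; tests
    # that never requested it simply won't have it in funcargs.
    fixture = getattr(item, "funcargs", {}).get("benchmark")
    stats_holder = getattr(fixture, "stats", None) if fixture is not None else None
    if stats_holder is None:
        return None  # not a benchmark test, or the benchmark never ran

    # Assumed layout: the fixture's stats object exposes per-run aggregates.
    stats = getattr(stats_holder, "stats", stats_holder)
    metrics = {
        name: value
        for name, value in (
            ("benchmark.duration.mean", getattr(stats, "mean", None)),
            ("benchmark.duration.min", getattr(stats, "min", None)),
            ("benchmark.duration.max", getattr(stats, "max", None)),
        )
        if value is not None
    }
    tags = {"test.type": "benchmark"}  # assumed tag name, for illustration only
    return BenchmarkData(tags=tags, metrics=metrics) if metrics else None
```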
|