34 changes: 34 additions & 0 deletions ddtrace/_version.py
@@ -0,0 +1,34 @@
# file generated by setuptools-scm
# don't change, don't track in version control

__all__ = [
"__version__",
"__version_tuple__",
"version",
"version_tuple",
"__commit_id__",
"commit_id",
]

TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Tuple
from typing import Union

VERSION_TUPLE = Tuple[Union[int, str], ...]
COMMIT_ID = Union[str, None]
else:
VERSION_TUPLE = object
COMMIT_ID = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE
commit_id: COMMIT_ID
__commit_id__: COMMIT_ID

__version__ = version = '4.1.0.dev7+g9e0f1f8dd.d20251204'
__version_tuple__ = version_tuple = (4, 1, 0, 'dev7', 'g9e0f1f8dd.d20251204')

__commit_id__ = commit_id = 'g9e0f1f8dd'
5 changes: 5 additions & 0 deletions ddtrace/llmobs/_llmobs.py
@@ -1762,6 +1762,11 @@ def submit_evaluation(
"Failed to parse tags. Tags for evaluation metrics must be strings."
)

# Auto-add source:otel tag when OTel tracing is enabled
# This allows the backend to wait for OTel span conversion
if config._otel_trace_enabled:
evaluation_tags["source"] = "otel"

evaluation_metric: LLMObsEvaluationMetricEvent = {
"join_on": join_on,
"label": str(label),
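For reviewers, a minimal usage sketch of the behavior this hunk introduces. It assumes `DD_TRACE_OTEL_ENABLED=true` is what drives `config._otel_trace_enabled` (as the tests below exercise via a patch); the `LLMObs.enable()` call and environment setup are illustrative and not part of this change:

# Illustrative sketch (not part of the diff): with OTel tracing enabled, the SDK
# is expected to append "source:otel" to every submitted evaluation's tags.
import os

os.environ["DD_TRACE_OTEL_ENABLED"] = "true"  # read when ddtrace builds its config

from ddtrace.llmobs import LLMObs

LLMObs.enable(ml_app="test-app")

LLMObs.submit_evaluation(
    span={"span_id": "123", "trace_id": "456"},
    label="quality",
    metric_type="score",
    value=0.9,
    ml_app="test-app",
)
# The enqueued evaluation metric event should now carry the "source:otel" tag,
# letting the backend hold the evaluation until the OTel span is converted.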
46 changes: 46 additions & 0 deletions tests/llmobs/test_llmobs_otel_evaluation.py
@@ -0,0 +1,46 @@
"""
Tests for automatic source:otel tag on evaluations when OTel tracing is enabled.

When DD_TRACE_OTEL_ENABLED=true, all evaluations should have `source:otel` tag
to allow the backend to wait for OTel span conversion (~3 minutes) before
discarding unmatched evaluations.
"""

import mock


def test_submit_evaluation_adds_source_otel_when_otel_enabled(llmobs, mock_llmobs_eval_metric_writer):
"""Verify source:otel tag is auto-added when DD_TRACE_OTEL_ENABLED=true."""
with mock.patch("ddtrace.llmobs._llmobs.config._otel_trace_enabled", True):
llmobs.submit_evaluation(
span={"span_id": "123", "trace_id": "456"},
label="quality",
metric_type="score",
value=0.9,
ml_app="test-app",
)

mock_llmobs_eval_metric_writer.enqueue.assert_called_once()
call_args = mock_llmobs_eval_metric_writer.enqueue.call_args[0][0]

assert "tags" in call_args
assert "source:otel" in call_args["tags"]


def test_submit_evaluation_no_source_otel_when_otel_disabled(llmobs, mock_llmobs_eval_metric_writer):
"""Verify source:otel tag is NOT added when DD_TRACE_OTEL_ENABLED=false (default)."""
with mock.patch("ddtrace.llmobs._llmobs.config._otel_trace_enabled", False):
llmobs.submit_evaluation(
span={"span_id": "123", "trace_id": "456"},
label="quality",
metric_type="score",
value=0.9,
ml_app="test-app",
)

mock_llmobs_eval_metric_writer.enqueue.assert_called_once()
call_args = mock_llmobs_eval_metric_writer.enqueue.call_args[0][0]

assert "tags" in call_args
assert "source:otel" not in call_args["tags"]
