Skip to content

Commit 15e61ec

Browse files
authored
fix(llmobs): swallow LLMObsAnnotateSpanError on auto-annotation in @llm decorator (#17093)
## Summary - Fixes a regression introduced in #16892 where the `@llm` decorator raised `LLMObsAnnotateSpanError: Failed to parse output messages` when a decorated function returned a value that couldn't be parsed as LLM messages (e.g. a plain string, integer, or non-messages dict). - The decorator now catches `LLMObsAnnotateSpanError` from auto-annotation, logs a debug message, and continues — the user's function still succeeds and the span is still created. - Also adds the missing `operation_kind != "embedding"` guard from the 4.6 backport branch to `main`. ## Test plan - [x] Two regression tests added (sync + async) verifying the debug message is logged and no exception is raised - [x] Full lint checks pass Co-authored-by: zach.groves <zach.groves@datadoghq.com>
1 parent 876c5f1 commit 15e61ec

File tree

3 files changed

+63
-2
lines changed

3 files changed

+63
-2
lines changed

ddtrace/llmobs/decorators.py

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from ddtrace.llmobs._constants import OUTPUT_MESSAGES
1414
from ddtrace.llmobs._constants import OUTPUT_VALUE
1515
from ddtrace.llmobs._constants import SPAN_START_WHILE_DISABLED_WARNING
16+
from ddtrace.llmobs._llmobs import LLMObsAnnotateSpanError
1617

1718

1819
log = get_logger(__name__)
@@ -107,10 +108,18 @@ async def wrapper(*args, **kwargs):
107108
resp = await func(*args, **kwargs)
108109
if (
109110
resp is not None
111+
and operation_kind != "embedding"
110112
and span._get_ctx_item(OUTPUT_VALUE) is None
111113
and span._get_ctx_item(OUTPUT_MESSAGES) is None
112114
):
113-
LLMObs.annotate(span=span, output_data=resp)
115+
try:
116+
LLMObs.annotate(span=span, output_data=resp)
117+
except LLMObsAnnotateSpanError:
118+
log.debug(
119+
"Failed to auto-annotate output for @%s decorated function. "
120+
"Use LLMObs.annotate() to manually annotate the output.",
121+
operation_kind,
122+
)
114123
return resp
115124

116125
else:
@@ -159,10 +168,18 @@ def wrapper(*args, **kwargs):
159168
resp = func(*args, **kwargs)
160169
if (
161170
resp is not None
171+
and operation_kind != "embedding"
162172
and span._get_ctx_item(OUTPUT_VALUE) is None
163173
and span._get_ctx_item(OUTPUT_MESSAGES) is None
164174
):
165-
LLMObs.annotate(span=span, output_data=resp)
175+
try:
176+
LLMObs.annotate(span=span, output_data=resp)
177+
except LLMObsAnnotateSpanError:
178+
log.debug(
179+
"Failed to auto-annotate output for @%s decorated function. "
180+
"Use LLMObs.annotate() to manually annotate the output.",
181+
operation_kind,
182+
)
166183
return resp
167184

168185
return generator_wrapper if (isgeneratorfunction(func) or isasyncgenfunction(func)) else wrapper
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
---
2+
fixes:
3+
- |
4+
LLM Observability: Fixes an issue where the ``@llm`` decorator raised a
5+
``LLMObsAnnotateSpanError`` exception when
6+
a decorated function returned a value that could not be parsed as LLM messages.
7+
Note that manual annotation still overrides this automatic annotation.

tests/llmobs/test_llmobs_decorators.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -305,6 +305,43 @@ async def f():
305305
)
306306

307307

308+
def test_llm_decorator_unparseable_output_logs_warning_not_raises(llmobs, llmobs_events, mock_logs, test_spans):
309+
"""Test that @llm decorator does not raise when return value cannot be parsed as messages."""
310+
311+
@llm(model_name="test_model", model_provider="test_provider", name="test_function")
312+
def f():
313+
return 42 # int cannot be parsed as LLM messages
314+
315+
f() # should not raise LLMObsAnnotateSpanError
316+
mock_logs.debug.assert_called_once_with(
317+
"Failed to auto-annotate output for @%s decorated function. "
318+
"Use LLMObs.annotate() to manually annotate the output.",
319+
"llm",
320+
)
321+
test_spans.pop()
322+
# span is still created, output messages are not set
323+
assert llmobs_events[0].get("output", {}) == {}
324+
325+
326+
async def test_llm_decorator_unparseable_output_logs_warning_not_raises_async(
327+
llmobs, llmobs_events, mock_logs, test_spans
328+
):
329+
"""Test that async @llm decorator does not raise when return value cannot be parsed as messages."""
330+
331+
@llm(model_name="test_model", model_provider="test_provider", name="test_function")
332+
async def f():
333+
return 42 # int cannot be parsed as LLM messages
334+
335+
await f() # should not raise LLMObsAnnotateSpanError
336+
mock_logs.debug.assert_called_once_with(
337+
"Failed to auto-annotate output for @%s decorated function. "
338+
"Use LLMObs.annotate() to manually annotate the output.",
339+
"llm",
340+
)
341+
test_spans.pop()
342+
assert llmobs_events[0].get("output", {}) == {}
343+
344+
308345
def test_llm_decorator_manual_annotation_not_overridden(llmobs, llmobs_events, test_spans):
309346
"""Test that manual LLMObs.annotate() is not overridden by automatic output annotation."""
310347

0 commit comments

Comments
 (0)