Commit d14fa4d

Author: Andrei Bratu
Parent: 69686bd

introduce async decorators

File tree: 15 files changed, +620 / -46 lines

pytest.ini

Lines changed: 1 addition & 0 deletions

@@ -1,2 +1,3 @@
 [pytest]
 addopts = -n auto
+asyncio_mode = auto
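
With `asyncio_mode = auto`, pytest-asyncio collects and runs plain `async def` tests without requiring a `@pytest.mark.asyncio` marker on each one, which the async decorator tests added in this commit presumably rely on. A minimal sketch of what this enables (the test name and body are illustrative):

```python
# test_asyncio_mode.py -- collected and run by pytest-asyncio in auto mode;
# no @pytest.mark.asyncio marker is needed on the coroutine.
import asyncio


async def test_coroutines_are_collected():
    await asyncio.sleep(0)  # any awaitable work
    assert True
```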

src/humanloop/client.py

Lines changed: 170 additions & 2 deletions

@@ -10,8 +10,10 @@

 from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
 from humanloop.core.client_wrapper import SyncClientWrapper
-from humanloop.decorators.flow import flow as flow_decorator_factory
-from humanloop.decorators.prompt import prompt_decorator_factory
+from humanloop.decorators.flow import a_flow_decorator_factory as a_flow_decorator_factory
+from humanloop.decorators.flow import flow_decorator_factory as flow_decorator_factory
+from humanloop.decorators.prompt import a_prompt_decorator_factory, prompt_decorator_factory
+from humanloop.decorators.tool import a_tool_decorator_factory as a_tool_decorator_factory
 from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
 from humanloop.environment import HumanloopEnvironment
 from humanloop.evals import run_eval
@@ -273,6 +275,50 @@ def call_llm(messages):
         """
         return prompt_decorator_factory(path=path)

+    def a_prompt(
+        self,
+        *,
+        path: str,
+    ):
+        """Auto-instrument calls to LLM providers made inside async functions and
+        create [Prompt](https://humanloop.com/docs/explanation/prompts) Logs on
+        Humanloop from them.
+
+        ```python
+        @a_prompt(path="My Async Prompt")
+        async def call_llm_async(messages):
+            client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            response = await client.chat.completions.create(
+                model="gpt-4o",
+                temperature=0.8,
+                frequency_penalty=0.5,
+                max_tokens=200,
+                messages=messages,
+            )
+            return response.choices[0].message.content
+
+        # Calling the function above creates a new Log on Humanloop
+        # against this Prompt version:
+        {
+            "provider": "openai",
+            "model": "gpt-4o",
+            "endpoint": "chat",
+            "max_tokens": 200,
+            "temperature": 0.8,
+            "frequency_penalty": 0.5,
+        }
+        ```
+
+        If a different model, endpoint, or hyperparameter is used, a new
+        Prompt version is created.
+
+        :param path: The path where the Prompt is created. If not
+            provided, the function name is used as the path and the File
+            is created in the root of your Humanloop organization workspace.
+        """
+        return a_prompt_decorator_factory(path=path)
+
     def tool(
         self,
         *,
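
For reviewers, a minimal end-to-end sketch of how the new `a_prompt` method would be used from a client instance (assumes a configured `Humanloop` client and the `openai` package; paths and keys are placeholders):

```python
import asyncio
import os

from humanloop import Humanloop
from openai import AsyncOpenAI

humanloop = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))
openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))


@humanloop.a_prompt(path="demo/async-assistant")
async def ask(messages: list) -> str:
    # The decorator auto-instruments this provider call and logs it
    # against the Prompt File at "demo/async-assistant".
    response = await openai_client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
    )
    return response.choices[0].message.content


print(asyncio.run(ask([{"role": "user", "content": "Hello!"}])))
```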
@@ -331,6 +377,64 @@ def calculator(a: int, b: Optional[int]) -> int:
             setup_values=setup_values,
         )

+    def a_tool(
+        self,
+        *,
+        path: str,
+        attributes: Optional[dict[str, Any]] = None,
+        setup_values: Optional[dict[str, Any]] = None,
+    ):
+        """Manage async [Tool](https://humanloop.com/docs/explanation/tools) Files through code.
+
+        The decorator inspects the wrapped async function's source code to infer the Tool's
+        JSON Schema. If the function declaration changes, a new Tool version
+        is upserted with an updated JSON Schema.
+
+        For example:
+
+        ```python
+        # Adding @a_tool on this function
+        @humanloop_client.a_tool(path="async_calculator")
+        async def async_calculator(a: int, b: Optional[int]) -> int:
+            \"\"\"Add two numbers together asynchronously.\"\"\"
+            return a + b
+
+        # Creates a Tool with this JSON Schema:
+        {
+            "strict": True,
+            "function": {
+                "name": "async_calculator",
+                "description": "Add two numbers together asynchronously.",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "a": {"type": "integer"},
+                        "b": {"type": "integer"},
+                    },
+                    "required": ["a"],
+                },
+            },
+        }
+        ```
+
+        The return value of the decorated function must be JSON serializable.
+
+        If the function raises an exception, the created Log will have `output`
+        set to null, and the `error` field populated.
+
+        :param path: The path of the File in the Humanloop workspace.
+
+        :param setup_values: Values needed to set up the Tool, defined in
+            [JSON Schema](https://json-schema.org/).
+
+        :param attributes: Additional fields to describe the Tool. Helpful to separate
+            Tool versions from each other with details on how they were created or used.
+        """
+        return a_tool_decorator_factory(
+            opentelemetry_tracer=self._opentelemetry_tracer,
+            path=path,
+            attributes=attributes,
+            setup_values=setup_values,
+        )
+
     def flow(
         self,
         *,
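
Similarly, a sketch of `a_tool` in use (assumes a configured `Humanloop` client; the path is a placeholder):

```python
import asyncio
import os
from typing import Optional

from humanloop import Humanloop

humanloop = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))


@humanloop.a_tool(path="demo/async_calculator")
async def async_calculator(a: int, b: Optional[int] = None) -> int:
    """Add two numbers together asynchronously."""
    return a + (b or 0)


# Awaiting the decorated coroutine executes it, upserts the Tool's
# JSON Schema if the signature changed, and creates a Tool Log.
print(asyncio.run(async_calculator(1, 2)))  # 3
```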
@@ -394,6 +498,70 @@ def agent():
             attributes=attributes,
         )

+    def a_flow(
+        self,
+        *,
+        path: str,
+        attributes: Optional[dict[str, Any]] = None,
+    ):
+        """Trace SDK logging calls through [Flows](https://humanloop.com/docs/explanation/flows) for async functions.
+
+        Use it as the entrypoint of your async LLM feature. Logging calls like `prompts.call(...)`,
+        `tools.call(...)`, or other Humanloop decorators will be automatically added to the trace.
+
+        For example:
+
+        ```python
+        @a_prompt(path="My Prompt")
+        async def call_llm_async(messages):
+            client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            response = await client.chat.completions.create(
+                model="gpt-4o",
+                temperature=0.8,
+                frequency_penalty=0.5,
+                max_tokens=200,
+                messages=messages,
+            )
+            return response.choices[0].message.content
+
+        @a_flow(attributes={"version": "v1"})
+        async def async_agent():
+            messages = []
+            while True:
+                user_input = input("You: ")
+                if user_input == "exit":
+                    break
+                messages.append({"role": "user", "content": user_input})
+                response = await call_llm_async(messages)
+                messages.append({"role": "assistant", "content": response})
+                print(f"Assistant: {response}")
+        ```
+
+        Each call to async_agent creates a trace corresponding to the conversation
+        session. Multiple Prompt Logs are created as the LLM is called; they are
+        added to the trace, allowing you to see the whole conversation in the UI.
+
+        If the function returns a ChatMessage-like object, the Log will
+        populate the `output_message` field. Otherwise, it will serialize
+        the return value and populate the `output` field.
+
+        If an exception is raised, the output fields will be set to None
+        and the error message will be set in the Log's `error` field.
+
+        :param path: The path to the Flow. If not provided, the function name
+            will be used as the path and the File will be created in the root
+            of your organization workspace.
+
+        :param attributes: Additional fields to describe the Flow. Helpful to separate
+            Flow versions from each other with details on how they were created or used.
+        """
+        return a_flow_decorator_factory(
+            client=self,
+            opentelemetry_tracer=self._opentelemetry_tracer,
+            path=path,
+            attributes=attributes,
+        )
+
     def pull(self, path: Optional[str] = None, environment: Optional[str] = None) -> Tuple[List[str], List[str]]:
         """Pull Prompt and Agent files from Humanloop to local filesystem.
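
The entrypoint for the `async_agent` example in the `a_flow` docstring above would simply be run on an event loop (a sketch; assumes the decorated functions are bound to a configured client):

```python
import asyncio

# Each run opens one Flow trace; the awaited call_llm_async calls inside
# are attached to it as Prompt Logs, giving the full conversation in the UI.
asyncio.run(async_agent())
```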

src/humanloop/context.py

Lines changed: 3 additions & 2 deletions

@@ -1,13 +1,14 @@
+import threading
 from contextlib import contextmanager
 from dataclasses import dataclass
-import threading
 from typing import Any, Callable, Generator, Literal, Optional
+
 from opentelemetry import context as context_api

 from humanloop.error import HumanloopRuntimeError
 from humanloop.otel.constants import (
-    HUMANLOOP_CONTEXT_EVALUATION,
     HUMANLOOP_CONTEXT_DECORATOR,
+    HUMANLOOP_CONTEXT_EVALUATION,
     HUMANLOOP_CONTEXT_TRACE_ID,
 )

src/humanloop/decorators/flow.py

Lines changed: 107 additions & 7 deletions

@@ -1,9 +1,9 @@
 import logging
 from functools import wraps
-from typing import Any, Callable, Optional, TypeVar
-from typing_extensions import ParamSpec
+from typing import Any, Awaitable, Callable, Optional, TypeVar

 from opentelemetry.trace import Span, Tracer
+from typing_extensions import ParamSpec

 from humanloop.base_client import BaseHumanloop
 from humanloop.context import (

@@ -12,18 +12,18 @@
     set_decorator_context,
     set_trace_id,
 )
-from humanloop.evals.run import HumanloopRuntimeError
-from humanloop.types.chat_message import ChatMessage
 from humanloop.decorators.helpers import bind_args
+from humanloop.evals.run import HumanloopRuntimeError
 from humanloop.evals.types import FileEvalConfig
 from humanloop.otel.constants import (
-    HUMANLOOP_FILE_TYPE_KEY,
-    HUMANLOOP_LOG_KEY,
     HUMANLOOP_FILE_PATH_KEY,
+    HUMANLOOP_FILE_TYPE_KEY,
     HUMANLOOP_FLOW_SPAN_NAME,
+    HUMANLOOP_LOG_KEY,
 )
 from humanloop.otel.helpers import process_output, write_to_opentelemetry_span
 from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.types.chat_message import ChatMessage
 from humanloop.types.flow_log_response import FlowLogResponse

 logger = logging.getLogger("humanloop.sdk")

@@ -33,7 +33,7 @@
 R = TypeVar("R")


-def flow(
+def flow_decorator_factory(
     client: "BaseHumanloop",
     opentelemetry_tracer: Tracer,
     path: str,
@@ -131,3 +131,103 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
         return wrapper

     return decorator
+
+
+def a_flow_decorator_factory(
+    client: "BaseHumanloop",
+    opentelemetry_tracer: Tracer,
+    path: str,
+    attributes: Optional[dict[str, Any]] = None,
+):
+    flow_kernel = {"attributes": attributes or {}}
+
+    def decorator(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[Optional[R]]]:
+        decorator_path = path or func.__name__
+        file_type = "flow"
+
+        @wraps(func)
+        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
+            span: Span
+            with set_decorator_context(
+                DecoratorContext(
+                    path=decorator_path,
+                    type="flow",
+                    version=flow_kernel,
+                )
+            ) as decorator_context:
+                with opentelemetry_tracer.start_as_current_span(HUMANLOOP_FLOW_SPAN_NAME) as span:  # type: ignore
+                    span.set_attribute(HUMANLOOP_FILE_PATH_KEY, decorator_path)
+                    span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
+                    trace_id = get_trace_id()
+                    func_args = bind_args(func, args, kwargs)
+
+                    # Create the trace ahead of time so we have a parent ID to reference
+                    init_log_inputs = {
+                        "inputs": {k: v for k, v in func_args.items() if k != "messages"},
+                        "messages": func_args.get("messages"),
+                        "trace_parent_id": trace_id,
+                    }
+                    this_flow_log: FlowLogResponse = client.flows._log(  # type: ignore [attr-defined]
+                        path=decorator_context.path,
+                        flow=decorator_context.version,
+                        log_status="incomplete",
+                        **init_log_inputs,
+                    )
+
+                    with set_trace_id(this_flow_log.id):
+                        func_output: Optional[R]
+                        log_output: Optional[str]
+                        log_error: Optional[str]
+                        log_output_message: Optional[ChatMessage]
+                        try:
+                            func_output = await func(*args, **kwargs)
+                            if (
+                                isinstance(func_output, dict)
+                                and len(func_output.keys()) == 2
+                                and "role" in func_output
+                                and "content" in func_output
+                            ):
+                                log_output_message = func_output  # type: ignore [assignment]
+                                log_output = None
+                            else:
+                                log_output = process_output(func=func, output=func_output)
+                                log_output_message = None
+                            log_error = None
+                        except HumanloopRuntimeError as e:
+                            # Critical error, re-raise
+                            client.logs.delete(id=this_flow_log.id)
+                            span.record_exception(e)
+                            raise e
+                        except Exception as e:
+                            logger.error(f"Error calling {func.__name__}: {e}")
+                            log_output = None
+                            log_output_message = None
+                            log_error = str(e)
+                            func_output = None
+
+                    updated_flow_log = {
+                        "log_status": "complete",
+                        "output": log_output,
+                        "error": log_error,
+                        "output_message": log_output_message,
+                        "id": this_flow_log.id,
+                    }
+                    # Write the Flow Log to the Span on HL_LOG_OT_KEY
+                    write_to_opentelemetry_span(
+                        span=span,  # type: ignore [arg-type]
+                        key=HUMANLOOP_LOG_KEY,
+                        value=updated_flow_log,  # type: ignore
+                    )
+                    # Return the output of the decorated function
+                    return func_output  # type: ignore [return-value]
+
+        wrapper.file = FileEvalConfig(  # type: ignore
+            path=decorator_path,
+            type=file_type,  # type: ignore [arg-type, typeddict-item]
+            version=FlowDict(**flow_kernel),  # type: ignore
+            callable=wrapper,
+        )
+
+        return wrapper
+
+    return decorator
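
The `role`/`content` check in the wrapper decides which Log field is populated. A small illustration of the two return-value paths (hypothetical functions, assuming a configured `Humanloop` client named `humanloop`):

```python
@humanloop.a_flow(path="demo/chat_turn")
async def chat_turn() -> dict:
    # Exactly the keys {"role", "content"}: logged as `output_message`,
    # while `output` stays None.
    return {"role": "assistant", "content": "Hello!"}


@humanloop.a_flow(path="demo/report")
async def report() -> str:
    # Any other JSON-serializable return value is stringified into `output`.
    return "All systems nominal"
```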
