|
10 | 10 |
|
11 | 11 | from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop |
12 | 12 | from humanloop.core.client_wrapper import SyncClientWrapper |
13 | | -from humanloop.decorators.flow import flow as flow_decorator_factory |
14 | | -from humanloop.decorators.prompt import prompt_decorator_factory |
| 13 | +from humanloop.decorators.flow import a_flow_decorator_factory as a_flow_decorator_factory |
| 14 | +from humanloop.decorators.flow import flow_decorator_factory as flow_decorator_factory |
| 15 | +from humanloop.decorators.prompt import a_prompt_decorator_factory, prompt_decorator_factory |
| 16 | +from humanloop.decorators.tool import a_tool_decorator_factory as a_tool_decorator_factory |
15 | 17 | from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory |
16 | 18 | from humanloop.environment import HumanloopEnvironment |
17 | 19 | from humanloop.evals import run_eval |
@@ -273,6 +275,50 @@ def call_llm(messages): |
273 | 275 | """ |
274 | 276 | return prompt_decorator_factory(path=path) |
275 | 277 |
|
| 278 | + def a_prompt( |
| 279 | + self, |
| 280 | + *, |
| 281 | + path: str, |
| 282 | + ): |
| 283 | +        """Async version of `prompt`: auto-instrument LLM providers and create
| 284 | +        [Prompt](https://humanloop.com/docs/explanation/prompts) Logs on Humanloop from them.
| 285 | +
|
| 286 | + ```python |
| 287 | + @a_prompt(path="My Async Prompt") |
| 288 | + async def call_llm_async(messages): |
| 289 | + client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) |
| 290 | + response = await client.chat.completions.create( |
| 291 | + model="gpt-4o", |
| 292 | + temperature=0.8, |
| 293 | + frequency_penalty=0.5, |
| 294 | + max_tokens=200, |
| 295 | + messages=messages, |
| 296 | + ) |
| 297 | + return response.choices[0].message.content |
| 298 | +
|
| 299 | +        # Calling the function above creates a new Log on Humanloop
| 300 | +        # against this Prompt version:
| 301 | + { |
| 302 | + provider: "openai", |
| 303 | + model: "gpt-4o", |
| 304 | + endpoint: "chat", |
| 305 | + max_tokens: 200, |
| 306 | + temperature: 0.8, |
| 307 | + frequency_penalty: 0.5, |
| 308 | + } |
| 309 | + ``` |
| 310 | +
|
| 311 | + If a different model, endpoint, or hyperparameter is used, a new |
| 312 | + Prompt version is created. |
| 313 | +
|
| 314 | +        :param path: The path where the Prompt File is created in your
| 315 | +        Humanloop organization workspace.
| 319 | + """ |
| 320 | + return a_prompt_decorator_factory(path=path) |
| 321 | + |
276 | 322 | def tool( |
277 | 323 | self, |
278 | 324 | *, |
@@ -331,6 +377,64 @@ def calculator(a: int, b: Optional[int]) -> int: |
331 | 377 | setup_values=setup_values, |
332 | 378 | ) |
333 | 379 |
|
| 380 | + def a_tool( |
| 381 | + self, |
| 382 | + *, |
| 383 | + path: str, |
| 384 | + attributes: Optional[dict[str, Any]] = None, |
| 385 | + setup_values: Optional[dict[str, Any]] = None, |
| 386 | + ): |
| 387 | +        """Async version of `tool`: manage [Tool](https://humanloop.com/docs/explanation/tools) Files through code.
| 388 | +
|
| 389 | + The decorator inspects the wrapped async function's source code to infer the Tool's |
| 390 | + JSON Schema. If the function declaration changes, a new Tool version |
| 391 | + is upserted with an updated JSON Schema. |
| 392 | +
|
| 393 | + For example: |
| 394 | +
|
| 395 | + ```python |
| 396 | + # Adding @a_tool on this function |
| 397 | + @humanloop_client.a_tool(path="async_calculator") |
| 398 | + async def async_calculator(a: int, b: Optional[int]) -> int: |
| 399 | + \"\"\"Add two numbers together asynchronously.\"\"\" |
| 400 | + return a + b |
| 401 | +
|
| 402 | + # Creates a Tool with this JSON Schema: |
| 403 | + { |
| 404 | + strict: True, |
| 405 | + function: { |
| 406 | + "name": "async_calculator", |
| 407 | + "description": "Add two numbers together asynchronously.", |
| 408 | + "parameters": { |
| 409 | + type: "object", |
| 410 | + properties: { |
| 411 | + a: {type: "integer"}, |
| 412 | + b: {type: "integer"} |
| 413 | + }, |
| 414 | + required: ["a"], |
| 415 | + }, |
| 416 | + } |
| 417 | + } |
| 418 | + ``` |
| 419 | +
|
| 420 | + The return value of the decorated function must be JSON serializable. |
| 421 | +
|
| 422 | + If the function raises an exception, the created Log will have `output` |
| 423 | + set to null, and the `error` field populated. |
| 424 | +
|
| 425 | + :param path: The path of the File in the Humanloop workspace. |
| 426 | +
|
| 427 | +        :param setup_values: Values needed to set up the Tool, defined in [JSON Schema](https://json-schema.org/)
| 428 | +
|
| 429 | + :param attributes: Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. |
| 430 | + """ |
| 431 | + return a_tool_decorator_factory( |
| 432 | + opentelemetry_tracer=self._opentelemetry_tracer, |
| 433 | + path=path, |
| 434 | + attributes=attributes, |
| 435 | + setup_values=setup_values, |
| 436 | + ) |
| 437 | + |
334 | 438 | def flow( |
335 | 439 | self, |
336 | 440 | *, |
@@ -394,6 +498,70 @@ def agent(): |
394 | 498 | attributes=attributes, |
395 | 499 | ) |
396 | 500 |
|
| 501 | + def a_flow( |
| 502 | + self, |
| 503 | + *, |
| 504 | + path: str, |
| 505 | + attributes: Optional[dict[str, Any]] = None, |
| 506 | + ): |
| 507 | +        """Async version of `flow`: trace SDK logging calls through [Flows](https://humanloop.com/docs/explanation/flows).
| 508 | +
|
| 509 | +        Use it as the entrypoint of your async LLM feature. Logging calls like `prompts.call(...)` and
| 510 | +        `tools.call(...)`, as well as functions wrapped in other Humanloop decorators, are automatically added to the trace.
| 511 | +
|
| 512 | + For example: |
| 513 | +
|
| 514 | + ```python |
| 515 | +        @a_prompt(path="Assistant Prompt")
| 516 | + async def call_llm_async(messages): |
| 517 | + client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) |
| 518 | + response = await client.chat.completions.create( |
| 519 | + model="gpt-4o", |
| 520 | + temperature=0.8, |
| 521 | + frequency_penalty=0.5, |
| 522 | + max_tokens=200, |
| 523 | + messages=messages, |
| 524 | + ) |
| 525 | + return response.choices[0].message.content |
| 526 | +
|
| 527 | + @a_flow(attributes={"version": "v1"}) |
| 528 | + async def async_agent(): |
| 529 | +            messages = []
| 530 | +            while True:
| 531 | +                user_input = input("You: ")
| 532 | + if user_input == "exit": |
| 533 | + break |
| 534 | + messages.append({"role": "user", "content": user_input}) |
| 535 | + response = await call_llm_async(messages) |
| 536 | + messages.append({"role": "assistant", "content": response}) |
| 537 | + print(f"Assistant: {response}") |
| 538 | + ``` |
| 539 | +
|
| 540 | + Each call to async_agent will create a trace corresponding to the conversation |
| 541 | + session. Multiple Prompt Logs will be created as the LLM is called. They |
| 542 | + will be added to the trace, allowing you to see the whole conversation |
| 543 | + in the UI. |
| 544 | +
|
| 545 | + If the function returns a ChatMessage-like object, the Log will |
| 546 | + populate the `output_message` field. Otherwise, it will serialize |
| 547 | + the return value and populate the `output` field. |
| 548 | +
|
| 549 | + If an exception is raised, the output fields will be set to None |
| 550 | + and the error message will be set in the Log's `error` field. |
| 551 | +
|
| 552 | +        :param path: The path where the Flow File is created in your Humanloop
| 553 | +        organization workspace.
| 555 | +
|
| 556 | + :param attributes: Additional fields to describe the Flow. Helpful to separate Flow versions from each other with details on how they were created or used. |
| 557 | + """ |
| 558 | + return a_flow_decorator_factory( |
| 559 | + client=self, |
| 560 | + opentelemetry_tracer=self._opentelemetry_tracer, |
| 561 | + path=path, |
| 562 | + attributes=attributes, |
| 563 | + ) |
| 564 | + |
397 | 565 | def pull(self, path: Optional[str] = None, environment: Optional[str] = None) -> Tuple[List[str], List[str]]: |
398 | 566 | """Pull Prompt and Agent files from Humanloop to local filesystem. |
399 | 567 |
|
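For orientation, here is a minimal, hypothetical sketch (not part of this commit) of how the new async decorators might be wired together end to end. It assumes a `Humanloop` client constructed from an API key, the `openai` package for `AsyncOpenAI`, and illustrative File paths; the decorator signatures follow the docstrings added above.

```python
import asyncio
import os

from openai import AsyncOpenAI

from humanloop import Humanloop

humanloop_client = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))


# Async counterpart of the `prompt` decorator: provider calls made inside the
# wrapped coroutine are instrumented and logged as Prompt Logs.
@humanloop_client.a_prompt(path="Demo Agent/Call LLM")
async def call_llm_async(messages):
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    response = await client.chat.completions.create(
        model="gpt-4o",
        temperature=0.8,
        messages=messages,
    )
    return response.choices[0].message.content


# The flow decorator marks the entrypoint of the feature; the Prompt Log
# created inside it is attached to the same trace.
@humanloop_client.a_flow(path="Demo Agent/Agent", attributes={"version": "v1"})
async def async_agent(user_input: str) -> str:
    messages = [{"role": "user", "content": user_input}]
    return await call_llm_async(messages)


if __name__ == "__main__":
    print(asyncio.run(async_agent("What does a Humanloop Flow trace contain?")))
```

Because `a_prompt` only takes `path`, the model, endpoint, and hyperparameters of the Prompt version are inferred from the instrumented provider call, as described in the docstring.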
|
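A similar hypothetical sketch for the async tool decorator; the client instance, path, and function are illustrative. Per the docstring above, parameters typed `Optional` are left out of the inferred schema's `required` list, and the return value must be JSON serializable.

```python
import asyncio
import os
from typing import Optional

from humanloop import Humanloop

humanloop_client = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))


# The decorator infers the Tool's JSON Schema from the function signature;
# `b` is typed Optional, so only `a` appears in the schema's "required" list
# (mirroring the docstring example above).
@humanloop_client.a_tool(path="Demo Agent/async_calculator")
async def async_calculator(a: int, b: Optional[int] = None) -> int:
    """Add two numbers together asynchronously."""
    return a + (b or 0)


# Each call creates a Tool Log with the JSON-serializable return value.
if __name__ == "__main__":
    print(asyncio.run(async_calculator(1, b=2)))
```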