From 3aef43d543719ebc814a81be35b76772da94a700 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 6 Aug 2025 23:42:21 +0000 Subject: [PATCH 01/30] Let Agent be run in a Temporal workflow by moving model requests, tool calls, and MCP to Temporal activities --- .../pydantic_ai/ext/temporal/__init__.py | 72 ++ .../pydantic_ai/ext/temporal/_agent.py | 632 ++++++++++++ .../ext/temporal/_function_toolset.py | 87 ++ .../pydantic_ai/ext/temporal/_logfire.py | 45 + .../pydantic_ai/ext/temporal/_mcp_server.py | 130 +++ .../pydantic_ai/ext/temporal/_model.py | 148 +++ .../pydantic_ai/ext/temporal/_run_context.py | 53 + .../pydantic_ai/ext/temporal/_toolset.py | 45 + pydantic_ai_slim/pyproject.toml | 2 + pyproject.toml | 2 +- .../test_temporal/test_temporal.yaml | 929 ++++++++++++++++++ tests/test_temporal.py | 304 ++++++ uv.lock | 51 +- 13 files changed, 2496 insertions(+), 4 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py create mode 100644 pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py create mode 100644 tests/cassettes/test_temporal/test_temporal.yaml create mode 100644 tests/test_temporal.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py new file mode 100644 index 0000000000..abe9a63ed0 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import warnings +from collections.abc import Sequence +from dataclasses import replace +from 
typing import Any, Callable + +from temporalio.client import ClientConfig, Plugin as ClientPlugin +from temporalio.contrib.pydantic import PydanticPayloadConverter, pydantic_data_converter +from temporalio.converter import DefaultPayloadConverter +from temporalio.worker import Plugin as WorkerPlugin, WorkerConfig +from temporalio.worker.workflow_sandbox import SandboxedWorkflowRunner + +from ._agent import TemporalAgent +from ._logfire import LogfirePlugin +from ._run_context import TemporalRunContext, TemporalRunContextWithDeps + +__all__ = [ + 'TemporalRunContext', + 'TemporalRunContextWithDeps', + 'PydanticAIPlugin', + 'LogfirePlugin', + 'AgentPlugin', + 'TemporalAgent', +] + + +class PydanticAIPlugin(ClientPlugin, WorkerPlugin): + """Temporal client and worker plugin for Pydantic AI.""" + + def configure_client(self, config: ClientConfig) -> ClientConfig: + if (data_converter := config.get('data_converter')) and data_converter.payload_converter_class not in ( + DefaultPayloadConverter, + PydanticPayloadConverter, + ): + warnings.warn( + 'A non-default Temporal data converter was used which has been replaced with the Pydantic data converter.' 
+ ) + + config['data_converter'] = pydantic_data_converter + return super().configure_client(config) + + def configure_worker(self, config: WorkerConfig) -> WorkerConfig: + runner = config.get('workflow_runner') # pyright: ignore[reportUnknownMemberType] + if isinstance(runner, SandboxedWorkflowRunner): + config['workflow_runner'] = replace( + runner, + restrictions=runner.restrictions.with_passthrough_modules( + 'pydantic_ai', + 'logfire', + 'rich', + 'httpx', + # Imported inside `logfire._internal.json_encoder` when running `logfire.info` inside an activity with attributes to serialize + 'attrs', + # Imported inside `logfire._internal.json_schema` when running `logfire.info` inside an activity with attributes to serialize + 'numpy', + 'pandas', + ), + ) + return super().configure_worker(config) + + +class AgentPlugin(WorkerPlugin): + """Temporal worker plugin for a specific Pydantic AI agent.""" + + def __init__(self, agent: TemporalAgent[Any, Any]): + self.agent = agent + + def configure_worker(self, config: WorkerConfig) -> WorkerConfig: + activities: Sequence[Callable[..., Any]] = config.get('activities', []) # pyright: ignore[reportUnknownMemberType] + config['activities'] = [*activities, *self.agent.temporal_activities] + return super().configure_worker(config) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py new file mode 100644 index 0000000000..8c2abde8de --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -0,0 +1,632 @@ +from __future__ import annotations + +from collections.abc import AsyncIterator, Iterator, Sequence +from contextlib import AbstractAsyncContextManager, asynccontextmanager, contextmanager +from typing import Any, Callable, Literal, overload + +from temporalio import workflow +from temporalio.workflow import ActivityConfig +from typing_extensions import Never + +from pydantic_ai import ( + _utils, + messages as _messages, + models, + usage as 
_usage, +) +from pydantic_ai._run_context import AgentDepsT +from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent +from pydantic_ai.exceptions import UserError +from pydantic_ai.ext.temporal._run_context import TemporalRunContext +from pydantic_ai.models import Model +from pydantic_ai.output import OutputDataT, OutputSpec +from pydantic_ai.result import StreamedRunResult +from pydantic_ai.settings import ModelSettings +from pydantic_ai.tools import ( + Tool, + ToolFuncEither, +) +from pydantic_ai.toolsets import AbstractToolset + +from ._model import TemporalModel +from ._toolset import TemporalWrapperToolset, temporalize_toolset + + +class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]): + def __init__( + self, + wrapped: AbstractAgent[AgentDepsT, OutputDataT], + activity_config: ActivityConfig = {}, + toolset_activity_config: dict[str, ActivityConfig] = {}, + tool_activity_config: dict[str, dict[str, ActivityConfig | Literal[False]]] = {}, + run_context_type: type[TemporalRunContext] = TemporalRunContext, + temporalize_toolset_func: Callable[ + [ + AbstractToolset[Any], + ActivityConfig, + dict[str, ActivityConfig | Literal[False]], + type[TemporalRunContext], + ], + AbstractToolset[Any], + ] = temporalize_toolset, + ): + """Wrap an agent to make it compatible with Temporal. + + Args: + wrapped: The agent to wrap. + activity_config: The Temporal activity config to use. + toolset_activity_config: The Temporal activity config to use for specific toolsets identified by ID. + tool_activity_config: The Temporal activity config to use for specific tools identified by toolset ID and tool name. + run_context_type: The type of run context to use to serialize and deserialize the run context. + temporalize_toolset_func: The function to use to prepare the toolsets for Temporal. 
+ """ + super().__init__(wrapped) + + agent = wrapped + + activities: list[Callable[..., Any]] = [] + if not isinstance(agent.model, Model): + raise UserError( + 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) + + event_stream_handler = agent.event_stream_handler + if event_stream_handler is None: + raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') + + temporal_model = TemporalModel(agent.model, event_stream_handler, activity_config, run_context_type) + activities.extend(temporal_model.temporal_activities) + + def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset[AgentDepsT]: + id = toolset.id + if not id: + raise UserError( + "Toolsets that implement their own tool calling need to have an ID in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." + ) + toolset = temporalize_toolset_func( + toolset, + activity_config | toolset_activity_config.get(id, {}), + tool_activity_config.get(id, {}), + run_context_type, + ) + if isinstance(toolset, TemporalWrapperToolset): + activities.extend(toolset.temporal_activities) + return toolset + + temporal_toolset = agent.toolset.visit_and_replace(temporalize_toolset) + + self._model = temporal_model + self._toolset = temporal_toolset + self._temporal_activities = activities + + @property + def model(self) -> Model: + return self._model + + @property + def toolset(self) -> AbstractToolset[AgentDepsT]: + with self._temporal_overrides(): + return super().toolset + + @property + def temporal_activities(self) -> list[Callable[..., Any]]: + return self._temporal_activities + + @contextmanager + def _temporal_overrides(self) -> Iterator[None]: + # We reset tools here as the temporalized function toolset is already in self._toolset. 
+ with super().override(model=self._model, toolsets=[self._toolset], tools=[]): + yield + + @overload + async def run( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AgentRunResult[OutputDataT]: ... + + @overload + async def run( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AgentRunResult[RunOutputDataT]: ... 
+ + async def run( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + **_deprecated_kwargs: Never, + ) -> AgentRunResult[Any]: + """Run the agent with a user prompt in async mode. + + This method builds an internal agent graph (using system prompts, tools and result schemas) and then + runs the graph to completion. The result of the run is returned. + + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + async def main(): + agent_run = await agent.run('What is the capital of France?') + print(agent_run.output) + #> Paris + ``` + + Args: + user_prompt: User input to start/continue the conversation. + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. 
+ event_stream_handler: Optional event stream handler to use for this run. + + Returns: + The result of the run. + """ + if workflow.in_workflow() and event_stream_handler is not None: + raise UserError( + 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) + + return await super().run( + user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + **_deprecated_kwargs, + ) + + @overload + def run_sync( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AgentRunResult[OutputDataT]: ... + + @overload + def run_sync( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AgentRunResult[RunOutputDataT]: ... 
+ + def run_sync( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + **_deprecated_kwargs: Never, + ) -> AgentRunResult[Any]: + """Synchronously run the agent with a user prompt. + + This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] with `loop.run_until_complete(...)`. + You therefore can't use this method inside async code or if there's an active event loop. + + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + result_sync = agent.run_sync('What is the capital of Italy?') + print(result_sync.output) + #> Rome + ``` + + Args: + user_prompt: User input to start/continue the conversation. + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
+ toolsets: Optional additional toolsets for this run. + event_stream_handler: Optional event stream handler to use for this run. + + Returns: + The result of the run. + """ + if workflow.in_workflow() and event_stream_handler is not None: + raise UserError( + 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) + + return super().run_sync( + user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + **_deprecated_kwargs, + ) + + @overload + def run_stream( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AbstractAsyncContextManager[StreamedRunResult[AgentDepsT, OutputDataT]]: ... 
+ + @overload + def run_stream( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + ) -> AbstractAsyncContextManager[StreamedRunResult[AgentDepsT, RunOutputDataT]]: ... + + @asynccontextmanager + async def run_stream( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + **_deprecated_kwargs: Never, + ) -> AsyncIterator[StreamedRunResult[AgentDepsT, Any]]: + """Run the agent with a user prompt in async mode, returning a streamed response. + + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + async def main(): + async with agent.run_stream('What is the capital of the UK?') as response: + print(await response.get_output()) + #> London + ``` + + Args: + user_prompt: User input to start/continue the conversation. 
+ output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + event_stream_handler: Optional event stream handler to use for this run. It will receive all the events up until the final result is found, which you can then read or stream from inside the context manager. + + Returns: + The result of the run. + """ + if workflow.in_workflow() and event_stream_handler is not None: + raise UserError( + 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
+ ) + + async with super().run_stream( + user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + **_deprecated_kwargs, + ) as result: + yield result + + @overload + def iter( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + **_deprecated_kwargs: Never, + ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, OutputDataT]]: ... + + @overload + def iter( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + **_deprecated_kwargs: Never, + ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ... 
+ + @asynccontextmanager + async def iter( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.Usage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + **_deprecated_kwargs: Never, + ) -> AsyncIterator[AgentRun[AgentDepsT, Any]]: + """A contextmanager which can be used to iterate over the agent graph's nodes as they are executed. + + This method builds an internal agent graph (using system prompts, tools and output schemas) and then returns an + `AgentRun` object. The `AgentRun` can be used to async-iterate over the nodes of the graph as they are + executed. This is the API to use if you want to consume the outputs coming from each LLM model response, or the + stream of events coming from the execution of tools. + + The `AgentRun` also provides methods to access the full message history, new messages, and usage statistics, + and the final result of the run once it has completed. + + For more details, see the documentation of `AgentRun`. 
+ + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + async def main(): + nodes = [] + async with agent.iter('What is the capital of France?') as agent_run: + async for node in agent_run: + nodes.append(node) + print(nodes) + ''' + [ + UserPromptNode( + user_prompt='What is the capital of France?', + instructions=None, + instructions_functions=[], + system_prompts=(), + system_prompt_functions=[], + system_prompt_dynamic_functions={}, + ), + ModelRequestNode( + request=ModelRequest( + parts=[ + UserPromptPart( + content='What is the capital of France?', + timestamp=datetime.datetime(...), + ) + ] + ) + ), + CallToolsNode( + model_response=ModelResponse( + parts=[TextPart(content='Paris')], + usage=Usage( + requests=1, request_tokens=56, response_tokens=1, total_tokens=57 + ), + model_name='gpt-4o', + timestamp=datetime.datetime(...), + ) + ), + End(data=FinalResult(output='Paris')), + ] + ''' + print(agent_run.result.output) + #> Paris + ``` + + Args: + user_prompt: User input to start/continue the conversation. + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + + Returns: + The result of the run. 
+ """ + if not workflow.in_workflow(): + async with super().iter( + user_prompt=user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + **_deprecated_kwargs, + ) as result: + yield result + + if model is not None: + raise UserError( + 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) + if toolsets is not None: + raise UserError( + 'Toolsets cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) + + with self._temporal_overrides(): + async with super().iter( + user_prompt=user_prompt, + output_type=output_type, + message_history=message_history, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + **_deprecated_kwargs, + ) as result: + yield result + + @contextmanager + def override( + self, + *, + deps: AgentDepsT | _utils.Unset = _utils.UNSET, + model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, + tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, + ) -> Iterator[None]: + """Context manager to temporarily override agent dependencies, model, toolsets, or tools. + + This is particularly useful when testing. + You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures). + + Args: + deps: The dependencies to use instead of the dependencies passed to the agent run. + model: The model to use instead of the model passed to the agent run. + toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. + tools: The tools to use instead of the tools registered with the agent. 
+ """ + if workflow.in_workflow(): + if _utils.is_set(model): + raise UserError( + 'Model cannot be contextually overridden when using Temporal, it must be set at agent creation time.' + ) + if _utils.is_set(toolsets): + raise UserError( + 'Toolsets cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + ) + if _utils.is_set(tools): + raise UserError( + 'Tools cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + ) + + with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools): + yield diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py new file mode 100644 index 0000000000..d0965ab7d6 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Callable, Literal + +from pydantic import ConfigDict, with_config +from temporalio import activity, workflow +from temporalio.workflow import ActivityConfig + +from pydantic_ai._run_context import RunContext +from pydantic_ai.exceptions import UserError +from pydantic_ai.toolsets import FunctionToolset, ToolsetTool +from pydantic_ai.toolsets.function import FunctionToolsetTool + +from ._run_context import TemporalRunContext +from ._toolset import TemporalWrapperToolset + + +@dataclass +@with_config(ConfigDict(arbitrary_types_allowed=True)) +class _CallToolParams: + name: str + tool_args: dict[str, Any] + serialized_run_context: Any + + +class TemporalFunctionToolset(TemporalWrapperToolset): + def __init__( + self, + toolset: FunctionToolset, + activity_config: ActivityConfig = {}, + tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + run_context_type: type[TemporalRunContext] = TemporalRunContext, + ): + super().__init__(toolset) + self.activity_config = activity_config + 
self.tool_activity_config = tool_activity_config + self.run_context_type = run_context_type + + id = toolset.id + assert id is not None + + @activity.defn(name=f'function_toolset__{id}__call_tool') + async def call_tool_activity(params: _CallToolParams) -> Any: + name = params.name + ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context) + try: + tool = (await toolset.get_tools(ctx))[name] + except KeyError as e: + raise UserError( + f'Tool {name!r} not found in toolset {toolset.id!r}. ' + 'Removing or renaming tools during an agent run is not supported with Temporal.' + ) from e + + return await self.wrapped.call_tool(name, params.tool_args, ctx, tool) + + self.call_tool_activity = call_tool_activity + + @property + def wrapped_function_toolset(self) -> FunctionToolset: + assert isinstance(self.wrapped, FunctionToolset) + return self.wrapped + + @property + def temporal_activities(self) -> list[Callable[..., Any]]: + return [self.call_tool_activity] + + async def call_tool(self, name: str, tool_args: dict[str, Any], ctx: RunContext, tool: ToolsetTool) -> Any: + if not workflow.in_workflow(): + return await super().call_tool(name, tool_args, ctx, tool) + + tool_activity_config = self.tool_activity_config.get(name, {}) + if tool_activity_config is False: + assert isinstance(tool, FunctionToolsetTool) + if not tool.is_async: + raise UserError( + f'Temporal activity config for non-async tool {name!r} is `False` (activity disabled), but only async tools can be run outside of an activity. Make the tool function async instead.' 
+ ) + return await super().call_tool(name, tool_args, ctx, tool) + + tool_activity_config = self.activity_config | tool_activity_config + serialized_run_context = self.run_context_type.serialize_run_context(ctx) + return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] + activity=self.call_tool_activity, + arg=_CallToolParams(name=name, tool_args=tool_args, serialized_run_context=serialized_run_context), + **tool_activity_config, + ) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py new file mode 100644 index 0000000000..c665c8d41f --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from typing import Callable + +from logfire import Logfire +from opentelemetry.trace import get_tracer +from temporalio.client import ClientConfig, Plugin as ClientPlugin +from temporalio.contrib.opentelemetry import TracingInterceptor +from temporalio.runtime import OpenTelemetryConfig, Runtime, TelemetryConfig +from temporalio.service import ConnectConfig, ServiceClient + + +def _default_setup_logfire() -> Logfire: + import logfire + + instance = logfire.configure() + logfire.instrument_pydantic_ai() + return instance + + +class LogfirePlugin(ClientPlugin): + """Temporal client plugin for Logfire.""" + + def __init__(self, setup_logfire: Callable[[], Logfire] = _default_setup_logfire): + self.setup_logfire = setup_logfire + + def configure_client(self, config: ClientConfig) -> ClientConfig: + interceptors = config.get('interceptors', []) + config['interceptors'] = [*interceptors, TracingInterceptor(get_tracer('temporal'))] + return super().configure_client(config) + + async def connect_service_client(self, config: ConnectConfig) -> ServiceClient: + logfire = self.setup_logfire() + logfire_config = logfire.config + token = logfire_config.token + if token is not None: + base_url = 
logfire_config.advanced.generate_base_url(token) + metrics_url = base_url + '/v1/metrics' + headers = {'Authorization': f'Bearer {token}'} + + config.runtime = Runtime( + telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url, headers=headers)) + ) + + return await super().connect_service_client(config) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py new file mode 100644 index 0000000000..1d0c156ee0 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Callable, Literal + +from pydantic import ConfigDict, with_config +from temporalio import activity, workflow +from temporalio.workflow import ActivityConfig +from typing_extensions import Self + +from pydantic_ai._run_context import RunContext +from pydantic_ai.exceptions import UserError +from pydantic_ai.mcp import MCPServer, ToolResult +from pydantic_ai.tools import ToolDefinition +from pydantic_ai.toolsets.abstract import ToolsetTool + +from ._run_context import TemporalRunContext +from ._toolset import TemporalWrapperToolset + + +@dataclass +@with_config(ConfigDict(arbitrary_types_allowed=True)) +class _GetToolsParams: + serialized_run_context: Any + + +@dataclass +@with_config(ConfigDict(arbitrary_types_allowed=True)) +class _CallToolParams: + name: str + tool_args: dict[str, Any] + serialized_run_context: Any + tool_def: ToolDefinition + + +class TemporalMCPServer(TemporalWrapperToolset): + def __init__( + self, + server: MCPServer, + activity_config: ActivityConfig = {}, + tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + run_context_type: type[TemporalRunContext] = TemporalRunContext, + ): + super().__init__(server) + self.activity_config = activity_config + self.tool_activity_config = tool_activity_config + self.run_context_type = run_context_type + + id 
= server.id + assert id is not None + + @activity.defn(name=f'mcp_server__{id}__get_tools') + async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinition]: + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + tools = await self.wrapped.get_tools(run_context) + # ToolsetTool is not serializable as it holds a SchemaValidator (which is also the same for every MCP tool so unnecessary to pass along the wire every time), + # so we just return the ToolDefinitions and wrap them in ToolsetTool outside of the activity. + return {name: tool.tool_def for name, tool in tools.items()} + + self.get_tools_activity = get_tools_activity + + @activity.defn(name=f'mcp_server__{id}__call_tool') + async def call_tool_activity(params: _CallToolParams) -> ToolResult: + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + return await self.wrapped.call_tool( + params.name, + params.tool_args, + run_context, + self.wrapped_server.tool_for_tool_def(params.tool_def), + ) + + self.call_tool_activity = call_tool_activity + + @property + def wrapped_server(self) -> MCPServer: + assert isinstance(self.wrapped, MCPServer) + return self.wrapped + + @property + def temporal_activities(self) -> list[Callable[..., Any]]: + return [self.get_tools_activity, self.call_tool_activity] + + async def __aenter__(self) -> Self: + # The wrapped MCPServer enters itself around listing and calling tools + # so we don't need to enter it here (nor could we because we're not inside a Temporal activity). 
+ return self + + async def __aexit__(self, *args: Any) -> bool | None: + return None + + async def get_tools(self, ctx: RunContext[Any]) -> dict[str, ToolsetTool[Any]]: + if not workflow.in_workflow(): + return await super().get_tools(ctx) + + serialized_run_context = self.run_context_type.serialize_run_context(ctx) + tool_defs = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] + activity=self.get_tools_activity, + arg=_GetToolsParams(serialized_run_context=serialized_run_context), + **self.activity_config, + ) + return {name: self.wrapped_server.tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} + + async def call_tool( + self, + name: str, + tool_args: dict[str, Any], + ctx: RunContext[Any], + tool: ToolsetTool[Any], + ) -> ToolResult: + if not workflow.in_workflow(): + return await super().call_tool(name, tool_args, ctx, tool) + + tool_activity_config = self.tool_activity_config.get(name, {}) + if tool_activity_config is False: + raise UserError( + f'Temporal activity config for MCP tool {name!r} is `False` (activity disabled), but MCP tools cannot be run outside of an activity.' 
+ ) + + tool_activity_config = self.activity_config | tool_activity_config + serialized_run_context = self.run_context_type.serialize_run_context(ctx) + return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] + activity=self.call_tool_activity, + arg=_CallToolParams( + name=name, + tool_args=tool_args, + serialized_run_context=serialized_run_context, + tool_def=tool.tool_def, + ), + **tool_activity_config, + ) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py new file mode 100644 index 0000000000..92f379cac3 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from dataclasses import dataclass +from datetime import datetime +from typing import Any, Callable + +from pydantic import ConfigDict, with_config +from temporalio import activity, workflow +from temporalio.workflow import ActivityConfig + +from pydantic_ai._run_context import RunContext +from pydantic_ai.agent import EventStreamHandler +from pydantic_ai.exceptions import UserError +from pydantic_ai.messages import ( + ModelMessage, + ModelResponse, + ModelResponseStreamEvent, +) +from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse +from pydantic_ai.models.wrapper import WrapperModel +from pydantic_ai.settings import ModelSettings +from pydantic_ai.usage import Usage + +from ._run_context import TemporalRunContext + + +@dataclass +@with_config(ConfigDict(arbitrary_types_allowed=True)) +class _RequestParams: + messages: list[ModelMessage] + model_settings: ModelSettings | None + model_request_parameters: ModelRequestParameters + serialized_run_context: Any + + +class _TemporalStreamedResponse(StreamedResponse): + def __init__(self, model_request_parameters: ModelRequestParameters, response: ModelResponse): + 
super().__init__(model_request_parameters) + self.response = response + + async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: + return + # noinspection PyUnreachableCode + yield + + def get(self) -> ModelResponse: + """Build a [`ModelResponse`][pydantic_ai.messages.ModelResponse] from the data received from the stream so far.""" + return self.response + + def usage(self) -> Usage: + """Get the usage of the response so far. This will not be the final usage until the stream is exhausted.""" + return self.response.usage + + @property + def model_name(self) -> str: + """Get the model name of the response.""" + return self.response.model_name or '' + + @property + def timestamp(self) -> datetime: + """Get the timestamp of the response.""" + return self.response.timestamp + + +class TemporalModel(WrapperModel): + def __init__( + self, + model: Model, + event_stream_handler: EventStreamHandler[Any], + activity_config: ActivityConfig = {}, + run_context_type: type[TemporalRunContext] = TemporalRunContext, + ): + super().__init__(model) + self.activity_config = activity_config + self.event_stream_handler = event_stream_handler + self.run_context_type = run_context_type + + id = '_'.join([model.system, model.model_name]) + + @activity.defn(name=f'model__{id}__request') + async def request_activity(params: _RequestParams) -> ModelResponse: + return await self.wrapped.request(params.messages, params.model_settings, params.model_request_parameters) + + self.request_activity = request_activity + + @activity.defn(name=f'model__{id}__request_stream') + async def request_stream_activity(params: _RequestParams) -> ModelResponse: + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + async with self.wrapped.request_stream( + params.messages, params.model_settings, params.model_request_parameters, run_context + ) as streamed_response: + await self.event_stream_handler(run_context, streamed_response) + + async for _ in 
streamed_response: + pass + return streamed_response.get() + + self.request_stream_activity = request_stream_activity + + @property + def temporal_activities(self) -> list[Callable[..., Any]]: + return [self.request_activity, self.request_stream_activity] + + async def request( + self, + messages: list[ModelMessage], + model_settings: ModelSettings | None, + model_request_parameters: ModelRequestParameters, + ) -> ModelResponse: + if not workflow.in_workflow(): + return await super().request(messages, model_settings, model_request_parameters) + + return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] + activity=self.request_activity, + arg=_RequestParams( + messages=messages, + model_settings=model_settings, + model_request_parameters=model_request_parameters, + serialized_run_context=None, + ), + **self.activity_config, + ) + + @asynccontextmanager + async def request_stream( + self, + messages: list[ModelMessage], + model_settings: ModelSettings | None, + model_request_parameters: ModelRequestParameters, + run_context: RunContext[Any] | None = None, + ) -> AsyncIterator[StreamedResponse]: + if run_context is None: + raise UserError('Streaming with Temporal requires `request_stream` to be called with a `run_context`') + + serialized_run_context = self.run_context_type.serialize_run_context(run_context) + response = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] + activity=self.request_stream_activity, + arg=_RequestParams( + messages=messages, + model_settings=model_settings, + model_request_parameters=model_request_parameters, + serialized_run_context=serialized_run_context, + ), + **self.activity_config, + ) + yield _TemporalStreamedResponse(model_request_parameters, response) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py new file mode 100644 index 0000000000..cec240b3d3 --- /dev/null +++ 
b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from typing import Any + +from pydantic_ai._run_context import RunContext +from pydantic_ai.exceptions import UserError + + +class TemporalRunContext(RunContext[Any]): + def __init__(self, **kwargs: Any): + self.__dict__ = kwargs + setattr( + self, + '__dataclass_fields__', + {name: field for name, field in RunContext.__dataclass_fields__.items() if name in kwargs}, + ) + + def __getattribute__(self, name: str) -> Any: + try: + return super().__getattribute__(name) + except AttributeError as e: + if name in RunContext.__dataclass_fields__: + raise AttributeError( + f'{self.__class__.__name__!r} object has no attribute {name!r}. ' + 'To make the attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute and pass it to `TemporalAgent`.' + ) + else: + raise e + + @classmethod + def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: + return { + 'retries': ctx.retries, + 'tool_call_id': ctx.tool_call_id, + 'tool_name': ctx.tool_name, + 'retry': ctx.retry, + 'run_step': ctx.run_step, + } + + @classmethod + def deserialize_run_context(cls, ctx: dict[str, Any]) -> RunContext[Any]: + return cls(**ctx) + + +class TemporalRunContextWithDeps(TemporalRunContext): + @classmethod + def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: + if not isinstance(ctx.deps, dict): + raise UserError( + 'The `deps` object must be a JSON-serializable dictionary in order to be used with Temporal. ' + 'To use a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' 
+ ) + return {**super().serialize_run_context(ctx), 'deps': ctx.deps} # pyright: ignore[reportUnknownMemberType] diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py new file mode 100644 index 0000000000..c568ef7932 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any, Callable, Literal + +from temporalio.workflow import ActivityConfig + +from pydantic_ai.ext.temporal._run_context import TemporalRunContext +from pydantic_ai.mcp import MCPServer +from pydantic_ai.toolsets.abstract import AbstractToolset +from pydantic_ai.toolsets.function import FunctionToolset +from pydantic_ai.toolsets.wrapper import WrapperToolset + + +class TemporalWrapperToolset(WrapperToolset[Any], ABC): + @property + @abstractmethod + def temporal_activities(self) -> list[Callable[..., Any]]: + raise NotImplementedError + + +def temporalize_toolset( + toolset: AbstractToolset[Any], + activity_config: ActivityConfig = {}, + tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + run_context_type: type[TemporalRunContext] = TemporalRunContext, +) -> AbstractToolset[Any]: + """Temporalize a toolset. + + Args: + toolset: The toolset to temporalize. + activity_config: The Temporal activity config to use. + tool_activity_config: The Temporal activity config to use for specific tools identified by tool name. + run_context_type: The type of run context to use to serialize and deserialize the run context. 
+ """ + if isinstance(toolset, FunctionToolset): + from ._function_toolset import TemporalFunctionToolset + + return TemporalFunctionToolset(toolset, activity_config, tool_activity_config, run_context_type) + elif isinstance(toolset, MCPServer): + from ._mcp_server import TemporalMCPServer + + return TemporalMCPServer(toolset, activity_config, tool_activity_config, run_context_type) + else: + return toolset diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index 47c758af5d..5ab8636724 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -86,6 +86,8 @@ a2a = ["fasta2a>=0.4.1"] ag-ui = ["ag-ui-protocol>=0.1.8", "starlette>=0.45.3"] # Retries retries = ["tenacity>=8.2.3"] +# Temporal +temporal = ["temporalio>=1.15.0"] [dependency-groups] dev = [ diff --git a/pyproject.toml b/pyproject.toml index 1d619f7732..424378faaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ requires-python = ">=3.9" [tool.hatch.metadata.hooks.uv-dynamic-versioning] dependencies = [ - "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries]=={{ version }}", + "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries,temporal]=={{ version }}", ] [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies] diff --git a/tests/cassettes/test_temporal/test_temporal.yaml b/tests/cassettes/test_temporal/test_temporal.yaml new file mode 100644 index 0000000000..cf50925805 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal.yaml @@ -0,0 +1,929 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4294' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; 
the product name' + role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + 
description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. 
+ name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","usage":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}],"obfuscation":"jP9abrn9XF3"} + + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_3rqTYrA6H21AYUaRGP4F66oq","type":"function","function":{"name":"get_country","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"8RXlE4Z5NT"} + + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"CNtv"} + + data: 
{"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_Xw9XMKBJU48kAAd78WgIswDx","type":"function","function":{"name":"get_product_name","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"TomlO"} + + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"4Gko"} + + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"GtnJ"} + + data: {"id":"chatcmpl-C1KMEUDb1vVwsROQUCZTgG6A6vtWo","object":"chat.completion.chunk","created":1754432618,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[],"usage":{"prompt_tokens":364,"completion_tokens":40,"total_tokens":404,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"Ohcj4NkgRRFLL"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '756' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + 
strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4720' + content-type: + - application/json + cookie: + - __cf_bm=glnXI8zJVAJ0K_QdEGsOKKRgWNfzACGXNWXiwREzaLg-1754432619-1.0.1.1-_Ef07EWA.dA.ieqsA1ZV5wshb3Z4zXVgZ6bbQCLkVEXqzEUQ4cPSApZhDGjVWQMg9aEywh0CfTkaZwvW0rjDh_nKfoZ5Cc8fMVgN5gAyMNc; + _cfuvid=Ckz8lfebgV0n8QtvIIYuQIvlcwwiwc67I0Aw.L8t4rM-1754432619011-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_3rqTYrA6H21AYUaRGP4F66oq + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_Xw9XMKBJU48kAAd78WgIswDx + type: function + - content: Mexico + role: tool + tool_call_id: call_3rqTYrA6H21AYUaRGP4F66oq + - content: Pydantic AI + role: tool + tool_call_id: call_Xw9XMKBJU48kAAd78WgIswDx + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + 
description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: 
false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pJt0pVt5b"} + + data: 
{"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"u17"} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"W5"} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"R"} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"d"} + + data: 
{"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"y7y"} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"Wj82"} + + data: {"id":"chatcmpl-C1KMJC4uUHgeJ4A0e8jM8wufrmdxX","object":"chat.completion.chunk","created":1754432623,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[],"usage":{"prompt_tokens":423,"completion_tokens":15,"total_tokens":438,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"Umu4rjZrtKjmq"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '535' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4969' + content-type: + - application/json + cookie: + - 
__cf_bm=glnXI8zJVAJ0K_QdEGsOKKRgWNfzACGXNWXiwREzaLg-1754432619-1.0.1.1-_Ef07EWA.dA.ieqsA1ZV5wshb3Z4zXVgZ6bbQCLkVEXqzEUQ4cPSApZhDGjVWQMg9aEywh0CfTkaZwvW0rjDh_nKfoZ5Cc8fMVgN5gAyMNc; + _cfuvid=Ckz8lfebgV0n8QtvIIYuQIvlcwwiwc67I0Aw.L8t4rM-1754432619011-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_3rqTYrA6H21AYUaRGP4F66oq + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_Xw9XMKBJU48kAAd78WgIswDx + type: function + - content: Mexico + role: tool + tool_call_id: call_3rqTYrA6H21AYUaRGP4F66oq + - content: Pydantic AI + role: tool + tool_call_id: call_Xw9XMKBJU48kAAd78WgIswDx + - role: assistant + tool_calls: + - function: + arguments: '{"city":"Mexico City"}' + name: get_weather + id: call_Vz0Sie91Ap56nH0ThKGrZXT7 + type: function + - content: sunny + role: tool + tool_call_id: call_Vz0Sie91Ap56nH0ThKGrZXT7 + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather 
+ forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log 
level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_4kc6691zCzjPnOuEtbEGUvz2","type":"function","function":{"name":"final_result","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"fJykmX6H"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"MvE"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answers"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"dktqfAJehiLPjyt"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":["}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"1O"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Lvw"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"f"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"U"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"dfpOaGxthacFGXR"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" of"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ofN"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" the"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Lw"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" country"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"dTcI51M3iiQGmY"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"l"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"joxN8cSzo5Vtw0V"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"H"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Weather"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"naC5I0l5UxNvni5"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" in"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Osv"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" the"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Hz"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"XpkABYJn503NY0"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"D"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"u"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Sunny"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Y"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"q6lCElEngpao86s"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"l"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"p"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Product"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xxsXQcQiDZz87WR"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Name"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"z"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"g"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"i"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"P"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4sQOS"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"yd"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"cEjx"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"antic"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"l"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" AI"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"1Fy"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"bId"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"]}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"YeZF"} + + data: 
{"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"T9AO"} + + data: {"id":"chatcmpl-C1KMMrEA9QLIX25pFjKjoRdNkO0nN","object":"chat.completion.chunk","created":1754432626,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[],"usage":{"prompt_tokens":448,"completion_tokens":49,"total_tokens":497,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"0pSy41lq4PYDj"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '585' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... 
diff --git a/tests/test_temporal.py b/tests/test_temporal.py new file mode 100644 index 0000000000..495b529189 --- /dev/null +++ b/tests/test_temporal.py @@ -0,0 +1,304 @@ +from __future__ import annotations + +import os +from collections.abc import AsyncIterable, AsyncIterator +from dataclasses import dataclass +from datetime import timedelta + +from inline_snapshot import snapshot +from typing_extensions import TypedDict + +from pydantic_ai import Agent, RunContext +from pydantic_ai.messages import AgentStreamEvent, HandleResponseEvent +from pydantic_ai.toolsets import FunctionToolset + +try: + from temporalio import workflow + from temporalio.client import Client + from temporalio.testing import WorkflowEnvironment + from temporalio.worker import Worker + from temporalio.workflow import ActivityConfig + + from pydantic_ai.ext.temporal import ( + AgentPlugin, + LogfirePlugin, + PydanticAIPlugin, + TemporalAgent, + TemporalRunContextWithDeps, + ) +except ImportError: + import pytest + + pytest.skip('temporal not installed', allow_module_level=True) + +try: + import logfire + from logfire.testing import CaptureLogfire +except ImportError: + import pytest + + pytest.skip('logfire not installed', allow_module_level=True) + +try: + from pydantic_ai.mcp import MCPServerStdio +except ImportError: + import pytest + + pytest.skip('mcp not installed', allow_module_level=True) + +try: + from pydantic_ai.models.openai import OpenAIModel + from pydantic_ai.providers.openai import OpenAIProvider +except ImportError: + import pytest + + pytest.skip('openai not installed', allow_module_level=True) + + +with workflow.unsafe.imports_passed_through(): + # Workaround for a race condition when running `logfire.info` inside an activity with attributes to serialize and pandas importable: + # AttributeError: partially initialized module 'pandas' has no attribute '_pandas_parser_CAPI' (most likely due to a circular import) + import pandas # pyright: ignore[reportUnusedImport] # noqa: 
F401 + + # https://github.com/temporalio/sdk-python/blob/3244f8bffebee05e0e7efefb1240a75039903dda/tests/test_client.py#L112C1-L113C1 + import pytest + + # Loads `vcr`, which Temporal doesn't like without passing through the import + from .conftest import IsStr + +pytestmark = [ + pytest.mark.anyio, + pytest.mark.vcr, +] + +TEMPORAL_PORT = 7243 + + +@pytest.fixture +async def env() -> AsyncIterator[WorkflowEnvironment]: + async with await WorkflowEnvironment.start_local(port=TEMPORAL_PORT) as env: # pyright: ignore[reportUnknownMemberType] + yield env + + +@pytest.fixture +async def client(env: WorkflowEnvironment) -> Client: + return await Client.connect( + f'localhost:{TEMPORAL_PORT}', + plugins=[PydanticAIPlugin(), LogfirePlugin()], + ) + + +class Deps(TypedDict): + country: str + + +async def event_stream_handler( + ctx: RunContext[Deps], + stream: AsyncIterable[AgentStreamEvent | HandleResponseEvent], +): + logfire.info(f'{ctx.run_step=}') + async for event in stream: + logfire.info(f'{event=}') + + +async def get_country(ctx: RunContext[Deps]) -> str: + return ctx.deps['country'] + + +def get_weather(city: str) -> str: + return 'sunny' + + +@dataclass +class Answer: + label: str + answer: str + + +@dataclass +class Response: + answers: list[Answer] + + +agent = Agent( + # Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. + OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'))), + deps_type=Deps, + output_type=Response, + toolsets=[ + FunctionToolset[Deps](tools=[get_country], id='country'), + MCPServerStdio('python', ['-m', 'tests.mcp_server'], timeout=20, id='mcp'), + ], + tools=[get_weather], + event_stream_handler=event_stream_handler, +) + +# This needs to be done before the `agent` is bound to the workflow. 
+temporal_agent = TemporalAgent( + agent, + activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=60)), + toolset_activity_config={ + 'country': ActivityConfig(start_to_close_timeout=timedelta(seconds=120)), + }, + tool_activity_config={ + 'country': { + 'get_country': False, + }, + }, + run_context_type=TemporalRunContextWithDeps, +) + + +@workflow.defn +class AgentWorkflow: + @workflow.run + async def run(self, prompt: str, deps: Deps) -> Response: + result = await temporal_agent.run(prompt, deps=deps) + return result.output + + +async def test_temporal(allow_model_requests: None, client: Client, capfire: CaptureLogfire): + task_queue = 'pydantic-ai-agent-task-queue' + + async with Worker( + client, + task_queue=task_queue, + workflows=[AgentWorkflow], + plugins=[AgentPlugin(temporal_agent)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + AgentWorkflow.run, + args=[ + 'Tell me: the capital of the country; the weather there; the product name', + Deps(country='Mexico'), + ], + id='pydantic-ai-agent-workflow', + task_queue=task_queue, + ) + assert output == snapshot( + Response( + answers=[ + Answer(label='Capital of the country', answer='Mexico City'), + Answer(label='Weather in the capital', answer='Sunny'), + Answer(label='Product Name', answer='Pydantic AI'), + ] + ) + ) + exporter = capfire.exporter + + parsed_spans: list[str | AgentStreamEvent | HandleResponseEvent] = [] + for span in exporter.exported_spans_as_dict(): + attributes = span['attributes'] + if event := attributes.get('event'): + parsed_spans.append(event) + else: + parsed_spans.append(attributes['logfire.msg']) + + assert parsed_spans == snapshot( + [ + 'StartWorkflow:AgentWorkflow', + 'RunWorkflow:AgentWorkflow', + 'StartActivity:mcp_server__mcp__get_tools', + 'RunActivity:mcp_server__mcp__get_tools', + 'StartActivity:mcp_server__mcp__get_tools', + 'RunActivity:mcp_server__mcp__get_tools', + 
'StartActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=1', + '{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"part_start"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"part_start"}', + '{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + 'RunActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=1', + 'chat gpt-4o', + 'ctx.run_step=1', + '{"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"function_tool_call"}', + '{"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"function_tool_call"}', + 'running tool: get_country', + 'StartActivity:mcp_server__mcp__call_tool', + IsStr( + regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' + ), + 'RunActivity:mcp_server__mcp__call_tool', + 'running tool: get_product_name', + IsStr( + regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' + ), + 'running 2 tools', + 'StartActivity:mcp_server__mcp__get_tools', + 'RunActivity:mcp_server__mcp__get_tools', + 'StartActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=2', + 
'{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"part_start"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"city","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Mexico","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" City","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + 'RunActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=2', + 'chat gpt-4o', + 'ctx.run_step=2', + '{"part":{"tool_name":"get_weather","args":"{\\"city\\":\\"Mexico City\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"function_tool_call"}', + 'StartActivity:function_toolset____call_tool', + 'RunActivity:function_toolset____call_tool', + 'running tool: get_weather', + IsStr( + regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' + ), + 'running 1 tool', + 'StartActivity:mcp_server__mcp__get_tools', + 'RunActivity:mcp_server__mcp__get_tools', + 
'StartActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=3', + '{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_kind":"tool-call"},"event_kind":"part_start"}', + '{"tool_name":"final_result","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","event_kind":"final_result"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"answers","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":[","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"label","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Capital","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" of","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" the","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" 
country","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\",\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"answer","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Mexico","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" City","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"},{\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"label","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Weather","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" in","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" 
the","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" capital","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\",\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"answer","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Sunny","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"},{\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"label","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Product","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" Name","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + 
'{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\",\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"answer","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\":\\"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"P","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"yd","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"antic","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" AI","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', + 'RunActivity:model__openai_gpt-4o__request_stream', + 'ctx.run_step=3', + 'chat gpt-4o', + 'ctx.run_step=3', + 'self run', + 'CompleteWorkflow:AgentWorkflow', + ] + ) diff --git a/uv.lock b/uv.lock index 8bc9238ef0..942861c103 100644 --- a/uv.lock +++ b/uv.lock @@ -2437,6 +2437,18 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" }, ] +[[package]] +name = "nexus-rpc" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/66/540687556bd28cf1ec370cc6881456203dfddb9dab047b8979c6865b5984/nexus_rpc-1.1.0.tar.gz", hash = "sha256:d65ad6a2f54f14e53ebe39ee30555eaeb894102437125733fb13034a04a44553", size = 77383, upload-time = "2025-07-07T19:03:58.368Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/2f/9e9d0dcaa4c6ffa22b7aa31069a8a264c753ff8027b36af602cce038c92f/nexus_rpc-1.1.0-py3-none-any.whl", hash = "sha256:d1b007af2aba186a27e736f8eaae39c03aed05b488084ff6c3d1785c9ba2ad38", size = 27743, upload-time = "2025-07-07T19:03:57.556Z" }, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -3291,7 +3303,7 @@ wheels = [ name = "pydantic-ai" source = { editable = "." 
} dependencies = [ - { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "mcp", "mistral", "openai", "retries", "vertexai"] }, + { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"] }, ] [package.optional-dependencies] @@ -3331,7 +3343,7 @@ requires-dist = [ { name = "fasta2a", marker = "extra == 'a2a'", specifier = ">=0.4.1" }, { name = "logfire", marker = "extra == 'logfire'", specifier = ">=3.11.0" }, { name = "pydantic-ai-examples", marker = "extra == 'examples'", editable = "examples" }, - { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "mcp", "mistral", "openai", "retries", "vertexai"], editable = "pydantic_ai_slim" }, + { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"], editable = "pydantic_ai_slim" }, ] provides-extras = ["a2a", "examples", "logfire"] @@ -3465,6 +3477,9 @@ retries = [ tavily = [ { name = "tavily-python" }, ] +temporal = [ + { name = "temporalio" }, +] vertexai = [ { name = "google-auth" }, { name = "requests" }, @@ -3523,9 +3538,10 @@ requires-dist = [ { name = "starlette", marker = "extra == 'ag-ui'", specifier = ">=0.45.3" }, { name = "tavily-python", marker = "extra == 'tavily'", specifier = ">=0.5.0" }, { name = "tenacity", marker = "extra == 'retries'", specifier = ">=8.2.3" }, + { name = "temporalio", marker = "extra == 'temporal'", specifier = ">=1.15.0" }, { name = "typing-inspection", specifier = ">=0.4.0" }, ] -provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", 
"tavily", "vertexai"] +provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"] [package.metadata.requires-dev] dev = [ @@ -4434,6 +4450,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a5/cd/71088461d7720128c78802289b3b36298f42745e5f8c334b0ffc157b881e/tavily_python-0.5.1-py3-none-any.whl", hash = "sha256:169601f703c55cf338758dcacfa7102473b479a9271d65a3af6fc3668990f757", size = 43767, upload-time = "2025-02-07T00:22:04.99Z" }, ] +[[package]] +name = "temporalio" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nexus-rpc" }, + { name = "protobuf" }, + { name = "python-dateutil", marker = "python_full_version < '3.11'" }, + { name = "types-protobuf" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/af/1a3619fc62333d0acbdf90cfc5ada97e68e8c0f79610363b2dbb30871d83/temporalio-1.15.0.tar.gz", hash = "sha256:a4bc6ca01717880112caab75d041713aacc8263dc66e41f5019caef68b344fa0", size = 1684485, upload-time = "2025-07-29T03:44:09.071Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/2d/0153f2bc459e0cb59d41d4dd71da46bf9a98ca98bc37237576c258d6696b/temporalio-1.15.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:74bc5cc0e6bdc161a43015538b0821b8713f5faa716c4209971c274b528e0d47", size = 12703607, upload-time = "2025-07-29T03:43:30.083Z" }, + { url = "https://files.pythonhosted.org/packages/e4/39/1b867ec698c8987aef3b7a7024b5c0c732841112fa88d021303d0fc69bea/temporalio-1.15.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:ee8001304dae5723d79797516cfeebe04b966fdbdf348e658fce3b43afdda3cd", size = 12232853, upload-time = "2025-07-29T03:43:38.909Z" }, + { url = 
"https://files.pythonhosted.org/packages/5e/3e/647d9a7c8b2f638f639717404c0bcbdd7d54fddd7844fdb802e3f40dc55f/temporalio-1.15.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8febd1ac36720817e69c2176aa4aca14a97fe0b83f0d2449c0c730b8f0174d02", size = 12636700, upload-time = "2025-07-29T03:43:49.066Z" }, + { url = "https://files.pythonhosted.org/packages/9a/13/7aa9ec694fec9fba39efdbf61d892bccf7d2b1aa3d9bd359544534c1d309/temporalio-1.15.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:202d81a42cafaed9ccc7ccbea0898838e3b8bf92fee65394f8790f37eafbaa63", size = 12860186, upload-time = "2025-07-29T03:43:57.644Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2b/ba962401324892236148046dbffd805d4443d6df7a7dc33cc7964b566bf9/temporalio-1.15.0-cp39-abi3-win_amd64.whl", hash = "sha256:aae5b18d7c9960238af0f3ebf6b7e5959e05f452106fc0d21a8278d78724f780", size = 12932800, upload-time = "2025-07-29T03:44:06.271Z" }, +] + [[package]] name = "tenacity" version = "8.5.0" @@ -4655,6 +4691,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b5/63/2463d89481e811f007b0e1cd0a91e52e141b47f9de724d20db7b861dcfec/types_certifi-2021.10.8.3-py3-none-any.whl", hash = "sha256:b2d1e325e69f71f7c78e5943d410e650b4707bb0ef32e4ddf3da37f54176e88a", size = 2136, upload-time = "2022-06-09T15:19:03.127Z" }, ] +[[package]] +name = "types-protobuf" +version = "6.30.2.20250516" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ac/6c/5cf088aaa3927d1cc39910f60f220f5ff573ab1a6485b2836e8b26beb58c/types_protobuf-6.30.2.20250516.tar.gz", hash = "sha256:aecd1881770a9bb225ede66872ef7f0da4505edd0b193108edd9892e48d49a41", size = 62254, upload-time = "2025-05-16T03:06:50.794Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/66/06a9c161f5dd5deb4f5c016ba29106a8f1903eb9a1ba77d407dd6588fecb/types_protobuf-6.30.2.20250516-py3-none-any.whl", hash = 
"sha256:8c226d05b5e8b2623111765fa32d6e648bbc24832b4c2fddf0fa340ba5d5b722", size = 76480, upload-time = "2025-05-16T03:06:49.444Z" }, +] + [[package]] name = "types-requests" version = "2.31.0.6" From d7bffadc616aee99fc0b389ae771fba645ba07ba Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 15:04:32 +0000 Subject: [PATCH 02/30] Update for Agent.toolsets --- .../pydantic_ai/agent/__init__.py | 69 +++++++++++-------- .../pydantic_ai/ext/temporal/_agent.py | 12 ++-- .../pydantic_ai/ext/temporal/_toolset.py | 7 ++ .../pydantic_ai/toolsets/_dynamic.py | 4 +- tests/test_agent.py | 12 ++-- tests/test_toolsets.py | 11 ++- 6 files changed, 66 insertions(+), 49 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index a05ecca1e1..7a7cc4cf88 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -593,12 +593,7 @@ async def main(): run_step=state.run_step, ) - toolset = self._get_toolset(additional=toolsets) - - if output_toolset is not None: - if self._prepare_output_tools: - output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools) - toolset = CombinedToolset([output_toolset, toolset]) + toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets) async with toolset: # This will raise errors for any name conflicts @@ -1240,48 +1235,64 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: return deps def _get_toolset( - self, additional: Sequence[AbstractToolset[AgentDepsT]] | None = None + self, + output_toolset: AbstractToolset[AgentDepsT] | None | _utils.Unset = _utils.UNSET, + additional_toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, ) -> AbstractToolset[AgentDepsT]: - """Get the combined toolset containing function tools registered directly to the agent and user-provided toolsets including MCP servers. + """Get the complete toolset. 
Args: - additional: Additional toolsets to add. + output_toolset: The output toolset to use instead of the one built at agent construction time. + additional_toolsets: Additional toolsets to add, unless toolsets have been overridden. """ - if some_tools := self._override_tools.get(): - function_toolset = _AgentFunctionToolset(some_tools.value, max_retries=self._max_tool_retries) - else: - function_toolset = self._function_toolset + toolsets = self.toolsets + # Don't add additional toolsets if the toolsets have been overridden + if additional_toolsets and self._override_toolsets.get() is None: + toolsets = [*toolsets, *additional_toolsets] - if some_user_toolsets := self._override_toolsets.get(): - user_toolsets = some_user_toolsets.value - else: - # Copy the dynamic toolsets to ensure each run has its own instances - dynamic_toolsets = [dataclasses.replace(toolset) for toolset in self._dynamic_toolsets] - user_toolsets = [*self._user_toolsets, *dynamic_toolsets, *(additional or [])] + toolset = CombinedToolset(toolsets) - if user_toolsets: - toolset = CombinedToolset([function_toolset, *user_toolsets]) - else: - toolset = function_toolset + # Copy the dynamic toolsets to ensure each run has its own instances + def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset[AgentDepsT]: + if isinstance(toolset, DynamicToolset): + return dataclasses.replace(toolset) + else: + return toolset + + toolset = toolset.visit_and_replace(copy_dynamic_toolsets) if self._prepare_tools: toolset = PreparedToolset(toolset, self._prepare_tools) + output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset + if output_toolset is not None: + if self._prepare_output_tools: + output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools) + toolset = CombinedToolset([output_toolset, toolset]) + return toolset @property def toolsets(self) -> Sequence[AbstractToolset[AgentDepsT]]: """All toolsets registered on the agent, 
including a function toolset holding tools that were registered on the agent directly. - If a `prepare_tools` function was configured on the agent, this will contain just a `PreparedToolset` wrapping the original toolsets. - Output tools are not included. """ - toolset = self._get_toolset() - if isinstance(toolset, CombinedToolset): - return toolset.toolsets + toolsets: list[AbstractToolset[AgentDepsT]] = [] + + if some_tools := self._override_tools.get(): + function_toolset = _AgentFunctionToolset(some_tools.value, max_retries=self._max_tool_retries) else: - return [toolset] + function_toolset = self._function_toolset + toolsets.append(function_toolset) + + if some_user_toolsets := self._override_toolsets.get(): + user_toolsets = some_user_toolsets.value + else: + user_toolsets = [*self._user_toolsets, *self._dynamic_toolsets] + toolsets.extend(user_toolsets) + + return toolsets def _prepare_output_schema( self, output_type: OutputSpec[RunOutputDataT] | None, model_profile: ModelProfile diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py index 8c2abde8de..5ea2c6da2f 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -93,10 +93,10 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset activities.extend(toolset.temporal_activities) return toolset - temporal_toolset = agent.toolset.visit_and_replace(temporalize_toolset) + temporal_toolsets = [toolset.visit_and_replace(temporalize_toolset) for toolset in agent.toolsets] self._model = temporal_model - self._toolset = temporal_toolset + self._toolsets = temporal_toolsets self._temporal_activities = activities @property @@ -104,9 +104,9 @@ def model(self) -> Model: return self._model @property - def toolset(self) -> AbstractToolset[AgentDepsT]: + def toolsets(self) -> Sequence[AbstractToolset[AgentDepsT]]: with self._temporal_overrides(): - return 
super().toolset + return super().toolsets @property def temporal_activities(self) -> list[Callable[..., Any]]: @@ -114,8 +114,8 @@ def temporal_activities(self) -> list[Callable[..., Any]]: @contextmanager def _temporal_overrides(self) -> Iterator[None]: - # We reset tools here as the temporalized function toolset is already in self._toolset. - with super().override(model=self._model, toolsets=[self._toolset], tools=[]): + # We reset tools here as the temporalized function toolset is already in self._toolsets. + with super().override(model=self._model, toolsets=self._toolsets, tools=[]): yield @overload diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py index c568ef7932..3f815c9a5f 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py @@ -5,6 +5,7 @@ from temporalio.workflow import ActivityConfig +from pydantic_ai._run_context import AgentDepsT from pydantic_ai.ext.temporal._run_context import TemporalRunContext from pydantic_ai.mcp import MCPServer from pydantic_ai.toolsets.abstract import AbstractToolset @@ -18,6 +19,12 @@ class TemporalWrapperToolset(WrapperToolset[Any], ABC): def temporal_activities(self) -> list[Callable[..., Any]]: raise NotImplementedError + def visit_and_replace( + self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]] + ) -> AbstractToolset[AgentDepsT]: + # Temporalized toolsets cannot be swapped out after the fact. 
+ return self + def temporalize_toolset( toolset: AbstractToolset[Any], diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py index e8b031b855..0ad75219e5 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py @@ -73,7 +73,9 @@ async def call_tool( return await self._toolset.call_tool(name, tool_args, ctx, tool) def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None: - if self._toolset is not None: + if self._toolset is None: + super().apply(visitor) + else: self._toolset.apply(visitor) def visit_and_replace( diff --git a/tests/test_agent.py b/tests/test_agent.py index e258f43627..bbe91d74c4 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -54,9 +54,8 @@ from pydantic_ai.toolsets.combined import CombinedToolset from pydantic_ai.toolsets.function import FunctionToolset from pydantic_ai.toolsets.prefixed import PrefixedToolset -from pydantic_ai.toolsets.prepared import PreparedToolset -from .conftest import IsDatetime, IsInstance, IsNow, IsStr, TestEnv +from .conftest import IsDatetime, IsNow, IsStr, TestEnv pytestmark = pytest.mark.anyio @@ -4047,11 +4046,10 @@ def foo() -> str: agent = Agent('test', toolsets=[toolset]) assert toolset in agent.toolsets - async def prepare_tools(ctx: RunContext[None], tool_defs: list[ToolDefinition]) -> list[ToolDefinition]: - return tool_defs # pragma: no cover - - agent = Agent('test', toolsets=[toolset], prepare_tools=prepare_tools) - assert agent.toolsets == [IsInstance(PreparedToolset)] + other_toolset = FunctionToolset() + with agent.override(toolsets=[other_toolset]): + assert other_toolset in agent.toolsets + assert toolset not in agent.toolsets async def test_wrapper_agent(): diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py index 7c89265c55..2e256df5f3 100644 --- a/tests/test_toolsets.py +++ b/tests/test_toolsets.py @@ -734,18 +734,17 @@ def 
get_inner_toolset(toolset: DynamicToolset[None] | None) -> EnterableToolset assert inner_toolset.depth_count == 1 # Test that the visitor applies when the toolset is initialized - def visitor(toolset: AbstractToolset[None]) -> None: + def initialized_visitor(toolset: AbstractToolset[None]) -> None: assert toolset is inner_toolset - toolset.apply(visitor) + toolset.apply(initialized_visitor) assert get_inner_toolset(toolset) is None - # Test that the visitor doesn't apply when the toolset is not initialized - def crash_visitor(toolset: AbstractToolset[None]) -> None: - raise Exception('crash') # pragma: no cover + def uninitialized_visitor(visited_toolset: AbstractToolset[None]) -> None: + assert visited_toolset is toolset - assert toolset.apply(crash_visitor) is None + toolset.apply(uninitialized_visitor) assert tools == {} From 07d21d1b4fc023bc60a824a6514f4cdea9ab37b9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 16:07:32 +0000 Subject: [PATCH 03/30] Fix docstring examples --- .../pydantic_ai/ext/temporal/_agent.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py index 5ea2c6da2f..b4a7379f8f 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -182,7 +182,7 @@ async def run( async def main(): agent_run = await agent.run('What is the capital of France?') print(agent_run.output) - #> Paris + #> The capital of France is Paris. ``` Args: @@ -285,7 +285,7 @@ def run_sync( result_sync = agent.run_sync('What is the capital of Italy?') print(result_sync.output) - #> Rome + #> The capital of Italy is Rome. ``` Args: @@ -387,7 +387,7 @@ async def run_stream( async def main(): async with agent.run_stream('What is the capital of the UK?') as response: print(await response.get_output()) - #> London + #> The capital of the UK is London. 
``` Args: @@ -524,19 +524,19 @@ async def main(): ), CallToolsNode( model_response=ModelResponse( - parts=[TextPart(content='Paris')], + parts=[TextPart(content='The capital of France is Paris.')], usage=Usage( - requests=1, request_tokens=56, response_tokens=1, total_tokens=57 + requests=1, request_tokens=56, response_tokens=7, total_tokens=63 ), model_name='gpt-4o', timestamp=datetime.datetime(...), ) ), - End(data=FinalResult(output='Paris')), + End(data=FinalResult(output='The capital of France is Paris.')), ] ''' print(agent_run.result.output) - #> Paris + #> The capital of France is Paris. ``` Args: From 58691abfb4e04d533943bded3bf3f376afa1c714 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 17:15:23 +0000 Subject: [PATCH 04/30] Address feedback --- .../pydantic_ai/ext/temporal/_agent.py | 43 ++++++++++++----- .../ext/temporal/_function_toolset.py | 14 ++++-- .../pydantic_ai/ext/temporal/_logfire.py | 25 +++++----- .../pydantic_ai/ext/temporal/_mcp_server.py | 30 +++++++----- .../pydantic_ai/ext/temporal/_model.py | 20 +++++--- .../pydantic_ai/ext/temporal/_toolset.py | 22 +++++++-- tests/test_temporal.py | 47 ++++++++++--------- 7 files changed, 128 insertions(+), 73 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py index b4a7379f8f..2baa60dbbf 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -2,6 +2,7 @@ from collections.abc import AsyncIterator, Iterator, Sequence from contextlib import AbstractAsyncContextManager, asynccontextmanager, contextmanager +from datetime import timedelta from typing import Any, Callable, Literal, overload from temporalio import workflow @@ -36,13 +37,16 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]): def __init__( self, wrapped: AbstractAgent[AgentDepsT, OutputDataT], - activity_config: ActivityConfig = {}, - toolset_activity_config: 
dict[str, ActivityConfig] = {}, - tool_activity_config: dict[str, dict[str, ActivityConfig | Literal[False]]] = {}, + *, + activity_config: ActivityConfig | None = None, + model_activity_config: ActivityConfig | None = None, + toolset_activity_config: dict[str, ActivityConfig] | None = None, + tool_activity_config: dict[str, dict[str, ActivityConfig | Literal[False]]] | None = None, run_context_type: type[TemporalRunContext] = TemporalRunContext, temporalize_toolset_func: Callable[ [ AbstractToolset[Any], + str, ActivityConfig, dict[str, ActivityConfig | Literal[False]], type[TemporalRunContext], @@ -55,6 +59,7 @@ def __init__( Args: wrapped: The agent to wrap. activity_config: The Temporal activity config to use. + model_activity_config: The Temporal activity config to use for model requests. toolset_activity_config: The Temporal activity config to use for specific toolsets identified by ID. tool_activity_config: The Temporal activity config to use for specific tools identified by toolset ID and tool name. run_context_type: The type of run context to use to serialize and deserialize the run context. @@ -62,29 +67,45 @@ def __init__( """ super().__init__(wrapped) + activity_config = activity_config or ActivityConfig(start_to_close_timeout=timedelta(seconds=60)) + model_activity_config = model_activity_config or {} + toolset_activity_config = toolset_activity_config or {} + tool_activity_config = tool_activity_config or {} + agent = wrapped + if agent.name is None: + raise UserError( + "An agent needs to have a unique `name` in order to be used with Temporal. The name will be used to identify the agent's activities within the workflow." + ) + + activity_name_prefix = f'agent__{agent.name}' + activities: list[Callable[..., Any]] = [] if not isinstance(agent.model, Model): raise UserError( - 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
+ 'An agent needs to have a `model` in order to be used with Temporal, it cannot be set at agent run time.' ) - event_stream_handler = agent.event_stream_handler - if event_stream_handler is None: - raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') - - temporal_model = TemporalModel(agent.model, event_stream_handler, activity_config, run_context_type) + temporal_model = TemporalModel( + agent.model, + activity_name_prefix=activity_name_prefix, + activity_config=activity_config | model_activity_config, + run_context_type=run_context_type, + event_stream_handler=agent.event_stream_handler, + ) activities.extend(temporal_model.temporal_activities) def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset[AgentDepsT]: id = toolset.id - if not id: + if id is None: raise UserError( - "Toolsets that implement their own tool calling need to have an ID in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." + "Toolsets that implement their own tool calling need to have a unique `id` in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." 
) + toolset = temporalize_toolset_func( toolset, + activity_name_prefix, activity_config | toolset_activity_config.get(id, {}), tool_activity_config.get(id, {}), run_context_type, diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py index d0965ab7d6..104f41cbce 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py @@ -28,8 +28,10 @@ class TemporalFunctionToolset(TemporalWrapperToolset): def __init__( self, toolset: FunctionToolset, - activity_config: ActivityConfig = {}, - tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + *, + activity_name_prefix: str, + activity_config: ActivityConfig, + tool_activity_config: dict[str, ActivityConfig | Literal[False]], run_context_type: type[TemporalRunContext] = TemporalRunContext, ): super().__init__(toolset) @@ -37,10 +39,11 @@ def __init__( self.tool_activity_config = tool_activity_config self.run_context_type = run_context_type + # An error is raised in `TemporalAgent` if no `id` is set. id = toolset.id assert id is not None - @activity.defn(name=f'function_toolset__{id}__call_tool') + @activity.defn(name=f'{activity_name_prefix}__toolset__{id}__call_tool') async def call_tool_activity(params: _CallToolParams) -> Any: name = params.name ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context) @@ -48,7 +51,7 @@ async def call_tool_activity(params: _CallToolParams) -> Any: tool = (await toolset.get_tools(ctx))[name] except KeyError as e: raise UserError( - f'Tool {name!r} not found in toolset {toolset.id!r}. ' + f'Tool {name!r} not found in toolset {id!r}. ' 'Removing or renaming tools during an agent run is not supported with Temporal.' 
) from e @@ -74,7 +77,8 @@ async def call_tool(self, name: str, tool_args: dict[str, Any], ctx: RunContext, assert isinstance(tool, FunctionToolsetTool) if not tool.is_async: raise UserError( - f'Temporal activity config for non-async tool {name!r} is `False` (activity disabled), but only async tools can be run outside of an activity. Make the tool function async instead.' + f'Temporal activity config for tool {name!r} has been explicitly set to `False` (activity disabled), ' + 'but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead.' ) return await super().call_tool(name, tool_args, ctx, tool) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py index c665c8d41f..d44a0077eb 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py @@ -21,8 +21,9 @@ def _default_setup_logfire() -> Logfire: class LogfirePlugin(ClientPlugin): """Temporal client plugin for Logfire.""" - def __init__(self, setup_logfire: Callable[[], Logfire] = _default_setup_logfire): + def __init__(self, setup_logfire: Callable[[], Logfire] = _default_setup_logfire, *, metrics: bool = True): self.setup_logfire = setup_logfire + self.metrics = metrics def configure_client(self, config: ClientConfig) -> ClientConfig: interceptors = config.get('interceptors', []) @@ -31,15 +32,17 @@ def configure_client(self, config: ClientConfig) -> ClientConfig: async def connect_service_client(self, config: ConnectConfig) -> ServiceClient: logfire = self.setup_logfire() - logfire_config = logfire.config - token = logfire_config.token - if token is not None: - base_url = logfire_config.advanced.generate_base_url(token) - metrics_url = base_url + '/v1/metrics' - headers = {'Authorization': f'Bearer {token}'} - - config.runtime = Runtime( - telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url, 
headers=headers)) - ) + + if self.metrics: + logfire_config = logfire.config + token = logfire_config.token + if token is not None: + base_url = logfire_config.advanced.generate_base_url(token) + metrics_url = base_url + '/v1/metrics' + headers = {'Authorization': f'Bearer {token}'} + + config.runtime = Runtime( + telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url, headers=headers)) + ) return await super().connect_service_client(config) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py index 1d0c156ee0..702b7243b8 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py @@ -37,19 +37,31 @@ class TemporalMCPServer(TemporalWrapperToolset): def __init__( self, server: MCPServer, - activity_config: ActivityConfig = {}, - tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + *, + activity_name_prefix: str, + activity_config: ActivityConfig, + tool_activity_config: dict[str, ActivityConfig | Literal[False]], run_context_type: type[TemporalRunContext] = TemporalRunContext, ): super().__init__(server) self.activity_config = activity_config - self.tool_activity_config = tool_activity_config + + self.tool_activity_config: dict[str, ActivityConfig] = {} + for tool_name, tool_config in tool_activity_config.items(): + if tool_config is False: + raise UserError( + f'Temporal activity config for MCP tool {tool_name!r} has been explicitly set to `False` (activity disabled), ' + 'but MCP tools require the use of IO and so cannot be run outside of an activity.' + ) + self.tool_activity_config[tool_name] = tool_config + self.run_context_type = run_context_type + # An error is raised in `TemporalAgent` if no `id` is set. 
id = server.id assert id is not None - @activity.defn(name=f'mcp_server__{id}__get_tools') + @activity.defn(name=f'{activity_name_prefix}__mcp_server__{id}__get_tools') async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinition]: run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) tools = await self.wrapped.get_tools(run_context) @@ -59,7 +71,7 @@ async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinitio self.get_tools_activity = get_tools_activity - @activity.defn(name=f'mcp_server__{id}__call_tool') + @activity.defn(name=f'{activity_name_prefix}__mcp_server__{id}__call_tool') async def call_tool_activity(params: _CallToolParams) -> ToolResult: run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) return await self.wrapped.call_tool( @@ -110,13 +122,7 @@ async def call_tool( if not workflow.in_workflow(): return await super().call_tool(name, tool_args, ctx, tool) - tool_activity_config = self.tool_activity_config.get(name, {}) - if tool_activity_config is False: - raise UserError( - f'Temporal activity config for MCP tool {name!r} is `False` (activity disabled), but MCP tools cannot be run outside of an activity.' 
- ) - - tool_activity_config = self.activity_config | tool_activity_config + tool_activity_config = self.activity_config | self.tool_activity_config.get(name, {}) serialized_run_context = self.run_context_type.serialize_run_context(ctx) return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.call_tool_activity, diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py index 92f379cac3..a62b24f603 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py @@ -68,25 +68,28 @@ class TemporalModel(WrapperModel): def __init__( self, model: Model, - event_stream_handler: EventStreamHandler[Any], - activity_config: ActivityConfig = {}, + *, + activity_name_prefix: str, + activity_config: ActivityConfig, run_context_type: type[TemporalRunContext] = TemporalRunContext, + event_stream_handler: EventStreamHandler[Any] | None = None, ): super().__init__(model) self.activity_config = activity_config - self.event_stream_handler = event_stream_handler self.run_context_type = run_context_type + self.event_stream_handler = event_stream_handler - id = '_'.join([model.system, model.model_name]) - - @activity.defn(name=f'model__{id}__request') + @activity.defn(name=f'{activity_name_prefix}__model_request') async def request_activity(params: _RequestParams) -> ModelResponse: return await self.wrapped.request(params.messages, params.model_settings, params.model_request_parameters) self.request_activity = request_activity - @activity.defn(name=f'model__{id}__request_stream') + @activity.defn(name=f'{activity_name_prefix}__model_request_stream') async def request_stream_activity(params: _RequestParams) -> ModelResponse: + # An error is raised in `request_stream` if no `event_stream_handler` is set. 
+ assert self.event_stream_handler is not None + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) async with self.wrapped.request_stream( params.messages, params.model_settings, params.model_request_parameters, run_context @@ -131,6 +134,9 @@ async def request_stream( model_request_parameters: ModelRequestParameters, run_context: RunContext[Any] | None = None, ) -> AsyncIterator[StreamedResponse]: + if self.event_stream_handler is None: + raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') + if run_context is None: raise UserError('Streaming with Temporal requires `request_stream` to be called with a `run_context`') diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py index 3f815c9a5f..8a682591cc 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py @@ -28,14 +28,16 @@ def visit_and_replace( def temporalize_toolset( toolset: AbstractToolset[Any], - activity_config: ActivityConfig = {}, - tool_activity_config: dict[str, ActivityConfig | Literal[False]] = {}, + activity_name_prefix: str, + activity_config: ActivityConfig, + tool_activity_config: dict[str, ActivityConfig | Literal[False]], run_context_type: type[TemporalRunContext] = TemporalRunContext, ) -> AbstractToolset[Any]: """Temporalize a toolset. Args: toolset: The toolset to temporalize. + activity_name_prefix: Prefix for Temporal activity names. activity_config: The Temporal activity config to use. tool_activity_config: The Temporal activity config to use for specific tools identified by tool name. run_context_type: The type of run context to use to serialize and deserialize the run context. 
@@ -43,10 +45,22 @@ def temporalize_toolset( if isinstance(toolset, FunctionToolset): from ._function_toolset import TemporalFunctionToolset - return TemporalFunctionToolset(toolset, activity_config, tool_activity_config, run_context_type) + return TemporalFunctionToolset( + toolset, + activity_name_prefix=activity_name_prefix, + activity_config=activity_config, + tool_activity_config=tool_activity_config, + run_context_type=run_context_type, + ) elif isinstance(toolset, MCPServer): from ._mcp_server import TemporalMCPServer - return TemporalMCPServer(toolset, activity_config, tool_activity_config, run_context_type) + return TemporalMCPServer( + toolset, + activity_name_prefix=activity_name_prefix, + activity_config=activity_config, + tool_activity_config=tool_activity_config, + run_context_type=run_context_type, + ) else: return toolset diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 495b529189..f5fc804fdb 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -131,6 +131,7 @@ class Response: ], tools=[get_weather], event_stream_handler=event_stream_handler, + name='temporal_agent', ) # This needs to be done before the `agent` is bound to the workflow. 
@@ -186,48 +187,48 @@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap ) exporter = capfire.exporter - parsed_spans: list[str | AgentStreamEvent | HandleResponseEvent] = [] + parsed_otel_items: list[str] = [] for span in exporter.exported_spans_as_dict(): attributes = span['attributes'] if event := attributes.get('event'): - parsed_spans.append(event) + parsed_otel_items.append(event) else: - parsed_spans.append(attributes['logfire.msg']) + parsed_otel_items.append(attributes['logfire.msg']) - assert parsed_spans == snapshot( + assert parsed_otel_items == snapshot( [ 'StartWorkflow:AgentWorkflow', 'RunWorkflow:AgentWorkflow', - 'StartActivity:mcp_server__mcp__get_tools', - 'RunActivity:mcp_server__mcp__get_tools', - 'StartActivity:mcp_server__mcp__get_tools', - 'RunActivity:mcp_server__mcp__get_tools', - 'StartActivity:model__openai_gpt-4o__request_stream', + 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=1', '{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:model__openai_gpt-4o__request_stream', + 
'RunActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=1', 'chat gpt-4o', 'ctx.run_step=1', '{"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"function_tool_call"}', '{"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"function_tool_call"}', 'running tool: get_country', - 'StartActivity:mcp_server__mcp__call_tool', + 'StartActivity:agent__temporal_agent__mcp_server__mcp__call_tool', IsStr( regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), - 'RunActivity:mcp_server__mcp__call_tool', + 'RunActivity:agent__temporal_agent__mcp_server__mcp__call_tool', 'running tool: get_product_name', IsStr( regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), 'running 2 tools', - 'StartActivity:mcp_server__mcp__get_tools', - 'RunActivity:mcp_server__mcp__get_tools', - 'StartActivity:model__openai_gpt-4o__request_stream', + 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=2', '{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', @@ -236,21 +237,21 @@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap 
'{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Mexico","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" City","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:model__openai_gpt-4o__request_stream', + 'RunActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=2', 'chat gpt-4o', 'ctx.run_step=2', '{"part":{"tool_name":"get_weather","args":"{\\"city\\":\\"Mexico City\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"function_tool_call"}', - 'StartActivity:function_toolset____call_tool', - 'RunActivity:function_toolset____call_tool', + 'StartActivity:agent__temporal_agent__toolset____call_tool', + 'RunActivity:agent__temporal_agent__toolset____call_tool', 'running tool: get_weather', IsStr( regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), 'running 1 tool', - 'StartActivity:mcp_server__mcp__get_tools', - 'RunActivity:mcp_server__mcp__get_tools', - 'StartActivity:model__openai_gpt-4o__request_stream', + 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=3', '{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_kind":"tool-call"},"event_kind":"part_start"}', '{"tool_name":"final_result","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","event_kind":"final_result"}', @@ -294,11 +295,11 
@@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" AI","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:model__openai_gpt-4o__request_stream', + 'RunActivity:agent__temporal_agent__model_request_stream', 'ctx.run_step=3', 'chat gpt-4o', 'ctx.run_step=3', - 'self run', + 'temporal_agent run', 'CompleteWorkflow:AgentWorkflow', ] ) From f19ee915ebca3ff137c7bcbaa5309691247f2b95 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 17:48:27 +0000 Subject: [PATCH 05/30] Add simple Temporal test --- ..._temporal.yaml => test_complex_agent.yaml} | 0 .../test_temporal/test_simple_agent.yaml | 79 +++++++++++ tests/test_temporal.py | 125 ++++++++++++------ 3 files changed, 162 insertions(+), 42 deletions(-) rename tests/cassettes/test_temporal/{test_temporal.yaml => test_complex_agent.yaml} (100%) create mode 100644 tests/cassettes/test_temporal/test_simple_agent.yaml diff --git a/tests/cassettes/test_temporal/test_temporal.yaml b/tests/cassettes/test_temporal/test_complex_agent.yaml similarity index 100% rename from tests/cassettes/test_temporal/test_temporal.yaml rename to tests/cassettes/test_temporal/test_complex_agent.yaml diff --git a/tests/cassettes/test_temporal/test_simple_agent.yaml b/tests/cassettes/test_temporal/test_simple_agent.yaml new file mode 100644 index 0000000000..9aa924f5f5 --- /dev/null +++ b/tests/cassettes/test_temporal/test_simple_agent.yaml @@ -0,0 +1,79 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: 
+ - gzip, deflate + connection: + - keep-alive + content-length: + - '105' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? + role: user + model: gpt-4o + stream: false + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '403' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. + refusal: null + role: assistant + created: 1754675179 + id: chatcmpl-C2LSVwAtcuMjKCHykKXgKphwTaQVB + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_ff25b2783a + usage: + completion_tokens: 8 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 14 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 22 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index f5fc804fdb..3a92d9846b 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -72,6 +72,7 @@ ] TEMPORAL_PORT = 7243 +TASK_QUEUE = 'pydantic-ai-agent-task-queue' @pytest.fixture @@ -82,6 +83,14 @@ async def env() -> AsyncIterator[WorkflowEnvironment]: @pytest.fixture async def client(env: WorkflowEnvironment) -> Client: + return await Client.connect( + f'localhost:{TEMPORAL_PORT}', + plugins=[PydanticAIPlugin()], + ) + + 
+@pytest.fixture +async def client_with_logfire(env: WorkflowEnvironment) -> Client: return await Client.connect( f'localhost:{TEMPORAL_PORT}', plugins=[PydanticAIPlugin(), LogfirePlugin()], @@ -120,9 +129,12 @@ class Response: answers: list[Answer] -agent = Agent( - # Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. - OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'))), +# Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. +model = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'))) + + +complex_agent = Agent( + model, deps_type=Deps, output_type=Response, toolsets=[ @@ -131,13 +143,14 @@ class Response: ], tools=[get_weather], event_stream_handler=event_stream_handler, - name='temporal_agent', + name='complex_agent', ) -# This needs to be done before the `agent` is bound to the workflow. -temporal_agent = TemporalAgent( - agent, +# This needs to be done before the `TemporalAgent` is bound to the workflow. 
+complex_temporal_agent = TemporalAgent( + complex_agent, activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=60)), + model_activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=90)), toolset_activity_config={ 'country': ActivityConfig(start_to_close_timeout=timedelta(seconds=120)), }, @@ -151,30 +164,28 @@ class Response: @workflow.defn -class AgentWorkflow: +class ComplexAgentWorkflow: @workflow.run async def run(self, prompt: str, deps: Deps) -> Response: - result = await temporal_agent.run(prompt, deps=deps) + result = await complex_temporal_agent.run(prompt, deps=deps) return result.output -async def test_temporal(allow_model_requests: None, client: Client, capfire: CaptureLogfire): - task_queue = 'pydantic-ai-agent-task-queue' - +async def test_complex_agent(allow_model_requests: None, client_with_logfire: Client, capfire: CaptureLogfire): async with Worker( - client, - task_queue=task_queue, - workflows=[AgentWorkflow], - plugins=[AgentPlugin(temporal_agent)], + client_with_logfire, + task_queue=TASK_QUEUE, + workflows=[ComplexAgentWorkflow], + plugins=[AgentPlugin(complex_temporal_agent)], ): - output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - AgentWorkflow.run, + output = await client_with_logfire.execute_workflow( # pyright: ignore[reportUnknownMemberType] + ComplexAgentWorkflow.run, args=[ 'Tell me: the capital of the country; the weather there; the product name', Deps(country='Mexico'), ], - id='pydantic-ai-agent-workflow', - task_queue=task_queue, + id=ComplexAgentWorkflow.__name__, + task_queue=TASK_QUEUE, ) assert output == snapshot( Response( @@ -197,38 +208,38 @@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap assert parsed_otel_items == snapshot( [ - 'StartWorkflow:AgentWorkflow', - 'RunWorkflow:AgentWorkflow', - 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 
'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'StartActivity:agent__temporal_agent__model_request_stream', + 'StartWorkflow:ComplexAgentWorkflow', + 'RunWorkflow:ComplexAgentWorkflow', + 'StartActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=1', '{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:agent__temporal_agent__model_request_stream', + 'RunActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=1', 'chat gpt-4o', 'ctx.run_step=1', '{"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_kind":"tool-call"},"event_kind":"function_tool_call"}', '{"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_kind":"tool-call"},"event_kind":"function_tool_call"}', 'running tool: get_country', - 'StartActivity:agent__temporal_agent__mcp_server__mcp__call_tool', + 
'StartActivity:agent__complex_agent__mcp_server__mcp__call_tool', IsStr( regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), - 'RunActivity:agent__temporal_agent__mcp_server__mcp__call_tool', + 'RunActivity:agent__complex_agent__mcp_server__mcp__call_tool', 'running tool: get_product_name', IsStr( regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), 'running 2 tools', - 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'StartActivity:agent__temporal_agent__model_request_stream', + 'StartActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=2', '{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"part_start"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', @@ -237,21 +248,21 @@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"Mexico","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" City","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', 
'{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:agent__temporal_agent__model_request_stream', + 'RunActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=2', 'chat gpt-4o', 'ctx.run_step=2', '{"part":{"tool_name":"get_weather","args":"{\\"city\\":\\"Mexico City\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_kind":"tool-call"},"event_kind":"function_tool_call"}', - 'StartActivity:agent__temporal_agent__toolset____call_tool', - 'RunActivity:agent__temporal_agent__toolset____call_tool', + 'StartActivity:agent__complex_agent__toolset____call_tool', + 'RunActivity:agent__complex_agent__toolset____call_tool', 'running tool: get_weather', IsStr( regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"event_kind":"function_tool_result"}' ), 'running 1 tool', - 'StartActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'RunActivity:agent__temporal_agent__mcp_server__mcp__get_tools', - 'StartActivity:agent__temporal_agent__model_request_stream', + 'StartActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'RunActivity:agent__complex_agent__mcp_server__mcp__get_tools', + 'StartActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=3', '{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_kind":"tool-call"},"event_kind":"part_start"}', '{"tool_name":"final_result","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","event_kind":"final_result"}', @@ -295,11 +306,41 @@ async def test_temporal(allow_model_requests: None, client: Client, capfire: Cap '{"index":0,"delta":{"tool_name_delta":null,"args_delta":" AI","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', 
'{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', '{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}', - 'RunActivity:agent__temporal_agent__model_request_stream', + 'RunActivity:agent__complex_agent__model_request_stream', 'ctx.run_step=3', 'chat gpt-4o', 'ctx.run_step=3', - 'temporal_agent run', - 'CompleteWorkflow:AgentWorkflow', + 'complex_agent run', + 'CompleteWorkflow:ComplexAgentWorkflow', ] ) + + +simple_agent = Agent(model, name='simple_agent') + +# This needs to be done before the `TemporalAgent` is bound to the workflow. +simple_temporal_agent = TemporalAgent(simple_agent) + + +@workflow.defn +class SimpleAgentWorkflow: + @workflow.run + async def run(self, prompt: str) -> str: + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_simple_agent(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflow], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflow.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflow.__name__, + task_queue=TASK_QUEUE, + ) + assert output == snapshot('The capital of Mexico is Mexico City.') From 9642a15ecb0b8da2305ed07438dbb08e36b9e831 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 20:37:46 +0000 Subject: [PATCH 06/30] Fix temporal tests --- tests/conftest.py | 2 +- tests/test_temporal.py | 33 +++++++++++++++++++++++++++------ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index ccdd81eadc..f37d35a946 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -129,7 +129,7 @@ def env() -> 
Iterator[TestEnv]: test_env.reset() -@pytest.fixture +@pytest.fixture(scope='session') def anyio_backend(): return 'asyncio' diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 3a92d9846b..7b4dd12426 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -10,6 +10,7 @@ from pydantic_ai import Agent, RunContext from pydantic_ai.messages import AgentStreamEvent, HandleResponseEvent +from pydantic_ai.models import cached_async_http_client from pydantic_ai.toolsets import FunctionToolset try: @@ -71,18 +72,32 @@ pytest.mark.vcr, ] + +# We need to use a custom cached HTTP client here as the default one created for OpenAIProvider will be closed automatically +# at the end of each test, but we need this one to live longer. +http_client = cached_async_http_client(provider='temporal') + + +@pytest.fixture(autouse=True, scope='module') +async def close_cached_httpx_client(anyio_backend: str) -> AsyncIterator[None]: + try: + yield + finally: + await http_client.aclose() + + TEMPORAL_PORT = 7243 TASK_QUEUE = 'pydantic-ai-agent-task-queue' -@pytest.fixture -async def env() -> AsyncIterator[WorkflowEnvironment]: - async with await WorkflowEnvironment.start_local(port=TEMPORAL_PORT) as env: # pyright: ignore[reportUnknownMemberType] +@pytest.fixture(scope='module') +async def temporal_env() -> AsyncIterator[WorkflowEnvironment]: + async with await WorkflowEnvironment.start_local(port=TEMPORAL_PORT, ui=True) as env: # pyright: ignore[reportUnknownMemberType] yield env @pytest.fixture -async def client(env: WorkflowEnvironment) -> Client: +async def client(temporal_env: WorkflowEnvironment) -> Client: return await Client.connect( f'localhost:{TEMPORAL_PORT}', plugins=[PydanticAIPlugin()], @@ -90,7 +105,7 @@ async def client(env: WorkflowEnvironment) -> Client: @pytest.fixture -async def client_with_logfire(env: WorkflowEnvironment) -> Client: +async def client_with_logfire(temporal_env: WorkflowEnvironment) -> Client: return await Client.connect( 
f'localhost:{TEMPORAL_PORT}', plugins=[PydanticAIPlugin(), LogfirePlugin()], @@ -130,7 +145,13 @@ class Response: # Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. -model = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'))) +model = OpenAIModel( + 'gpt-4o', + provider=OpenAIProvider( + api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'), + http_client=http_client, + ), +) complex_agent = Agent( From ae0796322a382cd76a44c7bfd1bb4d5e41e6b138 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 20:40:21 +0000 Subject: [PATCH 07/30] Add pydantic_ai.ext.temporal to API docs --- docs/api/ext.md | 2 ++ pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/api/ext.md b/docs/api/ext.md index 7f01b44d45..beeac4fe72 100644 --- a/docs/api/ext.md +++ b/docs/api/ext.md @@ -1,5 +1,7 @@ # `pydantic_ai.ext` +::: pydantic_ai.ext.temporal + ::: pydantic_ai.ext.langchain ::: pydantic_ai.ext.aci diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py index abe9a63ed0..006bd5edd3 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py @@ -16,12 +16,12 @@ from ._run_context import TemporalRunContext, TemporalRunContextWithDeps __all__ = [ - 'TemporalRunContext', - 'TemporalRunContextWithDeps', + 'TemporalAgent', 'PydanticAIPlugin', 'LogfirePlugin', 'AgentPlugin', - 'TemporalAgent', + 'TemporalRunContext', + 'TemporalRunContextWithDeps', ] From 22c780c2561e20a82bdc40e78fefbe45571db34c Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 20:43:16 +0000 Subject: [PATCH 08/30] Skip testing flaky example --- docs/evals.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/evals.md b/docs/evals.md index e1820b88c1..8d3091ad50 
100644 --- a/docs/evals.md +++ b/docs/evals.md @@ -313,7 +313,7 @@ _(This example is complete, it can be run "as is")_ You can control concurrency during evaluation (this might be useful to prevent exceeding a rate limit): -```python {title="parallel_evaluation_example.py" line_length="100"} +```python {title="parallel_evaluation_example.py" line_length="100" test="skip"} import asyncio import time From 19eca40721caf4e21ca8a9925a35bd44e2be5fb3 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 21:57:35 +0000 Subject: [PATCH 09/30] Add a bunch of tests --- pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 4 +- .../pydantic_ai/ext/temporal/__init__.py | 10 +- .../pydantic_ai/ext/temporal/_agent.py | 41 +- .../ext/temporal/_function_toolset.py | 8 +- .../pydantic_ai/ext/temporal/_mcp_server.py | 8 +- .../pydantic_ai/ext/temporal/_toolset.py | 6 + .../test_temporal/test_multiple_agents.yaml | 1009 +++++++++++++++++ .../test_temporal_agent_iter.yaml | 78 ++ .../test_temporal_agent_run.yaml | 79 ++ .../test_temporal_agent_run_stream.yaml | 78 ++ .../test_temporal_agent_run_sync.yaml | 79 ++ tests/test_temporal.py | 255 ++++- 12 files changed, 1598 insertions(+), 57 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_multiple_agents.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_iter.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_run.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_run_stream.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_run_sync.yaml diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index d796d9dc91..bd39e4bded 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -196,8 +196,8 @@ async def main(): usage=usage, infer_name=infer_name, toolsets=toolsets, - ) as result: - yield result + ) as run: + yield run 
@contextmanager def override( diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py index 006bd5edd3..4d548013fa 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py @@ -11,6 +11,7 @@ from temporalio.worker import Plugin as WorkerPlugin, WorkerConfig from temporalio.worker.workflow_sandbox import SandboxedWorkflowRunner +from ...exceptions import UserError from ._agent import TemporalAgent from ._logfire import LogfirePlugin from ._run_context import TemporalRunContext, TemporalRunContextWithDeps @@ -33,7 +34,7 @@ def configure_client(self, config: ClientConfig) -> ClientConfig: DefaultPayloadConverter, PydanticPayloadConverter, ): - warnings.warn( + warnings.warn( # pragma: no cover 'A non-default Temporal data converter was used which has been replaced with the Pydantic data converter.' ) @@ -42,7 +43,7 @@ def configure_client(self, config: ClientConfig) -> ClientConfig: def configure_worker(self, config: WorkerConfig) -> WorkerConfig: runner = config.get('workflow_runner') # pyright: ignore[reportUnknownMemberType] - if isinstance(runner, SandboxedWorkflowRunner): + if isinstance(runner, SandboxedWorkflowRunner): # pragma: no branch config['workflow_runner'] = replace( runner, restrictions=runner.restrictions.with_passthrough_modules( @@ -57,6 +58,10 @@ def configure_worker(self, config: WorkerConfig) -> WorkerConfig: 'pandas', ), ) + + # pydantic_ai.exceptions.UserError is not retryable + config['workflow_failure_exception_types'] = [*config.get('workflow_failure_exception_types', []), UserError] # pyright: ignore[reportUnknownMemberType] + return super().configure_worker(config) @@ -68,5 +73,6 @@ def __init__(self, agent: TemporalAgent[Any, Any]): def configure_worker(self, config: WorkerConfig) -> WorkerConfig: activities: Sequence[Callable[..., Any]] = config.get('activities', []) # pyright: 
ignore[reportUnknownMemberType] + # Activities are checked for name conflicts by Temporal. config['activities'] = [*activities, *self.agent.temporal_activities] return super().configure_worker(config) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py index 2baa60dbbf..f1c11c6477 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -6,6 +6,7 @@ from typing import Any, Callable, Literal, overload from temporalio import workflow +from temporalio.common import RetryPolicy from temporalio.workflow import ActivityConfig from typing_extensions import Never @@ -67,7 +68,14 @@ def __init__( """ super().__init__(wrapped) + # start_to_close_timeout is required activity_config = activity_config or ActivityConfig(start_to_close_timeout=timedelta(seconds=60)) + + # pydantic_ai.exceptions.UserError is not retryable + retry_policy = activity_config.get('retry_policy') or RetryPolicy() + retry_policy.non_retryable_error_types = [*(retry_policy.non_retryable_error_types or []), UserError.__name__] + activity_config['retry_policy'] = retry_policy + model_activity_config = model_activity_config or {} toolset_activity_config = toolset_activity_config or {} tool_activity_config = tool_activity_config or {} @@ -100,7 +108,7 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset id = toolset.id if id is None: raise UserError( - "Toolsets that implement their own tool calling need to have a unique `id` in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." + "Toolsets that are 'leaves' (i.e. those that implement their own tool listing and calling) need to have a unique `id` in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." 
) toolset = temporalize_toolset_func( @@ -223,10 +231,7 @@ async def main(): Returns: The result of the run. """ - if workflow.in_workflow() and event_stream_handler is not None: - raise UserError( - 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' - ) + _check_no_run_event_stream_handler_in_workflow(event_stream_handler) return await super().run( user_prompt, @@ -326,10 +331,7 @@ def run_sync( Returns: The result of the run. """ - if workflow.in_workflow() and event_stream_handler is not None: - raise UserError( - 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' - ) + _check_no_run_event_stream_handler_in_workflow(event_stream_handler) return super().run_sync( user_prompt, @@ -428,10 +430,7 @@ async def main(): Returns: The result of the run. """ - if workflow.in_workflow() and event_stream_handler is not None: - raise UserError( - 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
- ) + _check_no_run_event_stream_handler_in_workflow(event_stream_handler) async with super().run_stream( user_prompt, @@ -589,8 +588,9 @@ async def main(): infer_name=infer_name, toolsets=toolsets, **_deprecated_kwargs, - ) as result: - yield result + ) as run: + yield run + return if model is not None: raise UserError( @@ -612,8 +612,8 @@ async def main(): usage=usage, infer_name=infer_name, **_deprecated_kwargs, - ) as result: - yield result + ) as run: + yield run @contextmanager def override( @@ -651,3 +651,10 @@ def override( with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools): yield + + +def _check_no_run_event_stream_handler_in_workflow(event_stream_handler: EventStreamHandler[AgentDepsT] | None) -> None: + if workflow.in_workflow() and event_stream_handler is not None: + raise UserError( + 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + ) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py index 104f41cbce..6616f39475 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py @@ -39,11 +39,7 @@ def __init__( self.tool_activity_config = tool_activity_config self.run_context_type = run_context_type - # An error is raised in `TemporalAgent` if no `id` is set. 
- id = toolset.id - assert id is not None - - @activity.defn(name=f'{activity_name_prefix}__toolset__{id}__call_tool') + @activity.defn(name=f'{activity_name_prefix}__toolset__{self.id}__call_tool') async def call_tool_activity(params: _CallToolParams) -> Any: name = params.name ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context) @@ -51,7 +47,7 @@ async def call_tool_activity(params: _CallToolParams) -> Any: tool = (await toolset.get_tools(ctx))[name] except KeyError as e: raise UserError( - f'Tool {name!r} not found in toolset {id!r}. ' + f'Tool {name!r} not found in toolset {self.id!r}. ' 'Removing or renaming tools during an agent run is not supported with Temporal.' ) from e diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py index 702b7243b8..cdf75e59f2 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py @@ -57,11 +57,7 @@ def __init__( self.run_context_type = run_context_type - # An error is raised in `TemporalAgent` if no `id` is set. 
- id = server.id - assert id is not None - - @activity.defn(name=f'{activity_name_prefix}__mcp_server__{id}__get_tools') + @activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__get_tools') async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinition]: run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) tools = await self.wrapped.get_tools(run_context) @@ -71,7 +67,7 @@ async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinitio self.get_tools_activity = get_tools_activity - @activity.defn(name=f'{activity_name_prefix}__mcp_server__{id}__call_tool') + @activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__call_tool') async def call_tool_activity(params: _CallToolParams) -> ToolResult: run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) return await self.wrapped.call_tool( diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py index 8a682591cc..8c8441685d 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py @@ -14,6 +14,12 @@ class TemporalWrapperToolset(WrapperToolset[Any], ABC): + @property + def id(self) -> str: + # An error is raised in `TemporalAgent` if no `id` is set. 
+ assert self.wrapped.id is not None + return self.wrapped.id + @property @abstractmethod def temporal_activities(self) -> list[Callable[..., Any]]: diff --git a/tests/cassettes/test_temporal/test_multiple_agents.yaml b/tests/cassettes/test_temporal/test_multiple_agents.yaml new file mode 100644 index 0000000000..92fab355e7 --- /dev/null +++ b/tests/cassettes/test_temporal/test_multiple_agents.yaml @@ -0,0 +1,1009 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '105' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? + role: user + model: gpt-4o + stream: false + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '714' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. 
+ refusal: null + role: assistant + created: 1754686067 + id: chatcmpl-C2OI7Ey3XvNe02fb41d1D6h1j6H1M + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_07871e2ad8 + usage: + completion_tokens: 8 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 14 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 22 + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4294' + content-type: + - application/json + cookie: + - __cf_bm=FMdLfGkYYRRxShv6d.6ULos8pStg0TmiWrGy26zbUnk-1754686067-1.0.1.1-E.y8vuMwOtCOXnzbZRfxF.uHql5wJ.TjpdhC2xGP7dLaiSdNu.imsaLxXWUsb3oWk_j4bh0I6jtaUIkz6FLH3DWe7bW3PJgXdHVWVeSDd5I; + _cfuvid=VKe1SA_RyQAHS9bkLXRq_LMGd8_CKmeHtRDscnoH.vk-1754686067987-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for 
a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + 
type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}],"obfuscation":"bEcAVepxT2N"} + + data: 
{"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_fc0SDU3fpyNWhrPIoQKrxefP","type":"function","function":{"name":"get_country","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"r5vzM2B7QX"} + + data: {"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"f28G"} + + data: {"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_QrIV88ppSKBV3sdKw9Dkr9L5","type":"function","function":{"name":"get_product_name","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"HGhzy"} + + data: {"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"CPN0"} + + data: {"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"CLx1"} + + data: 
{"id":"chatcmpl-C2OICn1CbpqxbWqJvEQJadV86H8q7","object":"chat.completion.chunk","created":1754686072,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":364,"completion_tokens":40,"total_tokens":404,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"9UAlDTq6bfIST"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '952' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4720' + content-type: + - application/json + cookie: + - __cf_bm=FMdLfGkYYRRxShv6d.6ULos8pStg0TmiWrGy26zbUnk-1754686067-1.0.1.1-E.y8vuMwOtCOXnzbZRfxF.uHql5wJ.TjpdhC2xGP7dLaiSdNu.imsaLxXWUsb3oWk_j4bh0I6jtaUIkz6FLH3DWe7bW3PJgXdHVWVeSDd5I; + _cfuvid=VKe1SA_RyQAHS9bkLXRq_LMGd8_CKmeHtRDscnoH.vk-1754686067987-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_fc0SDU3fpyNWhrPIoQKrxefP + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_QrIV88ppSKBV3sdKw9Dkr9L5 + type: function + - content: Mexico + role: tool + tool_call_id: call_fc0SDU3fpyNWhrPIoQKrxefP + - content: Pydantic AI + 
role: tool + tool_call_id: call_QrIV88ppSKBV3sdKw9Dkr9L5 + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function 
+ - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. 
+ name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_0sOcp1sdvSe58xn9EtpyT4Z7","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"YE1nZFEdg"} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Izl"} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"q1"} + + data: 
{"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"o"} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5"} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xub"} + + data: {"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"n6VC"} + + data: 
{"id":"chatcmpl-C2OIHajL8O898rmLqIoxa4RVHOYix","object":"chat.completion.chunk","created":1754686077,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":423,"completion_tokens":15,"total_tokens":438,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"DPehNdReB8XBJ"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '571' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4969' + content-type: + - application/json + cookie: + - __cf_bm=FMdLfGkYYRRxShv6d.6ULos8pStg0TmiWrGy26zbUnk-1754686067-1.0.1.1-E.y8vuMwOtCOXnzbZRfxF.uHql5wJ.TjpdhC2xGP7dLaiSdNu.imsaLxXWUsb3oWk_j4bh0I6jtaUIkz6FLH3DWe7bW3PJgXdHVWVeSDd5I; + _cfuvid=VKe1SA_RyQAHS9bkLXRq_LMGd8_CKmeHtRDscnoH.vk-1754686067987-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_fc0SDU3fpyNWhrPIoQKrxefP + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_QrIV88ppSKBV3sdKw9Dkr9L5 + type: function + - content: Mexico + role: tool + tool_call_id: call_fc0SDU3fpyNWhrPIoQKrxefP + - content: Pydantic AI + 
role: tool + tool_call_id: call_QrIV88ppSKBV3sdKw9Dkr9L5 + - role: assistant + tool_calls: + - function: + arguments: '{"city":"Mexico City"}' + name: get_weather + id: call_0sOcp1sdvSe58xn9EtpyT4Z7 + type: function + - content: sunny + role: tool + tool_call_id: call_0sOcp1sdvSe58xn9EtpyT4Z7 + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + 
parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. 
+ name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_ilRpnEc1a17bm7xyvhNbaSOS","type":"function","function":{"name":"final_result","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"0bScewxE"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"hp7"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answers"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"nfPPM72GKlkKYsc"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":["}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"HP"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Cdc"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"2"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"y"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"SDZEIE6GemLgnH2"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" of"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OZB"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" the"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"k9"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Country"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ICmAU4LdEPUFx7"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"i"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"j"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"U"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"87LtLruw12QiS2W"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"n"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"p"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Weather"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"e8ZZ9vaf5gjWfMW"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" in"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ZJi"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8wEw5SfvrhCPdxY"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Q"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"j"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"J"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Sunny"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"b"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"TdXcc4UNug5RPRX"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"O"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"V"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Product"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Yk14mLdHSY7N0fo"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Name"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"J"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"9"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"m"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"P"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"57VUB"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"yd"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"hqeM"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"antic"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"F"} + + data: 
{"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" AI"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xQX"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"q6r"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"]}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8scW"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"Df0J"} + + data: {"id":"chatcmpl-C2OIKzk3pk6v0J4StM6YwlYv4fnSE","object":"chat.completion.chunk","created":1754686080,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":448,"completion_tokens":49,"total_tokens":497,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"tZUpwQLEqnpHt"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + 
- h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '561' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/cassettes/test_temporal/test_temporal_agent_iter.yaml b/tests/cassettes/test_temporal/test_temporal_agent_iter.yaml new file mode 100644 index 0000000000..6b85fce101 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_iter.yaml @@ -0,0 +1,78 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '144' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? 
+ role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"KhEoTT6u7JgGis"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"68kWLPsNVG6FC"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" capital"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"6d1qUFRY"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Q7kmEmvYuS4Nk"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" Mexico"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"KrDQdBujM"} + + data: 
{"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"qCG5yEQHHMYd2"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" Mexico"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"uFnoTvoy9"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":" City"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"bdnHY6qXdKt"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"2WeZHWZ8r9jetpK"} + + data: {"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"7LC080uo8W"} + + data: 
{"id":"chatcmpl-C2P1wP1damHwC6sXvGAIh5PMvH6wM","object":"chat.completion.chunk","created":1754688908,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_ff25b2783a","choices":[],"usage":{"prompt_tokens":14,"completion_tokens":8,"total_tokens":22,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":""} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '259' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/cassettes/test_temporal/test_temporal_agent_run.yaml b/tests/cassettes/test_temporal/test_temporal_agent_run.yaml new file mode 100644 index 0000000000..d84fdf771f --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_run.yaml @@ -0,0 +1,79 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '105' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? 
+ role: user + model: gpt-4o + stream: false + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '580' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. + refusal: null + role: assistant + created: 1754688958 + id: chatcmpl-C2P2k1mRRz7KMAtppLZz83Lyy33Jl + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_ff25b2783a + usage: + completion_tokens: 8 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 14 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 22 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/cassettes/test_temporal/test_temporal_agent_run_stream.yaml b/tests/cassettes/test_temporal/test_temporal_agent_run_stream.yaml new file mode 100644 index 0000000000..64d552affe --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_run_stream.yaml @@ -0,0 +1,78 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '144' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? 
+ role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"oouykO51ovJROe"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Qk5ENocFqkZOr"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" capital"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"G2ubBYwM"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"46wA1R0uj9DWX"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" Mexico"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"HF2DtMCaA"} + + data: 
{"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"vFNThjPk0BGrI"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" Mexico"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"EU1SORHKK"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":" City"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"gpuKBCmzsBH"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Ow9nVgoSopFONI2"} + + data: {"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"zkiywLbUBn"} + + data: 
{"id":"chatcmpl-C2P2HtMJhPkWjQ2adKerkdVilXmRL","object":"chat.completion.chunk","created":1754688929,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":14,"completion_tokens":8,"total_tokens":22,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":""} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '433' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/cassettes/test_temporal/test_temporal_agent_run_sync.yaml b/tests/cassettes/test_temporal/test_temporal_agent_run_sync.yaml new file mode 100644 index 0000000000..934c945533 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_run_sync.yaml @@ -0,0 +1,79 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '105' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? 
+ role: user + model: gpt-4o + stream: false + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '466' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. + refusal: null + role: assistant + created: 1754688941 + id: chatcmpl-C2P2TVJ3Qoyk6ajLKjYZF8QDAwt50 + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_ff25b2783a + usage: + completion_tokens: 8 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 14 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 22 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 7b4dd12426..9df0091d1d 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +import re from collections.abc import AsyncIterable, AsyncIterator from dataclasses import dataclass from datetime import timedelta @@ -9,13 +10,19 @@ from typing_extensions import TypedDict from pydantic_ai import Agent, RunContext +from pydantic_ai.exceptions import UserError +from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset +from pydantic_ai.ext.temporal._mcp_server import TemporalMCPServer +from pydantic_ai.ext.temporal._model import TemporalModel from pydantic_ai.messages import 
AgentStreamEvent, HandleResponseEvent from pydantic_ai.models import cached_async_http_client from pydantic_ai.toolsets import FunctionToolset try: from temporalio import workflow - from temporalio.client import Client + from temporalio.activity import _Definition as ActivityDefinition # pyright: ignore[reportPrivateUsage] + from temporalio.client import Client, WorkflowFailureError + from temporalio.exceptions import ApplicationError from temporalio.testing import WorkflowEnvironment from temporalio.worker import Worker from temporalio.workflow import ActivityConfig @@ -112,6 +119,45 @@ async def client_with_logfire(temporal_env: WorkflowEnvironment) -> Client: ) +# Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. +model = OpenAIModel( + 'gpt-4o', + provider=OpenAIProvider( + api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'), + http_client=http_client, + ), +) + +simple_agent = Agent(model, name='simple_agent') + +# This needs to be done before the `TemporalAgent` is bound to the workflow. 
+simple_temporal_agent = TemporalAgent(simple_agent) + + +@workflow.defn +class SimpleAgentWorkflow: + @workflow.run + async def run(self, prompt: str) -> str: + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_simple_agent(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflow], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflow.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflow.__name__, + task_queue=TASK_QUEUE, + ) + assert output == snapshot('The capital of Mexico is Mexico City.') + + class Deps(TypedDict): country: str @@ -144,16 +190,6 @@ class Response: answers: list[Answer] -# Can't use the `openai_api_key` fixture here because the workflow needs to be defined at the top level of the file. -model = OpenAIModel( - 'gpt-4o', - provider=OpenAIProvider( - api_key=os.getenv('OPENAI_API_KEY', 'mock-api-key'), - http_client=http_client, - ), -) - - complex_agent = Agent( model, deps_type=Deps, @@ -337,31 +373,202 @@ async def test_complex_agent(allow_model_requests: None, client_with_logfire: Cl ) -simple_agent = Agent(model, name='simple_agent') +async def test_multiple_agents(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflow, ComplexAgentWorkflow], + plugins=[AgentPlugin(simple_temporal_agent), AgentPlugin(complex_temporal_agent)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflow.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflow.__name__, + task_queue=TASK_QUEUE, + ) + assert output == snapshot('The capital of Mexico is Mexico City.') -# This needs to be done before the `TemporalAgent` is bound to the workflow. 
-simple_temporal_agent = TemporalAgent(simple_agent) + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + ComplexAgentWorkflow.run, + args=[ + 'Tell me: the capital of the country; the weather there; the product name', + Deps(country='Mexico'), + ], + id=ComplexAgentWorkflow.__name__, + task_queue=TASK_QUEUE, + ) + assert output == snapshot( + Response( + answers=[ + Answer(label='Capital of the Country', answer='Mexico City'), + Answer(label='Weather in Mexico City', answer='Sunny'), + Answer(label='Product Name', answer='Pydantic AI'), + ] + ) + ) + + +async def test_agent_name_collision(allow_model_requests: None, client: Client): + with pytest.raises(ValueError, match='More than one activity named agent__simple_agent__model_request'): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflow], + plugins=[AgentPlugin(simple_temporal_agent), AgentPlugin(simple_temporal_agent)], + ): + pass + + +async def test_agent_without_name(): + with pytest.raises( + UserError, + match="An agent needs to have a unique `name` in order to be used with Temporal. The name will be used to identify the agent's activities within the workflow.", + ): + TemporalAgent(Agent()) + + +async def test_agent_without_model(): + with pytest.raises( + UserError, + match='An agent needs to have a `model` in order to be used with Temporal, it cannot be set at agent run time.', + ): + TemporalAgent(Agent(name='test_agent')) + + +async def test_toolset_without_id(): + with pytest.raises( + UserError, + match=re.escape( + "Toolsets that are 'leaves' (i.e. those that implement their own tool listing and calling) need to have a unique `id` in order to be used with Temporal. The ID will be used to identify the toolset's activities within the workflow." 
+ ), + ): + TemporalAgent(Agent(model=model, name='test_agent', toolsets=[FunctionToolset()])) + + +async def test_temporal_agent(): + assert isinstance(complex_temporal_agent.model, TemporalModel) + assert complex_temporal_agent.model.wrapped == complex_agent.model + + toolsets = complex_temporal_agent.toolsets + assert len(toolsets) == 4 + + # Empty function toolset for the agent's own tools + assert isinstance(toolsets[0], FunctionToolset) + assert toolsets[0].id == '' + assert toolsets[0].tools == {} + + # Wrapped function toolset for the agent's own tools + assert isinstance(toolsets[1], TemporalFunctionToolset) + assert toolsets[1].id == '' + assert isinstance(toolsets[1].wrapped, FunctionToolset) + assert toolsets[1].wrapped.tools.keys() == {'get_weather'} + + # Wrapped 'country' toolset + assert isinstance(toolsets[2], TemporalFunctionToolset) + assert toolsets[2].id == 'country' + assert toolsets[2].wrapped == complex_agent.toolsets[1] + assert isinstance(toolsets[2].wrapped, FunctionToolset) + assert toolsets[2].wrapped.tools.keys() == {'get_country'} + + # Wrapped 'mcp' MCP server + assert isinstance(toolsets[3], TemporalMCPServer) + assert toolsets[3].id == 'mcp' + assert toolsets[3].wrapped == complex_agent.toolsets[2] + + assert [ + ActivityDefinition.must_from_callable(activity).name # pyright: ignore[reportUnknownMemberType] + for activity in complex_temporal_agent.temporal_activities + ] == snapshot( + [ + 'agent__complex_agent__model_request', + 'agent__complex_agent__model_request_stream', + 'agent__complex_agent__toolset____call_tool', + 'agent__complex_agent__toolset__country__call_tool', + 'agent__complex_agent__mcp_server__mcp__get_tools', + 'agent__complex_agent__mcp_server__mcp__call_tool', + ] + ) + + +def test_temporal_agent_run_sync(allow_model_requests: None): + result = simple_temporal_agent.run_sync('What is the capital of Mexico?') + assert result.output == snapshot('The capital of Mexico is Mexico City.') + + +async def 
test_temporal_agent_run(allow_model_requests: None): + result = await simple_temporal_agent.run('What is the capital of Mexico?') + assert result.output == snapshot('The capital of Mexico is Mexico City.') + + +async def test_temporal_agent_run_stream(allow_model_requests: None): + async with simple_temporal_agent.run_stream('What is the capital of Mexico?') as result: + assert [c async for c in result.stream_text(debounce_by=None)] == snapshot( + [ + 'The', + 'The capital', + 'The capital of', + 'The capital of Mexico', + 'The capital of Mexico is', + 'The capital of Mexico is Mexico', + 'The capital of Mexico is Mexico City', + 'The capital of Mexico is Mexico City.', + ] + ) + + +async def test_temporal_agent_iter(allow_model_requests: None): + output: list[str] = [] + async with simple_temporal_agent.iter('What is the capital of Mexico?') as run: + async for node in run: + if Agent.is_model_request_node(node): + async with node.stream(run.ctx) as stream: + async for chunk in stream.stream_text(debounce_by=None): + output.append(chunk) + assert output == snapshot( + [ + 'The', + 'The capital', + 'The capital of', + 'The capital of Mexico', + 'The capital of Mexico is', + 'The capital of Mexico is Mexico', + 'The capital of Mexico is Mexico City', + 'The capital of Mexico is Mexico City.', + ] + ) + + +async def simple_event_stream_handler( + ctx: RunContext[None], + stream: AsyncIterable[AgentStreamEvent | HandleResponseEvent], +): + pass @workflow.defn -class SimpleAgentWorkflow: +class SimpleAgentWorkflowWithEventStreamHandler: @workflow.run async def run(self, prompt: str) -> str: - result = await simple_temporal_agent.run(prompt) + result = await simple_temporal_agent.run(prompt, event_stream_handler=simple_event_stream_handler) return result.output -async def test_simple_agent(allow_model_requests: None, client: Client): +async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_model_requests: None, client: Client): async with Worker( 
client, task_queue=TASK_QUEUE, - workflows=[SimpleAgentWorkflow], + workflows=[SimpleAgentWorkflowWithEventStreamHandler], plugins=[AgentPlugin(simple_temporal_agent)], ): - output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - SimpleAgentWorkflow.run, - args=['What is the capital of Mexico?'], - id=SimpleAgentWorkflow.__name__, - task_queue=TASK_QUEUE, + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithEventStreamHandler.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithEventStreamHandler.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == snapshot('UserError') + assert exc_info.value.__cause__.message == snapshot( + 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' ) - assert output == snapshot('The capital of Mexico is Mexico City.') From e4f7f336ef145f92bcd11eb02f7d8f9f88a59081 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 22:36:41 +0000 Subject: [PATCH 10/30] Only include temporal modules when temporalio is available --- tests/test_temporal.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 9df0091d1d..9bb9bae9ed 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -11,9 +11,6 @@ from pydantic_ai import Agent, RunContext from pydantic_ai.exceptions import UserError -from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset -from pydantic_ai.ext.temporal._mcp_server import TemporalMCPServer -from pydantic_ai.ext.temporal._model import TemporalModel from pydantic_ai.messages import AgentStreamEvent, HandleResponseEvent from pydantic_ai.models import cached_async_http_client from pydantic_ai.toolsets import 
FunctionToolset @@ -34,6 +31,9 @@ TemporalAgent, TemporalRunContextWithDeps, ) + from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset + from pydantic_ai.ext.temporal._mcp_server import TemporalMCPServer + from pydantic_ai.ext.temporal._model import TemporalModel except ImportError: import pytest From 46c1f79ff074b3fc60e0c1a42dd5868f5d217f7f Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 8 Aug 2025 23:14:13 +0000 Subject: [PATCH 11/30] Run all Temporal tests in the same xdist group (process) --- .../ext/temporal/_function_toolset.py | 2 +- .../pydantic_ai/ext/temporal/_model.py | 4 - .../pydantic_ai/ext/temporal/_run_context.py | 2 +- .../test_temporal/test_complex_agent_run.yaml | 955 ++++++++++++++++++ ...> test_complex_agent_run_in_workflow.yaml} | 0 ...=> test_simple_agent_run_in_workflow.yaml} | 0 tests/test_temporal.py | 462 ++++++++- 7 files changed, 1414 insertions(+), 11 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_complex_agent_run.yaml rename tests/cassettes/test_temporal/{test_complex_agent.yaml => test_complex_agent_run_in_workflow.yaml} (100%) rename tests/cassettes/test_temporal/{test_simple_agent.yaml => test_simple_agent_run_in_workflow.yaml} (100%) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py index 6616f39475..0506632ce4 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py @@ -46,7 +46,7 @@ async def call_tool_activity(params: _CallToolParams) -> Any: try: tool = (await toolset.get_tools(ctx))[name] except KeyError as e: - raise UserError( + raise UserError( # pragma: no cover f'Tool {name!r} not found in toolset {self.id!r}. ' 'Removing or renaming tools during an agent run is not supported with Temporal.' 
) from e diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py index a62b24f603..423d8348df 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py @@ -46,21 +46,17 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: yield def get(self) -> ModelResponse: - """Build a [`ModelResponse`][pydantic_ai.messages.ModelResponse] from the data received from the stream so far.""" return self.response def usage(self) -> Usage: - """Get the usage of the response so far. This will not be the final usage until the stream is exhausted.""" return self.response.usage @property def model_name(self) -> str: - """Get the model name of the response.""" return self.response.model_name or '' @property def timestamp(self) -> datetime: - """Get the timestamp of the response.""" return self.response.timestamp diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py index cec240b3d3..aed38c80a5 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py @@ -18,7 +18,7 @@ def __init__(self, **kwargs: Any): def __getattribute__(self, name: str) -> Any: try: return super().__getattribute__(name) - except AttributeError as e: + except AttributeError as e: # pragma: no cover if name in RunContext.__dataclass_fields__: raise AttributeError( f'{self.__class__.__name__!r} object has no attribute {name!r}. 
' diff --git a/tests/cassettes/test_temporal/test_complex_agent_run.yaml b/tests/cassettes/test_temporal/test_complex_agent_run.yaml new file mode 100644 index 0000000000..a567fd8632 --- /dev/null +++ b/tests/cassettes/test_temporal/test_complex_agent_run.yaml @@ -0,0 +1,955 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4294' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + 
description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + 
properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}],"obfuscation":"3Y6yaTIPlXV"} + + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_q2UyBRP7eXNTzAoR8lEhjc9Z","type":"function","function":{"name":"get_country","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"g7emiFlCcG"} + + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"S8ct"} + + data: 
{"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_b51ijcpFkDiTQG1bQzsrmtW5","type":"function","function":{"name":"get_product_name","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"uomn5"} + + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"yrJZ"} + + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"onPp"} + + data: {"id":"chatcmpl-C2QD1kGWsTW5OWiqAtOSFEAOfPfQH","object":"chat.completion.chunk","created":1754693439,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":364,"completion_tokens":40,"total_tokens":404,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"F1wZCrV0lEsbu"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '906' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + 
strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4720' + content-type: + - application/json + cookie: + - __cf_bm=zft6YwMMTPDlJv9nFzcyqjatDLWL51IWoXRjiBo8lxg-1754693440-1.0.1.1-FT8HyZDjEEZIx76hp4IYd2Ke6ga_YHmugNrkwXCbkPQJM7bAIax9kMz_DGNsvY5gt.sE2g60Jc0zEEp43vK95vUKIk62fCzcc3i.7eygET0; + _cfuvid=I3WeF5lZoAzwC31zFIarQCYAjRSXcSCKQ0Z8Szv00_0-1754693440668-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_q2UyBRP7eXNTzAoR8lEhjc9Z + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_b51ijcpFkDiTQG1bQzsrmtW5 + type: function + - content: Mexico + role: tool + tool_call_id: call_q2UyBRP7eXNTzAoR8lEhjc9Z + - content: Pydantic AI + role: tool + tool_call_id: call_b51ijcpFkDiTQG1bQzsrmtW5 + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + 
description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: 
false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_LwxJUB9KppVyogRRLQsamRJv","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"NA5VZTdCK"} + + data: 
{"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"MYr"} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"N0"} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"S"} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"P"} + + data: 
{"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"FLs"} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"d0LO"} + + data: {"id":"chatcmpl-C2QD2NQfRbWW5ww5we2oDjS1mgHtK","object":"chat.completion.chunk","created":1754693440,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":423,"completion_tokens":15,"total_tokens":438,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"o9nP5OYjURlfY"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '379' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4969' + content-type: + - application/json + cookie: + - 
__cf_bm=zft6YwMMTPDlJv9nFzcyqjatDLWL51IWoXRjiBo8lxg-1754693440-1.0.1.1-FT8HyZDjEEZIx76hp4IYd2Ke6ga_YHmugNrkwXCbkPQJM7bAIax9kMz_DGNsvY5gt.sE2g60Jc0zEEp43vK95vUKIk62fCzcc3i.7eygET0; + _cfuvid=I3WeF5lZoAzwC31zFIarQCYAjRSXcSCKQ0Z8Szv00_0-1754693440668-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_q2UyBRP7eXNTzAoR8lEhjc9Z + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_b51ijcpFkDiTQG1bQzsrmtW5 + type: function + - content: Mexico + role: tool + tool_call_id: call_q2UyBRP7eXNTzAoR8lEhjc9Z + - content: Pydantic AI + role: tool + tool_call_id: call_b51ijcpFkDiTQG1bQzsrmtW5 + - role: assistant + tool_calls: + - function: + arguments: '{"city":"Mexico City"}' + name: get_weather + id: call_LwxJUB9KppVyogRRLQsamRJv + type: function + - content: sunny + role: tool + tool_call_id: call_LwxJUB9KppVyogRRLQsamRJv + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather 
+ forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log 
level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_CCGIWaMeYWmxOQ91orkmTvzn","type":"function","function":{"name":"final_result","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"3HGTeJmv"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5oT"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answers"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Bkpoehp1mPtgpuo"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":["}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"XR"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OrV"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"p"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"r"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"hPFa7nYTy1kqfHY"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"1"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"The"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Dfa"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"uoFfXckIZzkxsw"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" of"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4Iq"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"0YTqpF2Qa9jYzl0"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" is"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"jh5"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"rbLc3P4VrYs4CjM"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":".\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Vu2"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"i"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"2"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"G"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Weather"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"G4dA7nClk85oh1y"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Q"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"I"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"The"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"i98"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" weather"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"vXmI31omHBLPWx"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" in"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8Z7"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"O2h4Rnr8tNEmqNy"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" is"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"dS8"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" currently"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"PaxQJe8HpcFV"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" sunny"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":".\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"oZX"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"J"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"s"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Product"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"zHxJUlHIGtbB0wY"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Name"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"l"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"b"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"j"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"The"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"TlE"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" product"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"10zmpKfDgdH40s"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" name"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"t"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" is"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"YN8"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" P"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Cy29"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"yd"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"skZD"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"antic"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"q"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" AI"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"be9"} + + data: 
{"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":".\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5y4"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"IH76k"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"]}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5HnB"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"P7V8"} + + data: {"id":"chatcmpl-C2QD4vblfNcSDeoXmULJR4umoKNqY","object":"chat.completion.chunk","created":1754693442,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":448,"completion_tokens":62,"total_tokens":510,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"dRC1SdJDw80tk"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + 
- h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '482' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/cassettes/test_temporal/test_complex_agent.yaml b/tests/cassettes/test_temporal/test_complex_agent_run_in_workflow.yaml similarity index 100% rename from tests/cassettes/test_temporal/test_complex_agent.yaml rename to tests/cassettes/test_temporal/test_complex_agent_run_in_workflow.yaml diff --git a/tests/cassettes/test_temporal/test_simple_agent.yaml b/tests/cassettes/test_temporal/test_simple_agent_run_in_workflow.yaml similarity index 100% rename from tests/cassettes/test_temporal/test_simple_agent.yaml rename to tests/cassettes/test_temporal/test_simple_agent_run_in_workflow.yaml diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 9bb9bae9ed..c86f5d0f75 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -11,7 +11,18 @@ from pydantic_ai import Agent, RunContext from pydantic_ai.exceptions import UserError -from pydantic_ai.messages import AgentStreamEvent, HandleResponseEvent +from pydantic_ai.messages import ( + AgentStreamEvent, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + HandleResponseEvent, + PartDeltaEvent, + PartStartEvent, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) from pydantic_ai.models import cached_async_http_client from pydantic_ai.toolsets import FunctionToolset @@ -72,11 +83,12 @@ import pytest # Loads `vcr`, which Temporal doesn't like without passing through the import - from .conftest import IsStr + from .conftest import IsDatetime, IsStr pytestmark = [ pytest.mark.anyio, pytest.mark.vcr, + 
pytest.mark.xdist_group(name='temporal'), ] @@ -142,7 +154,7 @@ async def run(self, prompt: str) -> str: return result.output -async def test_simple_agent(allow_model_requests: None, client: Client): +async def test_simple_agent_run_in_workflow(allow_model_requests: None, client: Client): async with Worker( client, task_queue=TASK_QUEUE, @@ -228,7 +240,9 @@ async def run(self, prompt: str, deps: Deps) -> Response: return result.output -async def test_complex_agent(allow_model_requests: None, client_with_logfire: Client, capfire: CaptureLogfire): +async def test_complex_agent_run_in_workflow( + allow_model_requests: None, client_with_logfire: Client, capfire: CaptureLogfire +): async with Worker( client_with_logfire, task_queue=TASK_QUEUE, @@ -373,6 +387,271 @@ async def test_complex_agent(allow_model_requests: None, client_with_logfire: Cl ) +async def test_complex_agent_run(allow_model_requests: None): + events: list[AgentStreamEvent | HandleResponseEvent] = [] + + async def event_stream_handler( + ctx: RunContext[Deps], + stream: AsyncIterable[AgentStreamEvent | HandleResponseEvent], + ): + async for event in stream: + events.append(event) + + result = await complex_temporal_agent.run( + 'Tell me: the capital of the country; the weather there; the product name', + deps=Deps(country='Mexico'), + event_stream_handler=event_stream_handler, + ) + assert result.output == snapshot( + Response( + answers=[ + Answer(label='Capital', answer='The capital of Mexico is Mexico City.'), + Answer(label='Weather', answer='The weather in Mexico City is currently sunny.'), + Answer(label='Product Name', answer='The product name is Pydantic AI.'), + ] + ) + ) + assert events == snapshot( + [ + PartStartEvent( + index=0, + part=ToolCallPart(tool_name='get_country', args='', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z'), + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') + ), + PartStartEvent( + index=1, + 
part=ToolCallPart(tool_name='get_product_name', args='', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5'), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5') + ), + FunctionToolCallEvent( + part=ToolCallPart(tool_name='get_country', args='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') + ), + FunctionToolCallEvent( + part=ToolCallPart(tool_name='get_product_name', args='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5') + ), + FunctionToolResultEvent( + result=ToolReturnPart( + tool_name='get_country', + content='Mexico', + tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z', + timestamp=IsDatetime(), + ) + ), + FunctionToolResultEvent( + result=ToolReturnPart( + tool_name='get_product_name', + content='Pydantic AI', + tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5', + timestamp=IsDatetime(), + ) + ), + PartStartEvent( + index=0, + part=ToolCallPart(tool_name='get_weather', args='', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv'), + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='{"', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='city', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='Mexico', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' City', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') + ), + FunctionToolCallEvent( + part=ToolCallPart( + tool_name='get_weather', args='{"city":"Mexico City"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv' + ) + ), + FunctionToolResultEvent( + result=ToolReturnPart( + tool_name='get_weather', + content='sunny', 
+ tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv', + timestamp=IsDatetime(), + ) + ), + PartStartEvent( + index=0, + part=ToolCallPart(tool_name='final_result', args='', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn'), + ), + FinalResultEvent(tool_name='final_result', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn'), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='{"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='answers', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":[', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='{"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='label', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='Capital', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='","', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='answer', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='The', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' capital', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' of', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' Mexico', 
tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' is', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' Mexico', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' City', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='."', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='},{"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='label', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='Weather', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='","', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='answer', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='The', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' weather', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' in', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' Mexico', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' City', 
tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' is', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' currently', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' sunny', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='."', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='},{"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='label', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='Product', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' Name', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='","', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='answer', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='":"', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='The', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' product', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' name', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' is', 
tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' P', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='yd', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='antic', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=' AI', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='."', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta='}', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + PartDeltaEvent( + index=0, delta=ToolCallPartDelta(args_delta=']}', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') + ), + ] + ) + + async def test_multiple_agents(allow_model_requests: None, client: Client): async with Worker( client, @@ -568,7 +847,180 @@ async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_mo task_queue=TASK_QUEUE, ) assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == snapshot('UserError') + assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
) + + +@workflow.defn +class SimpleAgentWorkflowWithIterModel: + @workflow.run + async def run(self, prompt: str) -> str: + async with simple_temporal_agent.iter(prompt, model=model) as run: + assert run.result is not None + return run.result.output + + +async def test_temporal_agent_iter_in_workflow_with_model(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithIterModel], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithIterModel.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithIterModel.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
+ ) + + +@workflow.defn +class SimpleAgentWorkflowWithIterToolsets: + @workflow.run + async def run(self, prompt: str) -> str: + async with simple_temporal_agent.iter(prompt, toolsets=[FunctionToolset()]) as run: + assert run.result is not None + return run.result.output + + +async def test_temporal_agent_iter_in_workflow_with_toolsets(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithIterToolsets], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithIterToolsets.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithIterToolsets.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'Toolsets cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
+ ) + + +@workflow.defn +class SimpleAgentWorkflowWithOverrideModel: + @workflow.run + async def run(self, prompt: str) -> str: + with simple_temporal_agent.override(model=model): + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_temporal_agent_override_model_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithOverrideModel], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithOverrideModel.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithOverrideModel.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'Model cannot be contextually overridden when using Temporal, it must be set at agent creation time.' 
+ ) + + +@workflow.defn +class SimpleAgentWorkflowWithOverrideToolsets: + @workflow.run + async def run(self, prompt: str) -> str: + with simple_temporal_agent.override(toolsets=[FunctionToolset()]): + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithOverrideToolsets], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithOverrideToolsets.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithOverrideToolsets.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'Toolsets cannot be contextually overridden when using Temporal, they must be set at agent creation time.' 
+ ) + + +@workflow.defn +class SimpleAgentWorkflowWithOverrideTools: + @workflow.run + async def run(self, prompt: str) -> str: + with simple_temporal_agent.override(tools=[get_weather]): + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithOverrideTools], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithOverrideTools.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithOverrideTools.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'Tools cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + ) + + +# TODO: f'Temporal activity config for tool {name!r} has been explicitly set to `False` (activity disabled), ' +# 'but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead.' + +# TODO: 'The `deps` object must be a JSON-serializable dictionary in order to be used with Temporal. ' +# 'To use a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' + +# TODO: Custom run_context_type + +# TODO: tool_activity_config + +# TODO: f'Temporal activity config for MCP tool {tool_name!r} has been explicitly set to `False` (activity disabled), ' +# 'but MCP tools require the use of IO and so cannot be run outside of an activity.' 
+ +# TODO: Custom temporalize_toolset_func + +# TODO: raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') + +# TODO: raise UserError('Streaming with Temporal requires `request_stream` to be called with a `run_context`') + +# TODO: LogfirePlugin(setup_logfire=) +# TODO: LogfirePlugin(metrics=False) From 39c429832b1402e94c1f3808917747af6de42147 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 14:19:14 +0000 Subject: [PATCH 12/30] Uninstrument Pydantic AI after Temporal tests --- tests/test_temporal.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index c86f5d0f75..257c109e5d 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -2,7 +2,7 @@ import os import re -from collections.abc import AsyncIterable, AsyncIterator +from collections.abc import AsyncIterable, AsyncIterator, Iterator from dataclasses import dataclass from datetime import timedelta @@ -105,6 +105,15 @@ async def close_cached_httpx_client(anyio_backend: str) -> AsyncIterator[None]: await http_client.aclose() +# `LogfirePlugin` calls `logfire.instrument_pydantic_ai()`, so we need to make sure this doesn't bleed into other tests. 
+@pytest.fixture(autouse=True, scope='module') +def uninstrument_pydantic_ai() -> Iterator[None]: + try: + yield + finally: + Agent.instrument_all(False) + + TEMPORAL_PORT = 7243 TASK_QUEUE = 'pydantic-ai-agent-task-queue' From b2039e2f91c07055e54cbe350f20d605dc17c1b8 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 14:21:01 +0000 Subject: [PATCH 13/30] Unskip testing flaky example --- docs/evals.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/evals.md b/docs/evals.md index e1bd5a8148..81112e69bf 100644 --- a/docs/evals.md +++ b/docs/evals.md @@ -313,7 +313,7 @@ _(This example is complete, it can be run "as is")_ You can control concurrency during evaluation (this might be useful to prevent exceeding a rate limit): -```python {title="parallel_evaluation_example.py" line_length="100" test="skip"} +```python {title="parallel_evaluation_example.py" line_length="100"} import asyncio import time From 10afbd03c9fc54696e40164b87d7120d21302030 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 16:19:56 +0000 Subject: [PATCH 14/30] Add some more tests --- docs/changelog.md | 2 +- .../pydantic_ai/ext/temporal/_agent.py | 128 ++- .../pydantic_ai/ext/temporal/_model.py | 19 +- ..._complex_agent_run_stream_in_workflow.yaml | 929 ++++++++++++++++++ ...ral_agent_sync_tool_activity_disabled.yaml | 100 ++ tests/test_temporal.py | 243 ++++- 6 files changed, 1316 insertions(+), 105 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_complex_agent_run_stream_in_workflow.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_sync_tool_activity_disabled.yaml diff --git a/docs/changelog.md b/docs/changelog.md index 574f5b7093..80ab36a57e 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -12,7 +12,7 @@ Pydantic AI is still pre-version 1, so breaking changes will occur, however: !!! note Here's a filtered list of the breaking changes for each version to help you upgrade Pydantic AI. 
-### v0.7.0 (2025-08-08) +### v0.7.0 (2025-08-11) See [#2458](https://github.com/pydantic/pydantic-ai/pull/2458) - `pydantic_ai.models.StreamedResponse` now yields a `FinalResultEvent` along with the existing `PartStartEvent` and `PartDeltaEvent`. If you're using `pydantic_ai.direct.model_request_stream` or `pydantic_ai.direct.model_request_stream_sync`, you may need to update your code to account for this. diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py index f1c11c6477..f498429fde 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -2,6 +2,7 @@ from collections.abc import AsyncIterator, Iterator, Sequence from contextlib import AbstractAsyncContextManager, asynccontextmanager, contextmanager +from contextvars import ContextVar from datetime import timedelta from typing import Any, Callable, Literal, overload @@ -128,6 +129,8 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset self._toolsets = temporal_toolsets self._temporal_activities = activities + self._temporal_overrides_active: ContextVar[bool] = ContextVar('_temporal_overrides_active', default=False) + @property def model(self) -> Model: return self._model @@ -145,7 +148,11 @@ def temporal_activities(self) -> list[Callable[..., Any]]: def _temporal_overrides(self) -> Iterator[None]: # We reset tools here as the temporalized function toolset is already in self._toolsets. with super().override(model=self._model, toolsets=self._toolsets, tools=[]): - yield + token = self._temporal_overrides_active.set(True) + try: + yield + finally: + self._temporal_overrides_active.reset(token) @overload async def run( @@ -231,22 +238,26 @@ async def main(): Returns: The result of the run. 
""" - _check_no_run_event_stream_handler_in_workflow(event_stream_handler) + if workflow.in_workflow() and event_stream_handler is not None: + raise UserError( + 'Event stream handler cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' + ) - return await super().run( - user_prompt, - output_type=output_type, - message_history=message_history, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - event_stream_handler=event_stream_handler, - **_deprecated_kwargs, - ) + with self._temporal_overrides(): + return await super().run( + user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + **_deprecated_kwargs, + ) @overload def run_sync( @@ -331,7 +342,8 @@ def run_sync( Returns: The result of the run. """ - _check_no_run_event_stream_handler_in_workflow(event_stream_handler) + if workflow.in_workflow(): + raise UserError('`agent.run_sync()` cannot be used inside a Temporal workflow. Use `agent.run()` instead.') return super().run_sync( user_prompt, @@ -430,7 +442,10 @@ async def main(): Returns: The result of the run. """ - _check_no_run_event_stream_handler_in_workflow(event_stream_handler) + if workflow.in_workflow(): + raise UserError( + '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ) async with super().run_stream( user_prompt, @@ -575,45 +590,35 @@ async def main(): Returns: The result of the run. 
""" - if not workflow.in_workflow(): - async with super().iter( - user_prompt=user_prompt, - output_type=output_type, - message_history=message_history, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - **_deprecated_kwargs, - ) as run: - yield run - return + if workflow.in_workflow(): + if not self._temporal_overrides_active.get(): + raise UserError( + '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ) - if model is not None: - raise UserError( - 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' - ) - if toolsets is not None: - raise UserError( - 'Toolsets cannot be set at agent run time when using Temporal, it must be set at agent creation time.' - ) + if model is not None: + raise UserError( + 'Model cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' + ) + if toolsets is not None: + raise UserError( + 'Toolsets cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' 
+ ) - with self._temporal_overrides(): - async with super().iter( - user_prompt=user_prompt, - output_type=output_type, - message_history=message_history, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - **_deprecated_kwargs, - ) as run: - yield run + async with super().iter( + user_prompt=user_prompt, + output_type=output_type, + message_history=message_history, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + **_deprecated_kwargs, + ) as run: + yield run @contextmanager def override( @@ -638,23 +643,16 @@ def override( if workflow.in_workflow(): if _utils.is_set(model): raise UserError( - 'Model cannot be contextually overridden when using Temporal, it must be set at agent creation time.' + 'Model cannot be contextually overridden inside a Temporal workflow, it must be set at agent creation time.' ) if _utils.is_set(toolsets): raise UserError( - 'Toolsets cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + 'Toolsets cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' ) if _utils.is_set(tools): raise UserError( - 'Tools cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + 'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' ) with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools): yield - - -def _check_no_run_event_stream_handler_in_workflow(event_stream_handler: EventStreamHandler[AgentDepsT] | None) -> None: - if workflow.in_workflow() and event_stream_handler is not None: - raise UserError( - 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' 
- ) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py index 423d8348df..cc0eaa271d 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py @@ -35,7 +35,7 @@ class _RequestParams: serialized_run_context: Any -class _TemporalStreamedResponse(StreamedResponse): +class TemporalStreamedResponse(StreamedResponse): def __init__(self, model_request_parameters: ModelRequestParameters, response: ModelResponse): super().__init__(model_request_parameters) self.response = response @@ -130,11 +130,22 @@ async def request_stream( model_request_parameters: ModelRequestParameters, run_context: RunContext[Any] | None = None, ) -> AsyncIterator[StreamedResponse]: + if not workflow.in_workflow(): + async with super().request_stream( + messages, model_settings, model_request_parameters, run_context + ) as streamed_response: + yield streamed_response + return + if self.event_stream_handler is None: - raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') + raise UserError( + 'Streaming inside a Temporal workflow requires `Agent` to have an `event_stream_handler` set.' + ) if run_context is None: - raise UserError('Streaming with Temporal requires `request_stream` to be called with a `run_context`') + raise UserError( + 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' 
+ ) serialized_run_context = self.run_context_type.serialize_run_context(run_context) response = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] @@ -147,4 +158,4 @@ async def request_stream( ), **self.activity_config, ) - yield _TemporalStreamedResponse(model_request_parameters, response) + yield TemporalStreamedResponse(model_request_parameters, response) diff --git a/tests/cassettes/test_temporal/test_complex_agent_run_stream_in_workflow.yaml b/tests/cassettes/test_temporal/test_complex_agent_run_stream_in_workflow.yaml new file mode 100644 index 0000000000..99f14a3a15 --- /dev/null +++ b/tests/cassettes/test_temporal/test_complex_agent_run_stream_in_workflow.yaml @@ -0,0 +1,929 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4294' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the 
weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current 
log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. + name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}],"obfuscation":"2RzOMz6snzg"} + + data: 
{"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_w5dFfgZ9tHc5AZxppyYFIuHl","type":"function","function":{"name":"get_country","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"irDFsH5lzC"} + + data: {"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"MRXh"} + + data: {"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_8Ks83PNrVg7CjrgfA49cI6SR","type":"function","function":{"name":"get_product_name","arguments":""}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"qI2OI"} + + data: {"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","usage":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{}"}}]},"logprobs":null,"finish_reason":null}],"obfuscation":"7sxu"} + + data: {"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"MdGw"} + + data: 
{"id":"chatcmpl-C3OoWwTCYbJ255lFXwWSCx64R8I6u","object":"chat.completion.chunk","created":1754926404,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":364,"completion_tokens":40,"total_tokens":404,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"RNdcIwLOaDbhd"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '1236' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4720' + content-type: + - application/json + cookie: + - __cf_bm=qvTuiRBcW7Bg1izqtzFqOtqRu0FdltcnxI8HwrPcCLo-1754926405-1.0.1.1-c_MI0lTx2_gi3xrmoPwthTmLKeKKajC5fijqXwKfaytKJqHd2aGOttqRRjKHsoZaIaF6r95i.MVP9gTVsy2TvzO.WxJZQWwnY.oJMsxWDMc; + _cfuvid=a0UouwNeD6T07eGzpM64.qD3KG4SEfRY5kZYrLzgZiU-1754926405395-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_w5dFfgZ9tHc5AZxppyYFIuHl + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_8Ks83PNrVg7CjrgfA49cI6SR + type: function + - content: Mexico + role: tool + tool_call_id: call_w5dFfgZ9tHc5AZxppyYFIuHl + - content: Pydantic AI + 
role: tool + tool_call_id: call_8Ks83PNrVg7CjrgfA49cI6SR + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function 
+ - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. 
+ name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_oAYsUVV4qquVvieF6Dp61ipv","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"vrwPo0J2E"} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"HWd"} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"rm"} + + data: 
{"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"B"} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"P"} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"vxH"} + + data: {"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"TIRC"} + + data: 
{"id":"chatcmpl-C3OoaxA52FVqKNOcPSRU9dAB2N72T","object":"chat.completion.chunk","created":1754926408,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":423,"completion_tokens":15,"total_tokens":438,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"b2wb3y0L5jj6U"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '368' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4969' + content-type: + - application/json + cookie: + - __cf_bm=qvTuiRBcW7Bg1izqtzFqOtqRu0FdltcnxI8HwrPcCLo-1754926405-1.0.1.1-c_MI0lTx2_gi3xrmoPwthTmLKeKKajC5fijqXwKfaytKJqHd2aGOttqRRjKHsoZaIaF6r95i.MVP9gTVsy2TvzO.WxJZQWwnY.oJMsxWDMc; + _cfuvid=a0UouwNeD6T07eGzpM64.qD3KG4SEfRY5kZYrLzgZiU-1754926405395-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: 'Tell me: the capital of the country; the weather there; the product name' + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country + id: call_w5dFfgZ9tHc5AZxppyYFIuHl + type: function + - function: + arguments: '{}' + name: get_product_name + id: call_8Ks83PNrVg7CjrgfA49cI6SR + type: function + - content: Mexico + role: tool + tool_call_id: call_w5dFfgZ9tHc5AZxppyYFIuHl + - content: Pydantic AI + 
role: tool + tool_call_id: call_8Ks83PNrVg7CjrgfA49cI6SR + - role: assistant + tool_calls: + - function: + arguments: '{"city":"Mexico City"}' + name: get_weather + id: call_oAYsUVV4qquVvieF6Dp61ipv + type: function + - content: sunny + role: tool + tool_call_id: call_oAYsUVV4qquVvieF6Dp61ipv + model: gpt-4o + stream: true + stream_options: + include_usage: true + tool_choice: required + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + - function: + description: '' + name: get_country + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n + \ Temperature in Fahrenheit\n " + name: celsius_to_fahrenheit + parameters: + additionalProperties: false + properties: + celsius: + type: number + required: + - celsius + type: object + strict: true + type: function + - function: + description: "Get the weather forecast for a location.\n\n Args:\n location: The location to get the weather + forecast for.\n\n Returns:\n The weather forecast for the location.\n " + name: get_weather_forecast + parameters: + additionalProperties: false + properties: + location: + type: string + required: + - location + type: object + strict: true + type: function + - function: + description: '' + name: get_image_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image_resource_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_audio_resource_link + 
parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_product_name_link + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_image + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_dict + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_error + parameters: + additionalProperties: false + properties: + value: + default: false + type: boolean + type: object + type: function + - function: + description: '' + name: get_none + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: '' + name: get_multiple_items + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Get the current log level.\n\n Returns:\n The current log level.\n " + name: get_log_level + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: "Echo the run context.\n\n Args:\n ctx: Context object containing request and session information.\n\n + \ Returns:\n Dictionary with an echo message and the deps.\n " + name: echo_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + - function: + description: Use sampling callback. 
+ name: use_sampling + parameters: + additionalProperties: false + properties: + foo: + type: string + required: + - foo + type: object + strict: true + type: function + - function: + description: The final response which ends this conversation + name: final_result + parameters: + $defs: + Answer: + additionalProperties: false + properties: + answer: + type: string + label: + type: string + required: + - label + - answer + type: object + additionalProperties: false + properties: + answers: + items: + $ref: '#/$defs/Answer' + type: array + required: + - answers + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |+ + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_nryimwPYCh3YrGpAvvDhPxpO","type":"function","function":{"name":"final_result","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Rsplpznn"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"mLE"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answers"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"eeAi5JV3aIjxR6s"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":["}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"FU"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"09W"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"w"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"e"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"WYH8sFF1slrAXp1"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" of"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x15"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" the"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Pc"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" country"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"RX7C17YDGvCQ55"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"c"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"D"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Mexico"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"FiC5ZMbgN3lsQIx"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"i"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Weather"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"rpVPFBQDh0N6oTm"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" in"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4YY"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" the"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"T9"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" capital"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"53tq8ACYnSSFVa"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"c"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"9"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Sunny"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"L"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"},{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"rexyTVLA5g4TqZU"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"label"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"S"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"0"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Product"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"maad8vsYEgcs6Io"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Name"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"9"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"8"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"answer"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":""} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"u"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"P"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Exwy0"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"yd"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pHPa"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"antic"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"j"} + + data: 
{"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" AI"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"qQ1"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"CNK"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"]}"}}]},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"x2F2"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}],"usage":null,"obfuscation":"F5EE"} + + data: {"id":"chatcmpl-C3OodA2lxT9iakx32WWKSC5FCMZMa","object":"chat.completion.chunk","created":1754926411,"model":"gpt-4o-2024-08-06","service_tier":"default","system_fingerprint":"fp_07871e2ad8","choices":[],"usage":{"prompt_tokens":448,"completion_tokens":49,"total_tokens":497,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"L2GnXfXPSsFok"} + + data: [DONE] + + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + 
- h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '512' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/cassettes/test_temporal/test_temporal_agent_sync_tool_activity_disabled.yaml b/tests/cassettes/test_temporal/test_temporal_agent_sync_tool_activity_disabled.yaml new file mode 100644 index 0000000000..55e74d9003 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_sync_tool_activity_disabled.yaml @@ -0,0 +1,100 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '346' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the weather in Mexico City? 
+ role: user + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: '' + name: get_weather + parameters: + additionalProperties: false + properties: + city: + type: string + required: + - city + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1085' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '806' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: tool_calls + index: 0 + logprobs: null + message: + annotations: [] + content: null + refusal: null + role: assistant + tool_calls: + - function: + arguments: '{"city":"Mexico City"}' + name: get_weather + id: call_MOtXZsU6lfOmXwoBOtXKpCth + type: function + created: 1754922581 + id: chatcmpl-C3NorJhLoxFbVB2hq1bUQDmkI1TEf + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_07871e2ad8 + usage: + completion_tokens: 15 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 45 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 60 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 257c109e5d..602235eb9f 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -10,6 +10,7 @@ from typing_extensions import TypedDict from pydantic_ai import Agent, RunContext +from pydantic_ai.direct import model_request_stream from pydantic_ai.exceptions import UserError from pydantic_ai.messages import ( 
AgentStreamEvent, @@ -17,6 +18,8 @@ FunctionToolCallEvent, FunctionToolResultEvent, HandleResponseEvent, + ModelMessage, + ModelRequest, PartDeltaEvent, PartStartEvent, ToolCallPart, @@ -778,13 +781,13 @@ async def test_temporal_agent(): ) -def test_temporal_agent_run_sync(allow_model_requests: None): - result = simple_temporal_agent.run_sync('What is the capital of Mexico?') +async def test_temporal_agent_run(allow_model_requests: None): + result = await simple_temporal_agent.run('What is the capital of Mexico?') assert result.output == snapshot('The capital of Mexico is Mexico City.') -async def test_temporal_agent_run(allow_model_requests: None): - result = await simple_temporal_agent.run('What is the capital of Mexico?') +def test_temporal_agent_run_sync(allow_model_requests: None): + result = simple_temporal_agent.run_sync('What is the capital of Mexico?') assert result.output == snapshot('The capital of Mexico is Mexico City.') @@ -826,6 +829,96 @@ async def test_temporal_agent_iter(allow_model_requests: None): ) +@workflow.defn +class SimpleAgentWorkflowWithRunSync: + @workflow.run + async def run(self, prompt: str) -> str: + result = simple_temporal_agent.run_sync(prompt) + return result.output + + +async def test_temporal_agent_run_sync_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithRunSync], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithRunSync.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithRunSync.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + '`agent.run_sync()` cannot be used inside a 
Temporal workflow. Use `agent.run()` instead.' + ) + + +@workflow.defn +class SimpleAgentWorkflowWithRunStream: + @workflow.run + async def run(self, prompt: str) -> str: + async with simple_temporal_agent.run_stream(prompt) as result: + pass + return await result.get_output() + + +async def test_temporal_agent_run_stream_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithRunStream], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithRunStream.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithRunStream.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' 
+ ) + + +@workflow.defn +class SimpleAgentWorkflowWithIter: + @workflow.run + async def run(self, prompt: str) -> str: + async with simple_temporal_agent.iter(prompt) as run: + async for _ in run: + pass + return 'done' + + +async def test_temporal_agent_iter_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithIter], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithIter.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithIter.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ) + + async def simple_event_stream_handler( ctx: RunContext[None], stream: AsyncIterable[AgentStreamEvent | HandleResponseEvent], @@ -858,67 +951,65 @@ async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_mo assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Event stream handler cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + 'Event stream handler cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' 
) @workflow.defn -class SimpleAgentWorkflowWithIterModel: +class SimpleAgentWorkflowWithRunModel: @workflow.run async def run(self, prompt: str) -> str: - async with simple_temporal_agent.iter(prompt, model=model) as run: - assert run.result is not None - return run.result.output + result = await simple_temporal_agent.run(prompt, model=model) + return result.output -async def test_temporal_agent_iter_in_workflow_with_model(allow_model_requests: None, client: Client): +async def test_temporal_agent_run_in_workflow_with_model(allow_model_requests: None, client: Client): async with Worker( client, task_queue=TASK_QUEUE, - workflows=[SimpleAgentWorkflowWithIterModel], + workflows=[SimpleAgentWorkflowWithRunModel], plugins=[AgentPlugin(simple_temporal_agent)], ): with pytest.raises(WorkflowFailureError) as exc_info: await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - SimpleAgentWorkflowWithIterModel.run, + SimpleAgentWorkflowWithRunModel.run, args=['What is the capital of Mexico?'], - id=SimpleAgentWorkflowWithIterModel.__name__, + id=SimpleAgentWorkflowWithRunModel.__name__, task_queue=TASK_QUEUE, ) assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Model cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + 'Model cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' 
) @workflow.defn -class SimpleAgentWorkflowWithIterToolsets: +class SimpleAgentWorkflowWithRunToolsets: @workflow.run async def run(self, prompt: str) -> str: - async with simple_temporal_agent.iter(prompt, toolsets=[FunctionToolset()]) as run: - assert run.result is not None - return run.result.output + result = await simple_temporal_agent.run(prompt, toolsets=[FunctionToolset()]) + return result.output -async def test_temporal_agent_iter_in_workflow_with_toolsets(allow_model_requests: None, client: Client): +async def test_temporal_agent_run_in_workflow_with_toolsets(allow_model_requests: None, client: Client): async with Worker( client, task_queue=TASK_QUEUE, - workflows=[SimpleAgentWorkflowWithIterToolsets], + workflows=[SimpleAgentWorkflowWithRunToolsets], plugins=[AgentPlugin(simple_temporal_agent)], ): with pytest.raises(WorkflowFailureError) as exc_info: await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - SimpleAgentWorkflowWithIterToolsets.run, + SimpleAgentWorkflowWithRunToolsets.run, args=['What is the capital of Mexico?'], - id=SimpleAgentWorkflowWithIterToolsets.__name__, + id=SimpleAgentWorkflowWithRunToolsets.__name__, task_queue=TASK_QUEUE, ) assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Toolsets cannot be set at agent run time when using Temporal, it must be set at agent creation time.' + 'Toolsets cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' ) @@ -948,7 +1039,7 @@ async def test_temporal_agent_override_model_in_workflow(allow_model_requests: N assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Model cannot be contextually overridden when using Temporal, it must be set at agent creation time.' 
+ 'Model cannot be contextually overridden inside a Temporal workflow, it must be set at agent creation time.' ) @@ -978,7 +1069,7 @@ async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Toolsets cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + 'Toolsets cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' ) @@ -1008,12 +1099,101 @@ async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: N assert isinstance(exc_info.value.__cause__, ApplicationError) assert exc_info.value.__cause__.type == UserError.__name__ assert exc_info.value.__cause__.message == snapshot( - 'Tools cannot be contextually overridden when using Temporal, they must be set at agent creation time.' + 'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' ) -# TODO: f'Temporal activity config for tool {name!r} has been explicitly set to `False` (activity disabled), ' -# 'but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead.' +agent_with_sync_tool = Agent(model, name='agent_with_sync_tool', tools=[get_weather]) + +# This needs to be done before the `TemporalAgent` is bound to the workflow. 
+temporal_agent_with_sync_tool_activity_disabled = TemporalAgent( + agent_with_sync_tool, + tool_activity_config={ + '': { + 'get_weather': False, + }, + }, +) + + +@workflow.defn +class AgentWorkflowWithSyncToolActivityDisabled: + @workflow.run + async def run(self, prompt: str) -> str: + result = await temporal_agent_with_sync_tool_activity_disabled.run(prompt) + return result.output + + +async def test_temporal_agent_sync_tool_activity_disabled(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[AgentWorkflowWithSyncToolActivityDisabled], + plugins=[AgentPlugin(temporal_agent_with_sync_tool_activity_disabled)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + AgentWorkflowWithSyncToolActivityDisabled.run, + args=['What is the weather in Mexico City?'], + id=AgentWorkflowWithSyncToolActivityDisabled.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead." + ) + + +async def test_temporal_agent_mcp_server_activity_disabled(client: Client): + with pytest.raises( + UserError, + match=re.escape( + "Temporal activity config for MCP tool 'get_product_name' has been explicitly set to `False` (activity disabled), " + 'but MCP tools require the use of IO and so cannot be run outside of an activity.' 
+ ), + ): + TemporalAgent( + complex_agent, + tool_activity_config={ + 'mcp': { + 'get_product_name': False, + }, + }, + ) + + +@workflow.defn +class DirectStreamWorkflow: + @workflow.run + async def run(self, prompt: str) -> str: + messages: list[ModelMessage] = [ModelRequest.user_text_prompt(prompt)] + async with model_request_stream(complex_temporal_agent.model, messages) as stream: + async for _ in stream: + pass + return 'done' + + +async def test_temporal_model_stream_direct(client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[DirectStreamWorkflow], + plugins=[AgentPlugin(complex_temporal_agent)], + ): + with pytest.raises(WorkflowFailureError) as exc_info: + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + DirectStreamWorkflow.run, + args=['What is the capital of Mexico?'], + id=DirectStreamWorkflow.__name__, + task_queue=TASK_QUEUE, + ) + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == UserError.__name__ + assert exc_info.value.__cause__.message == snapshot( + 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ) + # TODO: 'The `deps` object must be a JSON-serializable dictionary in order to be used with Temporal. ' # 'To use a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' @@ -1022,14 +1202,7 @@ async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: N # TODO: tool_activity_config -# TODO: f'Temporal activity config for MCP tool {tool_name!r} has been explicitly set to `False` (activity disabled), ' -# 'but MCP tools require the use of IO and so cannot be run outside of an activity.' 
- # TODO: Custom temporalize_toolset_func -# TODO: raise UserError('Streaming with Temporal requires `Agent` to have an `event_stream_handler` set.') - -# TODO: raise UserError('Streaming with Temporal requires `request_stream` to be called with a `run_context`') - # TODO: LogfirePlugin(setup_logfire=) # TODO: LogfirePlugin(metrics=False) From d1fd4ba08b20959af53f0d1da956940810ae5143 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 17:54:59 +0000 Subject: [PATCH 15/30] Add all the tests --- .../pydantic_ai/ext/temporal/__init__.py | 2 + .../pydantic_ai/ext/temporal/_agent.py | 21 +- .../pydantic_ai/ext/temporal/_logfire.py | 4 +- .../pydantic_ai/ext/temporal/_run_context.py | 8 +- .../test_temporal/test_logfire_plugin.yaml | 31 ++ ...ral_agent_with_dataclass_deps_as_dict.yaml | 195 ++++++++++++ ...est_temporal_agent_with_non_dict_deps.yaml | 95 ++++++ tests/test_temporal.py | 284 +++++++++++++----- 8 files changed, 554 insertions(+), 86 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_logfire_plugin.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_with_dataclass_deps_as_dict.yaml create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_with_non_dict_deps.yaml diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py index 4d548013fa..4f1e299942 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py @@ -15,6 +15,7 @@ from ._agent import TemporalAgent from ._logfire import LogfirePlugin from ._run_context import TemporalRunContext, TemporalRunContextWithDeps +from ._toolset import TemporalWrapperToolset __all__ = [ 'TemporalAgent', @@ -23,6 +24,7 @@ 'AgentPlugin', 'TemporalRunContext', 'TemporalRunContextWithDeps', + 'TemporalWrapperToolset', ] diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py 
index f498429fde..a41b4c5d18 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py @@ -56,16 +56,23 @@ def __init__( AbstractToolset[Any], ] = temporalize_toolset, ): - """Wrap an agent to make it compatible with Temporal. + """Wrap an agent to allow it to be used inside a Temporal workflow, by automatically moving model requests and tool calls to Temporal activities. Args: wrapped: The agent to wrap. - activity_config: The Temporal activity config to use. - model_activity_config: The Temporal activity config to use for model requests. - toolset_activity_config: The Temporal activity config to use for specific toolsets identified by ID. - tool_activity_config: The Temporal activity config to use for specific tools identified by toolset ID and tool name. - run_context_type: The type of run context to use to serialize and deserialize the run context. - temporalize_toolset_func: The function to use to prepare the toolsets for Temporal. + activity_config: The base Temporal activity config to use for all activities. + model_activity_config: The Temporal activity config to use for model request activities. This is merged with the base activity config. + toolset_activity_config: The Temporal activity config to use for get-tools and call-tool activities for specific toolsets identified by ID. This is merged with the base activity config. + tool_activity_config: The Temporal activity config to use for specific tool call activities identified by toolset ID and tool name. + This is merged with the base and toolset-specific activity configs. Use `False` to disable using an activity for a specific tool. + run_context_type: The `TemporalRunContext` subclass to use to serialize and deserialize the run context for use inside a Temporal activity. + By default, only the `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. 
+ To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute. + If `deps` is a JSON-serializable dictionary, you can use `TemporalRunContextWithDeps` to make the `deps` attribute available as well. + If `deps` is of a different type, create a `TemporalRunContext` subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. + temporalize_toolset_func: Optional function to use to prepare "leaf" toolsets (i.e. those that implement their own tool listing and calling) for Temporal by wrapping them in a `TemporalWrapperToolset` that moves methods that require IO to Temporal activities. + If not provided, only `FunctionToolset` and `MCPServer` will be prepared for Temporal. + The function takes the toolset, the activity name prefix, the toolset-specific activity config, the tool-specific activity configs and the run context type. """ super().__init__(wrapped) diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py index d44a0077eb..6b907e5f6f 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py @@ -27,7 +27,7 @@ def __init__(self, setup_logfire: Callable[[], Logfire] = _default_setup_logfire def configure_client(self, config: ClientConfig) -> ClientConfig: interceptors = config.get('interceptors', []) - config['interceptors'] = [*interceptors, TracingInterceptor(get_tracer('temporal'))] + config['interceptors'] = [*interceptors, TracingInterceptor(get_tracer('temporalio'))] return super().configure_client(config) async def connect_service_client(self, config: ConnectConfig) -> ServiceClient: @@ -36,7 +36,7 @@ async def connect_service_client(self, config: ConnectConfig) -> ServiceClient: if self.metrics: logfire_config = logfire.config token = logfire_config.token - if token is not 
None: + if logfire_config.send_to_logfire and token is not None and logfire_config.metrics is not False: base_url = logfire_config.advanced.generate_base_url(token) metrics_url = base_url + '/v1/metrics' headers = {'Authorization': f'Bearer {token}'} diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py index aed38c80a5..3096aa42e3 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py @@ -29,6 +29,7 @@ def __getattribute__(self, name: str) -> Any: @classmethod def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: + """Serialize the run context to a `dict[str, Any]`.""" return { 'retries': ctx.retries, 'tool_call_id': ctx.tool_call_id, @@ -38,7 +39,8 @@ def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: } @classmethod - def deserialize_run_context(cls, ctx: dict[str, Any]) -> RunContext[Any]: + def deserialize_run_context(cls, ctx: dict[str, Any]) -> TemporalRunContext: + """Deserialize the run context from a `dict[str, Any]`.""" return cls(**ctx) @@ -47,7 +49,7 @@ class TemporalRunContextWithDeps(TemporalRunContext): def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: if not isinstance(ctx.deps, dict): raise UserError( - 'The `deps` object must be a JSON-serializable dictionary in order to be used with Temporal. ' - 'To use a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' + '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary. ' + 'To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' 
) return {**super().serialize_run_context(ctx), 'deps': ctx.deps} # pyright: ignore[reportUnknownMemberType] diff --git a/tests/cassettes/test_temporal/test_logfire_plugin.yaml b/tests/cassettes/test_temporal/test_logfire_plugin.yaml new file mode 100644 index 0000000000..6c7fedecfb --- /dev/null +++ b/tests/cassettes/test_temporal/test_logfire_plugin.yaml @@ -0,0 +1,31 @@ +interactions: +- request: + body: null + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + method: GET + uri: https://logfire-us.pydantic.dev/v1/info + response: + headers: + access-control-expose-headers: + - traceresponse + connection: + - keep-alive + content-length: + - '87' + content-type: + - application/json + transfer-encoding: + - chunked + parsed_body: + project_name: pydantic-ai + project_url: https://logfire-us.pydantic.dev/pydantic/pydantic-ai + status: + code: 200 + message: OK +version: 1 diff --git a/tests/cassettes/test_temporal/test_temporal_agent_with_dataclass_deps_as_dict.yaml b/tests/cassettes/test_temporal/test_temporal_agent_with_dataclass_deps_as_dict.yaml new file mode 100644 index 0000000000..56e26e3b43 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_with_dataclass_deps_as_dict.yaml @@ -0,0 +1,195 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '298' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of the country? 
+ role: user + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: '' + name: get_country_from_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1071' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '762' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: tool_calls + index: 0 + logprobs: null + message: + annotations: [] + content: null + refusal: null + role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country_from_deps + id: call_5yRefuR6ypjiaezT7TLFs6cw + type: function + created: 1754931124 + id: chatcmpl-C3Q2eKnB6inIhhfgNgGnWbTMA2dxc + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_ff25b2783a + usage: + completion_tokens: 13 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 42 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 55 + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '534' + content-type: + - application/json + cookie: + - __cf_bm=mSkun4f2D8ApNaB3wVpGAt_q1Nw1DAlbsGyBdx44ecs-1754931124-1.0.1.1-1wnrnf1KSSmpulBH_mYdlPZlUDzGlcxdUIezMYqWXFW1DBq0oXf_iFZ43bjWBar0yuXJ7TuX2f8JH6fLYqeoVJjutHZJeJNskPgLTocD3fE; + _cfuvid=ZfQ6ht_tZf58Z86BtFmF.8PgCy5MRXIbVNE3e0MV5dc-1754931124950-0.0.1.1-604800000 + 
host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of the country? + role: user + - role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country_from_deps + id: call_5yRefuR6ypjiaezT7TLFs6cw + type: function + - content: Mexico + role: tool + tool_call_id: call_5yRefuR6ypjiaezT7TLFs6cw + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: '' + name: get_country_from_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '450' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. 
+ refusal: null + role: assistant + created: 1754931125 + id: chatcmpl-C3Q2f5OX9rYHpSnEVZwHjzqUVrTQF + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_07871e2ad8 + usage: + completion_tokens: 9 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 67 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 76 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/cassettes/test_temporal/test_temporal_agent_with_non_dict_deps.yaml b/tests/cassettes/test_temporal/test_temporal_agent_with_non_dict_deps.yaml new file mode 100644 index 0000000000..b4b64ef667 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_with_non_dict_deps.yaml @@ -0,0 +1,95 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '298' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of the country? 
+ role: user + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: '' + name: get_country_from_deps + parameters: + additionalProperties: false + properties: {} + type: object + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1071' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '579' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: tool_calls + index: 0 + logprobs: null + message: + annotations: [] + content: null + refusal: null + role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_country_from_deps + id: call_SdMSvu33V768Hpgpv2L9Vub0 + type: function + created: 1754931280 + id: chatcmpl-C3Q5AndpHahgIZa3e472KqmMMupyp + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_07871e2ad8 + usage: + completion_tokens: 13 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 42 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 55 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 602235eb9f..6c6de8e571 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -3,10 +3,11 @@ import os import re from collections.abc import AsyncIterable, AsyncIterator, Iterator -from dataclasses import dataclass +from contextlib import contextmanager +from dataclasses import asdict, dataclass from datetime import timedelta +from typing import Any, Literal -from 
inline_snapshot import snapshot from typing_extensions import TypedDict from pydantic_ai import Agent, RunContext @@ -33,6 +34,7 @@ from temporalio import workflow from temporalio.activity import _Definition as ActivityDefinition # pyright: ignore[reportPrivateUsage] from temporalio.client import Client, WorkflowFailureError + from temporalio.contrib.opentelemetry import TracingInterceptor from temporalio.exceptions import ApplicationError from temporalio.testing import WorkflowEnvironment from temporalio.worker import Worker @@ -43,6 +45,7 @@ LogfirePlugin, PydanticAIPlugin, TemporalAgent, + TemporalRunContext, TemporalRunContextWithDeps, ) from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset @@ -55,7 +58,10 @@ try: import logfire + from logfire import Logfire + from logfire._internal.tracer import _ProxyTracer # pyright: ignore[reportPrivateUsage] from logfire.testing import CaptureLogfire + from opentelemetry.trace import ProxyTracer except ImportError: import pytest @@ -84,6 +90,7 @@ # https://github.com/temporalio/sdk-python/blob/3244f8bffebee05e0e7efefb1240a75039903dda/tests/test_client.py#L112C1-L113C1 import pytest + from inline_snapshot import snapshot # Loads `vcr`, which Temporal doesn't like without passing through the import from .conftest import IsDatetime, IsStr @@ -117,6 +124,16 @@ def uninstrument_pydantic_ai() -> Iterator[None]: Agent.instrument_all(False) +@contextmanager +def temporal_raises(exc_type: type[Exception], exc_message: str) -> Iterator[None]: + """Helper for asserting that a Temporal workflow fails with the expected error.""" + with pytest.raises(WorkflowFailureError) as exc_info: + yield + assert isinstance(exc_info.value.__cause__, ApplicationError) + assert exc_info.value.__cause__.type == exc_type.__name__ + assert exc_info.value.__cause__.message == exc_message + + TEMPORAL_PORT = 7243 TASK_QUEUE = 'pydantic-ai-agent-task-queue' @@ -844,18 +861,16 @@ async def 
test_temporal_agent_run_sync_in_workflow(allow_model_requests: None, c workflows=[SimpleAgentWorkflowWithRunSync], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot('`agent.run_sync()` cannot be used inside a Temporal workflow. Use `agent.run()` instead.'), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithRunSync.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithRunSync.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - '`agent.run_sync()` cannot be used inside a Temporal workflow. Use `agent.run()` instead.' - ) @workflow.defn @@ -874,18 +889,18 @@ async def test_temporal_agent_run_stream_in_workflow(allow_model_requests: None, workflows=[SimpleAgentWorkflowWithRunStream], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithRunStream.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithRunStream.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' 
- ) @workflow.defn @@ -905,18 +920,18 @@ async def test_temporal_agent_iter_in_workflow(allow_model_requests: None, clien workflows=[SimpleAgentWorkflowWithIter], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithIter.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithIter.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' - ) async def simple_event_stream_handler( @@ -941,18 +956,18 @@ async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_mo workflows=[SimpleAgentWorkflowWithEventStreamHandler], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Event stream handler cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' 
+ ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithEventStreamHandler.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithEventStreamHandler.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Event stream handler cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' - ) @workflow.defn @@ -970,18 +985,18 @@ async def test_temporal_agent_run_in_workflow_with_model(allow_model_requests: N workflows=[SimpleAgentWorkflowWithRunModel], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Model cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' + ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithRunModel.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithRunModel.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Model cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' - ) @workflow.defn @@ -999,18 +1014,18 @@ async def test_temporal_agent_run_in_workflow_with_toolsets(allow_model_requests workflows=[SimpleAgentWorkflowWithRunToolsets], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Toolsets cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' 
+ ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithRunToolsets.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithRunToolsets.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Toolsets cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' - ) @workflow.defn @@ -1029,18 +1044,18 @@ async def test_temporal_agent_override_model_in_workflow(allow_model_requests: N workflows=[SimpleAgentWorkflowWithOverrideModel], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Model cannot be contextually overridden inside a Temporal workflow, it must be set at agent creation time.' + ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithOverrideModel.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithOverrideModel.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Model cannot be contextually overridden inside a Temporal workflow, it must be set at agent creation time.' - ) @workflow.defn @@ -1059,18 +1074,18 @@ async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests workflows=[SimpleAgentWorkflowWithOverrideToolsets], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Toolsets cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' 
+ ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithOverrideToolsets.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithOverrideToolsets.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Toolsets cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' - ) @workflow.defn @@ -1089,18 +1104,18 @@ async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: N workflows=[SimpleAgentWorkflowWithOverrideTools], plugins=[AgentPlugin(simple_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' + ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithOverrideTools.run, args=['What is the capital of Mexico?'], id=SimpleAgentWorkflowWithOverrideTools.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' 
- ) agent_with_sync_tool = Agent(model, name='agent_with_sync_tool', tools=[get_weather]) @@ -1131,18 +1146,16 @@ async def test_temporal_agent_sync_tool_activity_disabled(allow_model_requests: workflows=[AgentWorkflowWithSyncToolActivityDisabled], plugins=[AgentPlugin(temporal_agent_with_sync_tool_activity_disabled)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead.", + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] AgentWorkflowWithSyncToolActivityDisabled.run, args=['What is the weather in Mexico City?'], id=AgentWorkflowWithSyncToolActivityDisabled.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead." - ) async def test_temporal_agent_mcp_server_activity_disabled(client: Client): @@ -1181,28 +1194,151 @@ async def test_temporal_model_stream_direct(client: Client): workflows=[DirectStreamWorkflow], plugins=[AgentPlugin(complex_temporal_agent)], ): - with pytest.raises(WorkflowFailureError) as exc_info: + with temporal_raises( + UserError, + snapshot( + 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' 
+ ), + ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] DirectStreamWorkflow.run, args=['What is the capital of Mexico?'], id=DirectStreamWorkflow.__name__, task_queue=TASK_QUEUE, ) - assert isinstance(exc_info.value.__cause__, ApplicationError) - assert exc_info.value.__cause__.type == UserError.__name__ - assert exc_info.value.__cause__.message == snapshot( - 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + + +@dataclass +class DataclassDeps: + country: str + + +agent_with_dataclass_deps = Agent(model, name='agent_with_dataclass_deps', deps_type=DataclassDeps) + + +@agent_with_dataclass_deps.tool +async def get_country_from_deps(ctx: RunContext[DataclassDeps]) -> str: + return ctx.deps.country + + +# This needs to be done before the `TemporalAgent` is bound to the workflow. +temporal_agent_with_dataclass_deps = TemporalAgent( + agent_with_dataclass_deps, + run_context_type=TemporalRunContextWithDeps, +) + + +@workflow.defn +class AgentWorkflowWithDataclassDeps: + @workflow.run + async def run(self, prompt: str, deps: DataclassDeps) -> str: + result = await temporal_agent_with_dataclass_deps.run(prompt, deps=deps) + return result.output + + +async def test_temporal_agent_with_non_dict_deps(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[AgentWorkflowWithDataclassDeps], + plugins=[AgentPlugin(temporal_agent_with_dataclass_deps)], + ): + with temporal_raises( + UserError, + snapshot( + '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary. To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' 
+ ), + ): + await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + AgentWorkflowWithDataclassDeps.run, + args=[ + 'What is the capital of the country?', + DataclassDeps(country='Mexico'), + ], + id=AgentWorkflowWithDataclassDeps.__name__, + task_queue=TASK_QUEUE, + ) + + +class TemporalRunContextWithDataclassDeps(TemporalRunContext): + @classmethod + def serialize_run_context(cls, ctx: RunContext[DataclassDeps]) -> dict[str, Any]: + return {**super().serialize_run_context(ctx), 'deps': asdict(ctx.deps)} + + @classmethod + def deserialize_run_context(cls, ctx: dict[str, Any]) -> TemporalRunContext: + deps = DataclassDeps(**ctx.pop('deps', {})) + return cls(**ctx, deps=deps) + + +# This needs to be done before the `TemporalAgent` is bound to the workflow. +temporal_agent_with_dataclass_deps_as_dict = TemporalAgent( + agent_with_dataclass_deps, + run_context_type=TemporalRunContextWithDataclassDeps, +) + + +@workflow.defn +class AgentWorkflowWithDataclassDepsAsDict: + @workflow.run + async def run(self, prompt: str, deps: DataclassDeps) -> str: + result = await temporal_agent_with_dataclass_deps_as_dict.run(prompt, deps=deps) + return result.output + + +async def test_temporal_agent_with_dataclass_deps_as_dict(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[AgentWorkflowWithDataclassDepsAsDict], + plugins=[AgentPlugin(temporal_agent_with_dataclass_deps_as_dict)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + AgentWorkflowWithDataclassDepsAsDict.run, + args=[ + 'What is the capital of the country?', + DataclassDeps(country='Mexico'), + ], + id=AgentWorkflowWithDataclassDepsAsDict.__name__, + task_queue=TASK_QUEUE, ) + assert output == snapshot('The capital of Mexico is Mexico City.') + + +async def test_logfire_plugin(client: Client): + def setup_logfire(send_to_logfire: bool = True, metrics: Literal[False] | None = None) -> Logfire: + 
instance = logfire.configure(local=True, metrics=metrics) + instance.config.token = 'test' + instance.config.send_to_logfire = send_to_logfire + return instance + + plugin = LogfirePlugin(setup_logfire) + config = client.config() + config['plugins'] = [plugin] + new_client = Client(**config) -# TODO: 'The `deps` object must be a JSON-serializable dictionary in order to be used with Temporal. ' -# 'To use a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' + interceptor = new_client.config()['interceptors'][0] + assert isinstance(interceptor, TracingInterceptor) + if isinstance(interceptor.tracer, ProxyTracer): + assert interceptor.tracer._instrumenting_module_name == 'temporalio' # pyright: ignore[reportPrivateUsage] + elif isinstance(interceptor.tracer, _ProxyTracer): + assert interceptor.tracer.instrumenting_module_name == 'temporalio' + else: + assert False, f'Unexpected tracer type: {type(interceptor.tracer)}' -# TODO: Custom run_context_type + new_client = await Client.connect(client.service_client.config.target_host, plugins=[plugin]) + # We can't check if the metrics URL was actually set correctly because it's on a `temporalio.bridge.runtime.Runtime` that we can't read from. 
+ assert new_client.service_client.config.runtime is not None -# TODO: tool_activity_config + plugin = LogfirePlugin(setup_logfire, metrics=False) + new_client = await Client.connect(client.service_client.config.target_host, plugins=[plugin]) + assert new_client.service_client.config.runtime is None -# TODO: Custom temporalize_toolset_func + plugin = LogfirePlugin(lambda: setup_logfire(send_to_logfire=False)) + new_client = await Client.connect(client.service_client.config.target_host, plugins=[plugin]) + assert new_client.service_client.config.runtime is None -# TODO: LogfirePlugin(setup_logfire=) -# TODO: LogfirePlugin(metrics=False) + plugin = LogfirePlugin(lambda: setup_logfire(metrics=False)) + new_client = await Client.connect(client.service_client.config.target_host, plugins=[plugin]) + assert new_client.service_client.config.runtime is None From 9916c8240f7fe4c53518bdc5c4f1c025148df8d7 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 18:11:18 +0000 Subject: [PATCH 16/30] Only retry temporal activities once in tests, so CI doesn't time out --- tests/test_temporal.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 6c6de8e571..8b7b105e71 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -34,6 +34,7 @@ from temporalio import workflow from temporalio.activity import _Definition as ActivityDefinition # pyright: ignore[reportPrivateUsage] from temporalio.client import Client, WorkflowFailureError + from temporalio.common import RetryPolicy from temporalio.contrib.opentelemetry import TracingInterceptor from temporalio.exceptions import ApplicationError from temporalio.testing import WorkflowEnvironment @@ -172,7 +173,13 @@ async def client_with_logfire(temporal_env: WorkflowEnvironment) -> Client: simple_agent = Agent(model, name='simple_agent') # This needs to be done before the `TemporalAgent` is bound to the workflow. 
-simple_temporal_agent = TemporalAgent(simple_agent) +simple_temporal_agent = TemporalAgent( + simple_agent, + activity_config=ActivityConfig( + start_to_close_timeout=timedelta(seconds=60), + retry_policy=RetryPolicy(maximum_attempts=1), + ), +) @workflow.defn @@ -247,7 +254,10 @@ class Response: # This needs to be done before the `TemporalAgent` is bound to the workflow. complex_temporal_agent = TemporalAgent( complex_agent, - activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=60)), + activity_config=ActivityConfig( + start_to_close_timeout=timedelta(seconds=60), + retry_policy=RetryPolicy(maximum_attempts=1), + ), model_activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=90)), toolset_activity_config={ 'country': ActivityConfig(start_to_close_timeout=timedelta(seconds=120)), @@ -256,6 +266,9 @@ class Response: 'country': { 'get_country': False, }, + '': { + 'get_weather': ActivityConfig(start_to_close_timeout=timedelta(seconds=180)), + }, }, run_context_type=TemporalRunContextWithDeps, ) @@ -1148,7 +1161,9 @@ async def test_temporal_agent_sync_tool_activity_disabled(allow_model_requests: ): with temporal_raises( UserError, - "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead.", + snapshot( + "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead." 
+ ), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] AgentWorkflowWithSyncToolActivityDisabled.run, From 13f222477740d63234b7a2457e42936b312ce7b7 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 18:19:14 +0000 Subject: [PATCH 17/30] Fix logfire f-string logging with '{}' inside str repr in Python 3.10 --- tests/test_temporal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 8b7b105e71..67a2e52296 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -216,7 +216,7 @@ async def event_stream_handler( ): logfire.info(f'{ctx.run_step=}') async for event in stream: - logfire.info(f'{event=}') + logfire.info('event', event=event) async def get_country(ctx: RunContext[Deps]) -> str: From 342ab17818742a04f2c8068a28e07491603e2f5b Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 18:45:24 +0000 Subject: [PATCH 18/30] More test coverage --- .../ext/temporal/_function_toolset.py | 9 +-- .../pydantic_ai/ext/temporal/_mcp_server.py | 9 +-- .../pydantic_ai/ext/temporal/_model.py | 15 ++-- ...poral_agent_override_deps_in_workflow.yaml | 79 +++++++++++++++++++ tests/test_temporal.py | 78 ++++++++++++------ 5 files changed, 146 insertions(+), 44 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_override_deps_in_workflow.yaml diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py index 0506632ce4..0a7b038042 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py @@ -45,8 +45,8 @@ async def call_tool_activity(params: _CallToolParams) -> Any: ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context) try: tool = (await toolset.get_tools(ctx))[name] - except KeyError as e: - raise UserError( # pragma: no cover + 
except KeyError as e: # pragma: no cover + raise UserError( f'Tool {name!r} not found in toolset {self.id!r}. ' 'Removing or renaming tools during an agent run is not supported with Temporal.' ) from e @@ -55,11 +55,6 @@ async def call_tool_activity(params: _CallToolParams) -> Any: self.call_tool_activity = call_tool_activity - @property - def wrapped_function_toolset(self) -> FunctionToolset: - assert isinstance(self.wrapped, FunctionToolset) - return self.wrapped - @property def temporal_activities(self) -> list[Callable[..., Any]]: return [self.call_tool_activity] diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py index cdf75e59f2..5c7b8c81d4 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py @@ -74,15 +74,14 @@ async def call_tool_activity(params: _CallToolParams) -> ToolResult: params.name, params.tool_args, run_context, - self.wrapped_server.tool_for_tool_def(params.tool_def), + self.tool_for_tool_def(params.tool_def), ) self.call_tool_activity = call_tool_activity - @property - def wrapped_server(self) -> MCPServer: + def tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool: assert isinstance(self.wrapped, MCPServer) - return self.wrapped + return self.wrapped.tool_for_tool_def(tool_def) @property def temporal_activities(self) -> list[Callable[..., Any]]: @@ -106,7 +105,7 @@ async def get_tools(self, ctx: RunContext[Any]) -> dict[str, ToolsetTool[Any]]: arg=_GetToolsParams(serialized_run_context=serialized_run_context), **self.activity_config, ) - return {name: self.wrapped_server.tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} + return {name: self.tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} async def call_tool( self, diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py index 
cc0eaa271d..07398afb07 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py +++ b/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py @@ -49,15 +49,15 @@ def get(self) -> ModelResponse: return self.response def usage(self) -> Usage: - return self.response.usage + return self.response.usage # pragma: no cover @property def model_name(self) -> str: - return self.response.model_name or '' + return self.response.model_name or '' # pragma: no cover @property def timestamp(self) -> datetime: - return self.response.timestamp + return self.response.timestamp # pragma: no cover class TemporalModel(WrapperModel): @@ -137,16 +137,15 @@ async def request_stream( yield streamed_response return - if self.event_stream_handler is None: - raise UserError( - 'Streaming inside a Temporal workflow requires `Agent` to have an `event_stream_handler` set.' - ) - if run_context is None: raise UserError( 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' ) + # We can never get here without an `event_stream_handler`, as `TemporalAgent.run_stream` and `TemporalAgent.iter` raise an error saying to use `TemporalAgent.run` instead, + # and that only calls `request_stream` if `event_stream_handler` is set. 
+ assert self.event_stream_handler is not None + serialized_run_context = self.run_context_type.serialize_run_context(run_context) response = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.request_stream_activity, diff --git a/tests/cassettes/test_temporal/test_temporal_agent_override_deps_in_workflow.yaml b/tests/cassettes/test_temporal/test_temporal_agent_override_deps_in_workflow.yaml new file mode 100644 index 0000000000..0d9336e72e --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_override_deps_in_workflow.yaml @@ -0,0 +1,79 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '105' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the capital of Mexico? + role: user + model: gpt-4o + stream: false + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '838' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '467' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: The capital of Mexico is Mexico City. 
+ refusal: null + role: assistant + created: 1754937502 + id: chatcmpl-C3RhWZ6jbzOaAe9fKOSr5lWGY5Qi2 + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_ff25b2783a + usage: + completion_tokens: 8 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 14 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 22 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 67a2e52296..3c6d42a243 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -28,7 +28,8 @@ ToolReturnPart, ) from pydantic_ai.models import cached_async_http_client -from pydantic_ai.toolsets import FunctionToolset +from pydantic_ai.tools import ToolDefinition +from pydantic_ai.toolsets import DeferredToolset, FunctionToolset try: from temporalio import workflow @@ -52,7 +53,7 @@ from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset from pydantic_ai.ext.temporal._mcp_server import TemporalMCPServer from pydantic_ai.ext.temporal._model import TemporalModel -except ImportError: +except ImportError: # pragma: lax no cover import pytest pytest.skip('temporal not installed', allow_module_level=True) @@ -63,14 +64,14 @@ from logfire._internal.tracer import _ProxyTracer # pyright: ignore[reportPrivateUsage] from logfire.testing import CaptureLogfire from opentelemetry.trace import ProxyTracer -except ImportError: +except ImportError: # pragma: lax no cover import pytest pytest.skip('logfire not installed', allow_module_level=True) try: from pydantic_ai.mcp import MCPServerStdio -except ImportError: +except ImportError: # pragma: lax no cover import pytest pytest.skip('mcp not installed', allow_module_level=True) @@ -78,7 +79,7 @@ try: from pydantic_ai.models.openai import OpenAIModel from pydantic_ai.providers.openai import OpenAIProvider -except 
ImportError: +except ImportError: # pragma: lax no cover import pytest pytest.skip('openai not installed', allow_module_level=True) @@ -245,6 +246,7 @@ class Response: toolsets=[ FunctionToolset[Deps](tools=[get_country], id='country'), MCPServerStdio('python', ['-m', 'tests.mcp_server'], timeout=20, id='mcp'), + DeferredToolset(tool_defs=[ToolDefinition(name='deferred')], id='deferred'), ], tools=[get_weather], event_stream_handler=event_stream_handler, @@ -264,6 +266,7 @@ class Response: }, tool_activity_config={ 'country': { + 'unknown_tool': ActivityConfig(start_to_close_timeout=timedelta(seconds=150)), 'get_country': False, }, '': { @@ -771,7 +774,7 @@ async def test_temporal_agent(): assert complex_temporal_agent.model.wrapped == complex_agent.model toolsets = complex_temporal_agent.toolsets - assert len(toolsets) == 4 + assert len(toolsets) == 5 # Empty function toolset for the agent's own tools assert isinstance(toolsets[0], FunctionToolset) @@ -796,6 +799,11 @@ async def test_temporal_agent(): assert toolsets[3].id == 'mcp' assert toolsets[3].wrapped == complex_agent.toolsets[2] + # Unwrapped 'deferred' toolset + assert isinstance(toolsets[4], DeferredToolset) + assert toolsets[4].id == 'deferred' + assert toolsets[4] == complex_agent.toolsets[3] + assert [ ActivityDefinition.must_from_callable(activity).name # pyright: ignore[reportUnknownMemberType] for activity in complex_temporal_agent.temporal_activities @@ -864,7 +872,7 @@ class SimpleAgentWorkflowWithRunSync: @workflow.run async def run(self, prompt: str) -> str: result = simple_temporal_agent.run_sync(prompt) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_run_sync_in_workflow(allow_model_requests: None, client: Client): @@ -892,7 +900,7 @@ class SimpleAgentWorkflowWithRunStream: async def run(self, prompt: str) -> str: async with simple_temporal_agent.run_stream(prompt) as result: pass - return await result.get_output() + return await 
result.get_output() # pragma: no cover async def test_temporal_agent_run_stream_in_workflow(allow_model_requests: None, client: Client): @@ -923,7 +931,7 @@ async def run(self, prompt: str) -> str: async with simple_temporal_agent.iter(prompt) as run: async for _ in run: pass - return 'done' + return 'done' # pragma: no cover async def test_temporal_agent_iter_in_workflow(allow_model_requests: None, client: Client): @@ -959,7 +967,7 @@ class SimpleAgentWorkflowWithEventStreamHandler: @workflow.run async def run(self, prompt: str) -> str: result = await simple_temporal_agent.run(prompt, event_stream_handler=simple_event_stream_handler) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_model_requests: None, client: Client): @@ -988,7 +996,7 @@ class SimpleAgentWorkflowWithRunModel: @workflow.run async def run(self, prompt: str) -> str: result = await simple_temporal_agent.run(prompt, model=model) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_run_in_workflow_with_model(allow_model_requests: None, client: Client): @@ -1017,7 +1025,7 @@ class SimpleAgentWorkflowWithRunToolsets: @workflow.run async def run(self, prompt: str) -> str: result = await simple_temporal_agent.run(prompt, toolsets=[FunctionToolset()]) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_run_in_workflow_with_toolsets(allow_model_requests: None, client: Client): @@ -1046,8 +1054,7 @@ class SimpleAgentWorkflowWithOverrideModel: @workflow.run async def run(self, prompt: str) -> str: with simple_temporal_agent.override(model=model): - result = await simple_temporal_agent.run(prompt) - return result.output + pass async def test_temporal_agent_override_model_in_workflow(allow_model_requests: None, client: Client): @@ -1076,8 +1083,7 @@ class SimpleAgentWorkflowWithOverrideToolsets: @workflow.run async def run(self, 
prompt: str) -> str: with simple_temporal_agent.override(toolsets=[FunctionToolset()]): - result = await simple_temporal_agent.run(prompt) - return result.output + pass async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests: None, client: Client): @@ -1106,8 +1112,7 @@ class SimpleAgentWorkflowWithOverrideTools: @workflow.run async def run(self, prompt: str) -> str: with simple_temporal_agent.override(tools=[get_weather]): - result = await simple_temporal_agent.run(prompt) - return result.output + pass async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: None, client: Client): @@ -1131,6 +1136,31 @@ async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: N ) +@workflow.defn +class SimpleAgentWorkflowWithOverrideDeps: + @workflow.run + async def run(self, prompt: str) -> str: + with simple_temporal_agent.override(deps=None): + result = await simple_temporal_agent.run(prompt) + return result.output + + +async def test_temporal_agent_override_deps_in_workflow(allow_model_requests: None, client: Client): + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[SimpleAgentWorkflowWithOverrideDeps], + plugins=[AgentPlugin(simple_temporal_agent)], + ): + output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] + SimpleAgentWorkflowWithOverrideDeps.run, + args=['What is the capital of Mexico?'], + id=SimpleAgentWorkflowWithOverrideDeps.__name__, + task_queue=TASK_QUEUE, + ) + assert output == snapshot('The capital of Mexico is Mexico City.') + + agent_with_sync_tool = Agent(model, name='agent_with_sync_tool', tools=[get_weather]) # This needs to be done before the `TemporalAgent` is bound to the workflow. 
@@ -1149,7 +1179,7 @@ class AgentWorkflowWithSyncToolActivityDisabled: @workflow.run async def run(self, prompt: str) -> str: result = await temporal_agent_with_sync_tool_activity_disabled.run(prompt) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_sync_tool_activity_disabled(allow_model_requests: None, client: Client): @@ -1199,7 +1229,7 @@ async def run(self, prompt: str) -> str: async with model_request_stream(complex_temporal_agent.model, messages) as stream: async for _ in stream: pass - return 'done' + return 'done' # pragma: no cover async def test_temporal_model_stream_direct(client: Client): @@ -1248,7 +1278,7 @@ class AgentWorkflowWithDataclassDeps: @workflow.run async def run(self, prompt: str, deps: DataclassDeps) -> str: result = await temporal_agent_with_dataclass_deps.run(prompt, deps=deps) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_with_non_dict_deps(allow_model_requests: None, client: Client): @@ -1336,11 +1366,11 @@ def setup_logfire(send_to_logfire: bool = True, metrics: Literal[False] | None = interceptor = new_client.config()['interceptors'][0] assert isinstance(interceptor, TracingInterceptor) if isinstance(interceptor.tracer, ProxyTracer): - assert interceptor.tracer._instrumenting_module_name == 'temporalio' # pyright: ignore[reportPrivateUsage] + assert interceptor.tracer._instrumenting_module_name == 'temporalio' # pyright: ignore[reportPrivateUsage] # pragma: lax no cover elif isinstance(interceptor.tracer, _ProxyTracer): - assert interceptor.tracer.instrumenting_module_name == 'temporalio' + assert interceptor.tracer.instrumenting_module_name == 'temporalio' # pragma: lax no cover else: - assert False, f'Unexpected tracer type: {type(interceptor.tracer)}' + assert False, f'Unexpected tracer type: {type(interceptor.tracer)}' # pragma: no cover new_client = await Client.connect(client.service_client.config.target_host, plugins=[plugin]) # We 
can't check if the metrics URL was actually set correctly because it's on a `temporalio.bridge.runtime.Runtime` that we can't read from. From e34851751e1a8388b838903dcb2634ab35742d81 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 19:13:40 +0000 Subject: [PATCH 19/30] Fix typecheck --- tests/test_temporal.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 3c6d42a243..b075c0371a 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -1052,7 +1052,7 @@ async def test_temporal_agent_run_in_workflow_with_toolsets(allow_model_requests @workflow.defn class SimpleAgentWorkflowWithOverrideModel: @workflow.run - async def run(self, prompt: str) -> str: + async def run(self, prompt: str) -> None: with simple_temporal_agent.override(model=model): pass @@ -1081,7 +1081,7 @@ async def test_temporal_agent_override_model_in_workflow(allow_model_requests: N @workflow.defn class SimpleAgentWorkflowWithOverrideToolsets: @workflow.run - async def run(self, prompt: str) -> str: + async def run(self, prompt: str) -> None: with simple_temporal_agent.override(toolsets=[FunctionToolset()]): pass @@ -1110,7 +1110,7 @@ async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests @workflow.defn class SimpleAgentWorkflowWithOverrideTools: @workflow.run - async def run(self, prompt: str) -> str: + async def run(self, prompt: str) -> None: with simple_temporal_agent.override(tools=[get_weather]): pass From 141984e2bb09ee71870c1cb85b9da68b2e8ccd03 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 20:10:48 +0000 Subject: [PATCH 20/30] More test coverage --- tests/test_temporal.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index b075c0371a..ef4500e08f 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -266,9 +266,11 @@ class Response: }, 
tool_activity_config={ 'country': { - 'unknown_tool': ActivityConfig(start_to_close_timeout=timedelta(seconds=150)), 'get_country': False, }, + 'mcp': { + 'get_product_name': ActivityConfig(start_to_close_timeout=timedelta(seconds=150)), + }, '': { 'get_weather': ActivityConfig(start_to_close_timeout=timedelta(seconds=180)), }, @@ -442,11 +444,12 @@ async def event_stream_handler( async for event in stream: events.append(event) - result = await complex_temporal_agent.run( - 'Tell me: the capital of the country; the weather there; the product name', - deps=Deps(country='Mexico'), - event_stream_handler=event_stream_handler, - ) + with complex_temporal_agent.override(deps=Deps(country='Mexico')): + result = await complex_temporal_agent.run( + 'Tell me: the capital of the country; the weather there; the product name', + deps=Deps(country='The Netherlands'), + event_stream_handler=event_stream_handler, + ) assert result.output == snapshot( Response( answers=[ From 9f59c4d76ae73b40db34714152d257acef7f05b0 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 20:18:18 +0000 Subject: [PATCH 21/30] Remove unnecessary 'pragma: no cover' --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- pydantic_ai_slim/pydantic_ai/result.py | 2 +- pydantic_ai_slim/pydantic_ai/toolsets/deferred.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 7a7cc4cf88..94fc6ac47f 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1380,7 +1380,7 @@ async def run_mcp_servers( class _AgentFunctionToolset(FunctionToolset[AgentDepsT]): @property def id(self) -> str: - return '' # pragma: no cover + return '' @property def label(self) -> str: diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index 3f43ddb25f..a0aace15fe 100644 --- 
a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -196,7 +196,7 @@ async def _stream_text_deltas_ungrouped() -> AsyncIterator[tuple[str, int]]: and isinstance(event.part, _messages.TextPart) and event.part.content ): - yield event.part.content, event.index # pragma: no cover + yield event.part.content, event.index elif ( # pragma: no branch isinstance(event, _messages.PartDeltaEvent) and isinstance(event.delta, _messages.TextPartDelta) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py b/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py index 4b7bde27d0..4c74077c4a 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py @@ -27,7 +27,7 @@ def __init__(self, tool_defs: list[ToolDefinition], *, id: str | None = None): @property def id(self) -> str | None: - return self._id # pragma: no cover + return self._id async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: return { From 96f24ecb075167762e8931482c2353d437f09f2f Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 11 Aug 2025 20:53:47 +0000 Subject: [PATCH 22/30] Move temporal module to pydantic_ai.durable_exec --- docs/api/ext.md | 2 -- pydantic_ai_slim/pydantic_ai/durable_exec/__init__.py | 0 .../{ext => durable_exec}/temporal/__init__.py | 0 .../pydantic_ai/{ext => durable_exec}/temporal/_agent.py | 2 +- .../{ext => durable_exec}/temporal/_function_toolset.py | 0 .../{ext => durable_exec}/temporal/_logfire.py | 0 .../{ext => durable_exec}/temporal/_mcp_server.py | 0 .../pydantic_ai/{ext => durable_exec}/temporal/_model.py | 0 .../{ext => durable_exec}/temporal/_run_context.py | 0 .../{ext => durable_exec}/temporal/_toolset.py | 3 ++- tests/test_temporal.py | 8 ++++---- 11 files changed, 7 insertions(+), 8 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/durable_exec/__init__.py rename pydantic_ai_slim/pydantic_ai/{ext => 
durable_exec}/temporal/__init__.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_agent.py (99%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_function_toolset.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_logfire.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_mcp_server.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_model.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_run_context.py (100%) rename pydantic_ai_slim/pydantic_ai/{ext => durable_exec}/temporal/_toolset.py (97%) diff --git a/docs/api/ext.md b/docs/api/ext.md index beeac4fe72..7f01b44d45 100644 --- a/docs/api/ext.md +++ b/docs/api/ext.md @@ -1,7 +1,5 @@ # `pydantic_ai.ext` -::: pydantic_ai.ext.temporal - ::: pydantic_ai.ext.langchain ::: pydantic_ai.ext.aci diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/__init__.py b/pydantic_ai_slim/pydantic_ai/durable_exec/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/__init__.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py similarity index 99% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index a41b4c5d18..97a7946506 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -19,8 +19,8 @@ ) from pydantic_ai._run_context import AgentDepsT from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, 
WrapperAgent +from pydantic_ai.durable_exec.temporal._run_context import TemporalRunContext from pydantic_ai.exceptions import UserError -from pydantic_ai.ext.temporal._run_context import TemporalRunContext from pydantic_ai.models import Model from pydantic_ai.output import OutputDataT, OutputSpec from pydantic_ai.result import StreamedRunResult diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_function_toolset.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_logfire.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_logfire.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_logfire.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_mcp_server.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_model.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_run_context.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py diff --git a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py 
b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py similarity index 97% rename from pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py rename to pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py index 8c8441685d..afa1551630 100644 --- a/pydantic_ai_slim/pydantic_ai/ext/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py @@ -6,12 +6,13 @@ from temporalio.workflow import ActivityConfig from pydantic_ai._run_context import AgentDepsT -from pydantic_ai.ext.temporal._run_context import TemporalRunContext from pydantic_ai.mcp import MCPServer from pydantic_ai.toolsets.abstract import AbstractToolset from pydantic_ai.toolsets.function import FunctionToolset from pydantic_ai.toolsets.wrapper import WrapperToolset +from ._run_context import TemporalRunContext + class TemporalWrapperToolset(WrapperToolset[Any], ABC): @property diff --git a/tests/test_temporal.py b/tests/test_temporal.py index ef4500e08f..e7940dcac3 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -42,7 +42,7 @@ from temporalio.worker import Worker from temporalio.workflow import ActivityConfig - from pydantic_ai.ext.temporal import ( + from pydantic_ai.durable_exec.temporal import ( AgentPlugin, LogfirePlugin, PydanticAIPlugin, @@ -50,9 +50,9 @@ TemporalRunContext, TemporalRunContextWithDeps, ) - from pydantic_ai.ext.temporal._function_toolset import TemporalFunctionToolset - from pydantic_ai.ext.temporal._mcp_server import TemporalMCPServer - from pydantic_ai.ext.temporal._model import TemporalModel + from pydantic_ai.durable_exec.temporal._function_toolset import TemporalFunctionToolset + from pydantic_ai.durable_exec.temporal._mcp_server import TemporalMCPServer + from pydantic_ai.durable_exec.temporal._model import TemporalModel except ImportError: # pragma: lax no cover import pytest From 7e60b95b27d4175297bbe8d33c20b7ba4149f164 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 01:00:43 +0000 Subject: 
[PATCH 23/30] Add docs --- docs/api/agent.md | 1 + docs/api/durable_exec.md | 3 + docs/temporal.md | 229 ++++++++++++++++++ mkdocs.yml | 2 + .../pydantic_ai/agent/__init__.py | 1 + .../durable_exec/temporal/_agent.py | 43 ++-- .../durable_exec/temporal/_run_context.py | 13 +- tests/test_temporal.py | 34 +-- 8 files changed, 293 insertions(+), 33 deletions(-) create mode 100644 docs/api/durable_exec.md create mode 100644 docs/temporal.md diff --git a/docs/api/agent.md b/docs/api/agent.md index bfbfe2a3ae..668eb04edf 100644 --- a/docs/api/agent.md +++ b/docs/api/agent.md @@ -12,3 +12,4 @@ - RunOutputDataT - capture_run_messages - InstrumentationSettings + - EventStreamHandler diff --git a/docs/api/durable_exec.md b/docs/api/durable_exec.md new file mode 100644 index 0000000000..b4269eed78 --- /dev/null +++ b/docs/api/durable_exec.md @@ -0,0 +1,3 @@ +# `pydantic_ai.durable_exec` + +::: pydantic_ai.durable_exec.temporal diff --git a/docs/temporal.md b/docs/temporal.md new file mode 100644 index 0000000000..6eb214f43c --- /dev/null +++ b/docs/temporal.md @@ -0,0 +1,229 @@ +# Durable Execution with Temporal + +Pydantic AI allows you to build durable agents that never lose their progress and handle long-running, asynchronous, and human-in-the-loop workflows with production-grade reliability. Durable agents have full support for [streaming](agents.md#streaming-all-events) and [MCP](mcp/client.md), with the added benefit of fault tolerance. + +[Temporal](https://temporal.io) is a popular [durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) platform that's natively supported by Pydantic AI. +The integration only uses Pydantic AI's public interface, so it can also serve as a reference for how to integrate with other durable execution systems. 
+ +### Durable Execution + +In Temporal's durable execution implementation, a program that crashes or encounters an exception while interacting with a model or API will retry until it can successfully complete. + +Temporal relies primarily on a replay mechanism to recover from failures. +As the program makes progress, Temporal saves key inputs and decisions, allowing a re-started program to pick up right where it left off. + +The key to making this work is to separate the application's repeatable (deterministic) and non-repeatable (non-deterministic) parts: + +1. Deterministic pieces, termed [**workflows**](https://docs.temporal.io/workflow-definition), execute the same way when re-run with the same inputs. +2. Non-deterministic pieces, termed [**activities**](https://docs.temporal.io/activities), can run arbitrary code, performing I/O and any other operations. + +Workflow code can run for extended periods and, if interrupted, resume exactly where it left off. +Activity code faces no restrictions on I/O or external interactions, but if it fails part-way through it restarts from the beginning. + +In the case of Pydantic AI agents, this means that [model requests](models/index.md), [tool calls](tools.md) that may require I/O, and [MCP server communication](mcp/client.md) all need to be offloaded to Temporal activities, while the logic that coordinates them (i.e. the agent run) lives in the workflow. Code that handles a scheduled job or web request can then execute the workflow, which will in turn execute the activities as needed. + +The diagram below shows the overall architecture of an agentic application in Temporal. +The Temporal Server is responsible for tracking program execution and making sure associated state is preserved reliably (i.e., stored to a database, possibly replicated across cloud regions). +Temporal Server manages data in encrypted form, so all data processing occurs on the Worker, which runs the workflow and activities. 
+ + +```text + +---------------------+ + | Temporal Server | (Stores workflow state, + +---------------------+ schedules activities, + ^ persists progress) + | + Save state, | Schedule Tasks, + progress, | load state on resume + timeouts | + | ++------------------------------------------------------+ +| Worker | +| +----------------------------------------------+ | +| | Workflow Code | | +| | (Agent Run Loop) | | +| +----------------------------------------------+ | +| | | | | +| v v v | +| +-----------+ +------------+ +-------------+ | +| | Activity | | Activity | | Activity | | +| | (Tool) | | (MCP Tool) | | (Model API) | | +| +-----------+ +------------+ +-------------+ | +| | | | | ++------------------------------------------------------+ + | | | + v v v + [External APIs, services, databases, etc.] +``` + +See the [Temporal documentation](https://docs.temporal.io/evaluate/understanding-temporal#temporal-application-the-building-blocks) for more information. + +## Durable Agent + +Any agent can be wrapped in a [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent] to get a durable agent that can be used inside a deterministic Temporal workflow, by automatically offloading all work that requires IO (namely model requests, tool calls, and MCP server communication) to non-deterministic activities. + +At the time of wrapping, the agent's [model](models/index.md) and [toolsets](toolsets.md) (including function tools registered on the agent and MCP servers) are frozen, activities are dynamically created for each, and the original model and toolsets are wrapped to call on the worker to execute the corresponding activity instead of directly performing the action inside the workflow. The original agent can still be used as normal outside of the Temporal workflow, but any changes to its model or toolsets after wrapping will not be reflected in the durable agent. 
+ +This is a simple but complete example of wrapping an agent for durable execution, creating a Temporal workflow with durable execution logic, connecting to a Temporal server, and running the workflow from non-durable code. + +All it requires is a Temporal server to be [running locally](https://github.com/temporalio/temporal#download-and-start-temporal-server-locally): + +```sh +brew install temporal +temporal server start-dev +``` + +```python {title="temporal_agent.py" test="skip"} +import uuid + +from temporalio import workflow +from temporalio.client import Client +from temporalio.worker import Worker + +from pydantic_ai import Agent +from pydantic_ai.durable_exec.temporal import AgentPlugin, PydanticAIPlugin, TemporalAgent + +agent = Agent( + 'gpt-5', + instructions="You're an expert in geography.", + name='geography', # (10)! +) + +temporal_agent = TemporalAgent(agent) # (1)! + + +@workflow.defn +class GeographyWorkflow: # (2)! + @workflow.run + async def run(self, prompt: str) -> str: + result = await temporal_agent.run(prompt) # (3)! + return result.output + + +async def main(): + client = await Client.connect( # (4)! + 'localhost:7233', # (5)! + plugins=[PydanticAIPlugin()], # (6)! + ) + + async with Worker( # (7)! + client, + task_queue='geography', + workflows=[GeographyWorkflow], + plugins=[AgentPlugin(temporal_agent)], # (8)! + ): + output = await client.execute_workflow( # (9)! + GeographyWorkflow.run, + args=['What is the capital of Mexico?'], + id=f'geography-{uuid.uuid4()}', + task_queue='geography', + ) + print(output) + #> Mexico City (Ciudad de México, CDMX) +``` + +1. The original `Agent` cannot be used inside a deterministic Temporal workflow, but the `TemporalAgent` can. +2. As explained above, the workflow represents a deterministic piece of code that can use non-deterministic activities for operations that require IO. +3. 
[`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] works just like [`Agent.run()`][pydantic_ai.Agent.run], but it will automatically offload model requests, tool calls, and MCP server communication to Temporal activities. +4. We connect to the Temporal server which keeps track of workflow and activity execution. +5. This assumes the Temporal server is [running locally](https://github.com/temporalio/temporal#download-and-start-temporal-server-locally). +6. The [`PydanticAIPlugin`][pydantic_ai.durable_exec.temporal.PydanticAIPlugin] tells Temporal to use Pydantic for serialization and deserialization, and to treat [`UserError`][pydantic_ai.exceptions.UserError] exceptions as non-retryable. +7. We start the worker process that will listen on the specified task queue and run workflows and activities. +8. The [`AgentPlugin`][pydantic_ai.durable_exec.temporal.AgentPlugin] registers the `TemporalAgent`'s activities with the worker. +9. We call on the server to execute the workflow on a worker that's listening on the specified task queue. +10. The agent's `name` is used to uniquely identify its activities. + +_(This example is complete, it can be run "as is" — you'll need to add `asyncio.run(main())` to run `main`)_ + +In a real world application, the agent, workflow, and worker are typically defined separately from the code that calls for a workflow to be executed. +Because Temporal workflows need to be defined at the top level of the file and the `TemporalAgent` instance is needed inside the workflow and when starting the worker (to register the activities), it needs to be defined at the top level of the file as well. + +For more information on how to use Temporal in Python applications, see their [Python SDK guide](https://docs.temporal.io/develop/python). 
+ +## Agent and Toolset Requirements + +To ensure that Temporal knows what code to run when an activity fails or is interrupted and then restarted, even if your code is changed in between, each activity needs to have a name that's stable and unique. + +When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset] and [`MCPServer`][pydantic_ai.mcp.MCPServer]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the temporal agent has been deployed to production as this would break active workflows. + +Other than that, any agent and toolset will just work! + +## Agent Run Dependencies + +By default, a [dependencies](dependencies.md) object provided to [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] will not be available on the [`RunContext`][pydantic_ai.tools.RunContext] that's passed to tool functions and the [event stream handler](#streaming), as it's not guaranteed to be serializable. + +You can use the `run_context_type` parameter on the `TemporalAgent` constructor to specify a different run context type that will be used to serialize and deserialize the run context for use inside a Temporal activity: + +- If the `Agent`'s `deps_type` is a JSON-serializable dictionary, like a `TypedDict`, you can use [`TemporalRunContextWithDeps`][pydantic_ai.durable_exec.temporal.TemporalRunContextWithDeps]. +- If not, create a [`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. 
+ +## Streaming + +Because Temporal activities cannot stream output directly to the activity call site, [`Agent.run_stream()`][pydantic_ai.Agent.run_stream] and [`Agent.iter()`][pydantic_ai.Agent.iter] are not supported. + +Instead, you can implement streaming by setting an [`event_stream_handler`][pydantic_ai.agent.EventStreamHandler] on the `Agent` or `TemporalAgent` instance and using [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] inside the workflow. +The event stream handler function will receive the agent [run context][pydantic_ai.tools.RunContext] and an async iterable of events from the model's streaming response and the agent's execution of tools. For examples, see the [streaming docs](agents.md#streaming-all-events). + +As the streaming model request activity, workflow, and workflow execution call all take place in separate processes, passing data between them requires some care: + +- To get data from the workflow call site or workflow to the event stream handler, you can use a [dependencies object](#agent-run-dependencies). +- To get data from the event stream handler to the workflow, workflow call site, or a frontend, you need to use an external system that the event stream handler can write to and the event consumer can read from, like a message queue. You can use the dependency object to make sure the same connection string or other unique ID is available in all the places that need it. + +## Activity Configuration + +Temporal activity configuration, like timeouts and retry policies, can be customized by passing [`temporalio.workflow.ActivityConfig`](https://python.temporal.io/temporalio.workflow.ActivityConfig.html) objects to the `TemporalAgent` constructor: + +- `activity_config`: The base Temporal activity config to use for all activities. If no config is provided, a `start_to_close_timeout` of 60 seconds is used. +- `model_activity_config`: The Temporal activity config to use for model request activities. 
This is merged with the base activity config. +- `toolset_activity_config`: The Temporal activity config to use for get-tools and call-tool activities for specific toolsets identified by ID. This is merged with the base activity config. +- `tool_activity_config`: The Temporal activity config to use for specific tool call activities identified by toolset ID and tool name. + This is merged with the base and toolset-specific activity configs. + + If a tool does not use IO, you can specify `False` to disable using an activity. Note that the tool is required to be defined as an `async` function as non-async tools are run in threads which are non-deterministic and thus not supported outside of activities. + +## Activity Retries + +On top of the automatic retries for request failures that Temporal will perform, Pydantic AI and various provider API clients also have their own request retry logic. Enabling these at the same time will cause the request to be retried more often than expected, with improper `Retry-After` handling. + +When using Temporal, it's recommended to not use [HTTP Request Retries](retries.md) and to turn off your provider API client's own retry logic, for example by setting `max_retries=0` on a [custom `OpenAIProvider` API client](models/openai.md#custom-openai-client). + +You can customize Temporal's retry policy using [activity configuration](#activity-configuration). + +## Observability with Logfire + +Temporal generates telemetry events and metrics for each workflow and activity execution, and Pydantic AI generates events for each agent run, model request and tool call. These can be sent to [Logfire](logfire.md) to get a complete picture of what's happening in your application. 
+ +To use Logfire with Temporal, you need to pass a [`LogfirePlugin`][pydantic_ai.durable_exec.temporal.LogfirePlugin] object to Temporal's `Client.connect()`: + +```py {title="logfire_plugin.py" test="skip" noqa="F841"} +from pydantic_ai.durable_exec.temporal import PydanticAIPlugin, LogfirePlugin + +from temporalio.client import Client + +async def main(): + client = await Client.connect( + 'localhost:7233', + plugins=[PydanticAIPlugin(), LogfirePlugin()], + ) +``` + +By default, the `LogfirePlugin` will instrument Temporal (including metrics) and Pydantic AI and send all data to Logfire. To customize Logfire configuration and instrumentation, you can pass a `logfire_setup` function to the `LogfirePlugin` constructor and return a custom `Logfire` instance (i.e. the result of `logfire.configure()`). To disable sending Temporal metrics to Logfire, you can pass `metrics=False` to the `LogfirePlugin` constructor. + +## Known Issues + +### Pandas + +When `logfire.info` is used inside an activity and the `pandas` package is among your project's dependencies, you may encounter the following error which seems to be the result of an import race condition: + +``` +AttributeError: partially initialized module 'pandas' has no attribute '_pandas_parser_CAPI' (most likely due to a circular import) +``` + +To fix this, you can use the [`temporalio.workflow.unsafe.imports_passed_through()`](https://python.temporal.io/temporalio.workflow.unsafe.html#imports_passed_through) context manager to proactively import the package and not have it be reloaded in the workflow sandbox: + +```python {title="temporal_activity.py" test="skip" noqa="F401"} +from temporalio import workflow + +with workflow.unsafe.imports_passed_through(): + import pandas +``` diff --git a/mkdocs.yml b/mkdocs.yml index e59b899bc1..7fd5611d65 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -44,6 +44,7 @@ nav: - builtin-tools.md - common-tools.md - retries.md + - temporal.md - MCP: - mcp/index.md - mcp/client.md @@ 
-75,6 +76,7 @@ nav: - api/toolsets.md - api/builtin_tools.md - api/common_tools.md + - api/durable_exec.md - api/output.md - api/result.md - api/messages.md diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 94fc6ac47f..0b25b4c2a8 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -82,6 +82,7 @@ 'InstrumentationSettings', 'WrapperAgent', 'AbstractAgent', + 'EventStreamHandler', ) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 97a7946506..a7503c077f 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -40,6 +40,8 @@ def __init__( self, wrapped: AbstractAgent[AgentDepsT, OutputDataT], *, + name: str | None = None, + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, activity_config: ActivityConfig | None = None, model_activity_config: ActivityConfig | None = None, toolset_activity_config: dict[str, ActivityConfig] | None = None, @@ -56,19 +58,25 @@ def __init__( AbstractToolset[Any], ] = temporalize_toolset, ): - """Wrap an agent to allow it to be used inside a Temporal workflow, by automatically moving model requests and tool calls to Temporal activities. + """Wrap an agent to enable it to be used inside a Temporal workflow, by automatically offloading model requests, tool calls, and MCP server communication to Temporal activities. + + After wrapping, the original agent can still be used as normal outside of the Temporal workflow, but any changes to its model or toolsets after wrapping will not be reflected in the durable agent. Args: wrapped: The agent to wrap. - activity_config: The base Temporal activity config to use for all activities. + name: Optional unique agent name to use in the Temporal activities' names. 
If not provided, the agent's `name` will be used. + event_stream_handler: Optional event stream handler to use instead of the one set on the wrapped agent. + activity_config: The base Temporal activity config to use for all activities. If no config is provided, a `start_to_close_timeout` of 60 seconds is used. model_activity_config: The Temporal activity config to use for model request activities. This is merged with the base activity config. toolset_activity_config: The Temporal activity config to use for get-tools and call-tool activities for specific toolsets identified by ID. This is merged with the base activity config. tool_activity_config: The Temporal activity config to use for specific tool call activities identified by toolset ID and tool name. - This is merged with the base and toolset-specific activity configs. Use `False` to disable using an activity for a specific tool. + This is merged with the base and toolset-specific activity configs. + If a tool does not use IO, you can specify `False` to disable using an activity. + Note that the tool is required to be defined as an `async` function as non-async tools are run in threads which are non-deterministic and thus not supported outside of activities. run_context_type: The `TemporalRunContext` subclass to use to serialize and deserialize the run context for use inside a Temporal activity. By default, only the `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute. - If `deps` is a JSON-serializable dictionary, you can use `TemporalRunContextWithDeps` to make the `deps` attribute available as well. + If `deps` is a JSON-serializable dictionary, like a `TypedDict`, you can use `TemporalRunContextWithDeps` to make the `deps` attribute available as well. 
If `deps` is of a different type, create a `TemporalRunContext` subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. temporalize_toolset_func: Optional function to use to prepare "leaf" toolsets (i.e. those that implement their own tool listing and calling) for Temporal by wrapping them in a `TemporalWrapperToolset` that moves methods that require IO to Temporal activities. If not provided, only `FunctionToolset` and `MCPServer` will be prepared for Temporal. @@ -88,27 +96,26 @@ def __init__( toolset_activity_config = toolset_activity_config or {} tool_activity_config = tool_activity_config or {} - agent = wrapped - - if agent.name is None: + self._name = name or wrapped.name + if self._name is None: raise UserError( "An agent needs to have a unique `name` in order to be used with Temporal. The name will be used to identify the agent's activities within the workflow." ) - activity_name_prefix = f'agent__{agent.name}' + activity_name_prefix = f'agent__{self._name}' activities: list[Callable[..., Any]] = [] - if not isinstance(agent.model, Model): + if not isinstance(wrapped.model, Model): raise UserError( 'An agent needs to have a `model` in order to be used with Temporal, it cannot be set at agent run time.' 
) temporal_model = TemporalModel( - agent.model, + wrapped.model, activity_name_prefix=activity_name_prefix, activity_config=activity_config | model_activity_config, run_context_type=run_context_type, - event_stream_handler=agent.event_stream_handler, + event_stream_handler=event_stream_handler or wrapped.event_stream_handler, ) activities.extend(temporal_model.temporal_activities) @@ -130,7 +137,7 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset activities.extend(toolset.temporal_activities) return toolset - temporal_toolsets = [toolset.visit_and_replace(temporalize_toolset) for toolset in agent.toolsets] + temporal_toolsets = [toolset.visit_and_replace(temporalize_toolset) for toolset in wrapped.toolsets] self._model = temporal_model self._toolsets = temporal_toolsets @@ -350,7 +357,9 @@ def run_sync( The result of the run. """ if workflow.in_workflow(): - raise UserError('`agent.run_sync()` cannot be used inside a Temporal workflow. Use `agent.run()` instead.') + raise UserError( + '`agent.run_sync()` cannot be used inside a Temporal workflow. Use `await agent.run()` instead.' + ) return super().run_sync( user_prompt, @@ -451,7 +460,9 @@ async def main(): """ if workflow.in_workflow(): raise UserError( - '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + '`agent.run_stream()` cannot currently be used inside a Temporal workflow. ' + 'Set an `event_stream_handler` on the agent and use `agent.run()` instead. ' + 'Please file an issue if this is not sufficient for your use case.' ) async with super().run_stream( @@ -600,7 +611,9 @@ async def main(): if workflow.in_workflow(): if not self._temporal_overrides_active.get(): raise UserError( - '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + '`agent.iter()` cannot currently be used inside a Temporal workflow. 
' + 'Set an `event_stream_handler` on the agent and use `agent.run()` instead. ' + 'Please file an issue if this is not sufficient for your use case.' ) if model is not None: diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py index 3096aa42e3..222e3f6bcc 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py @@ -7,6 +7,15 @@ class TemporalRunContext(RunContext[Any]): + """The [`RunContext`][pydantic_ai.tools.RunContext] subclass to use to serialize and deserialize the run context for use inside a Temporal activity. + + By default, only the `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. + To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent]. + + If `deps` is a JSON-serializable dictionary, like a `TypedDict`, you can use [`TemporalRunContextWithDeps`][pydantic_ai.durable_exec.temporal.TemporalRunContextWithDeps] to make the `deps` attribute available as well. + If `deps` is of a different type, create a `TemporalRunContext` subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. 
+ """ + def __init__(self, **kwargs: Any): self.__dict__ = kwargs setattr( @@ -45,11 +54,13 @@ def deserialize_run_context(cls, ctx: dict[str, Any]) -> TemporalRunContext: class TemporalRunContextWithDeps(TemporalRunContext): + """[`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass that includes JSON-serializable dictionary `deps`, like a `TypedDict`.""" + @classmethod def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: if not isinstance(ctx.deps, dict): raise UserError( - '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary. ' + '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary, like a `TypedDict`. ' 'To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' ) return {**super().serialize_run_context(ctx), 'deps': ctx.deps} # pyright: ignore[reportUnknownMemberType] diff --git a/tests/test_temporal.py b/tests/test_temporal.py index e7940dcac3..8ff155d4b3 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -127,7 +127,7 @@ def uninstrument_pydantic_ai() -> Iterator[None]: @contextmanager -def temporal_raises(exc_type: type[Exception], exc_message: str) -> Iterator[None]: +def workflow_raises(exc_type: type[Exception], exc_message: str) -> Iterator[None]: """Helper for asserting that a Temporal workflow fails with the expected error.""" with pytest.raises(WorkflowFailureError) as exc_info: yield @@ -885,9 +885,9 @@ async def test_temporal_agent_run_sync_in_workflow(allow_model_requests: None, c workflows=[SimpleAgentWorkflowWithRunSync], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, - snapshot('`agent.run_sync()` cannot be used inside a Temporal workflow. 
Use `agent.run()` instead.'), + snapshot('`agent.run_sync()` cannot be used inside a Temporal workflow. Use `await agent.run()` instead.'), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] SimpleAgentWorkflowWithRunSync.run, @@ -913,10 +913,10 @@ async def test_temporal_agent_run_stream_in_workflow(allow_model_requests: None, workflows=[SimpleAgentWorkflowWithRunStream], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( - '`agent.run_stream()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + '`agent.run_stream()` cannot currently be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead. Please file an issue if this is not sufficient for your use case.' ), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] @@ -944,10 +944,10 @@ async def test_temporal_agent_iter_in_workflow(allow_model_requests: None, clien workflows=[SimpleAgentWorkflowWithIter], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( - '`agent.iter()` cannot be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' + '`agent.iter()` cannot currently be used inside a Temporal workflow. Set an `event_stream_handler` on the agent and use `agent.run()` instead. Please file an issue if this is not sufficient for your use case.' 
), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] @@ -980,7 +980,7 @@ async def test_temporal_agent_run_in_workflow_with_event_stream_handler(allow_mo workflows=[SimpleAgentWorkflowWithEventStreamHandler], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Event stream handler cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' @@ -1009,7 +1009,7 @@ async def test_temporal_agent_run_in_workflow_with_model(allow_model_requests: N workflows=[SimpleAgentWorkflowWithRunModel], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Model cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' @@ -1038,7 +1038,7 @@ async def test_temporal_agent_run_in_workflow_with_toolsets(allow_model_requests workflows=[SimpleAgentWorkflowWithRunToolsets], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Toolsets cannot be set at agent run time inside a Temporal workflow, it must be set at agent creation time.' @@ -1067,7 +1067,7 @@ async def test_temporal_agent_override_model_in_workflow(allow_model_requests: N workflows=[SimpleAgentWorkflowWithOverrideModel], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Model cannot be contextually overridden inside a Temporal workflow, it must be set at agent creation time.' @@ -1096,7 +1096,7 @@ async def test_temporal_agent_override_toolsets_in_workflow(allow_model_requests workflows=[SimpleAgentWorkflowWithOverrideToolsets], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Toolsets cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' 
@@ -1125,7 +1125,7 @@ async def test_temporal_agent_override_tools_in_workflow(allow_model_requests: N workflows=[SimpleAgentWorkflowWithOverrideTools], plugins=[AgentPlugin(simple_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.' @@ -1192,7 +1192,7 @@ async def test_temporal_agent_sync_tool_activity_disabled(allow_model_requests: workflows=[AgentWorkflowWithSyncToolActivityDisabled], plugins=[AgentPlugin(temporal_agent_with_sync_tool_activity_disabled)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( "Temporal activity config for tool 'get_weather' has been explicitly set to `False` (activity disabled), but non-async tools are run in threads which are not supported outside of an activity. Make the tool function async instead." @@ -1242,7 +1242,7 @@ async def test_temporal_model_stream_direct(client: Client): workflows=[DirectStreamWorkflow], plugins=[AgentPlugin(complex_temporal_agent)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( 'A Temporal model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.' @@ -1291,10 +1291,10 @@ async def test_temporal_agent_with_non_dict_deps(allow_model_requests: None, cli workflows=[AgentWorkflowWithDataclassDeps], plugins=[AgentPlugin(temporal_agent_with_dataclass_deps)], ): - with temporal_raises( + with workflow_raises( UserError, snapshot( - '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary. To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' 
+ '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary, like a `TypedDict`. To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' ), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] From b66517a48bfac029869900a6cc59ae6e3f3d0d96 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 12:56:41 +0000 Subject: [PATCH 24/30] Add TemporalAgent.name getter and setter --- .../pydantic_ai/durable_exec/temporal/_agent.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index a7503c077f..c91b2b5652 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -102,7 +102,7 @@ def __init__( "An agent needs to have a unique `name` in order to be used with Temporal. The name will be used to identify the agent's activities within the workflow." 
) - activity_name_prefix = f'agent__{self._name}' + activity_name_prefix = f'agent__{self.name}' activities: list[Callable[..., Any]] = [] if not isinstance(wrapped.model, Model): @@ -145,6 +145,14 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset self._temporal_overrides_active: ContextVar[bool] = ContextVar('_temporal_overrides_active', default=False) + @property + def name(self) -> str | None: + return self._name + + @name.setter + def name(self, value: str | None) -> None: + self._name = value + @property def model(self) -> Model: return self._model From 983c2c565894cce0240bdd83fadffd2c17fc4253 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 13:27:52 +0000 Subject: [PATCH 25/30] Disable TemporalAgent.name setter --- .../pydantic_ai/durable_exec/temporal/_agent.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index c91b2b5652..c1a26d6d2c 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -150,8 +150,10 @@ def name(self) -> str | None: return self._name @name.setter - def name(self, value: str | None) -> None: - self._name = value + def name(self, value: str | None) -> None: # pragma: no cover + raise UserError( + 'The agent name cannot be changed after creation. If you need to change the name, create a new agent.' 
+ ) @property def model(self) -> Model: From f343eb522b0739868ef2e7547fb0d36245ab65c9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 13:44:31 +0000 Subject: [PATCH 26/30] Bump changelog date --- docs/changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/changelog.md b/docs/changelog.md index 80ab36a57e..e62bc58f22 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -12,7 +12,7 @@ Pydantic AI is still pre-version 1, so breaking changes will occur, however: !!! note Here's a filtered list of the breaking changes for each version to help you upgrade Pydantic AI. -### v0.7.0 (2025-08-11) +### v0.7.0 (2025-08-12) See [#2458](https://github.com/pydantic/pydantic-ai/pull/2458) - `pydantic_ai.models.StreamedResponse` now yields a `FinalResultEvent` along with the existing `PartStartEvent` and `PartDeltaEvent`. If you're using `pydantic_ai.direct.model_request_stream` or `pydantic_ai.direct.model_request_stream_sync`, you may need to update your code to account for this. 
From f12e4fda1d75bf0ea72320c9e3a9eeb363b7cf6c Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 22:53:27 +0000 Subject: [PATCH 27/30] Automatically serialize deps --- docs/temporal.md | 12 +- .../pydantic_ai/agent/__init__.py | 5 + .../pydantic_ai/agent/abstract.py | 6 + pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 4 + .../durable_exec/temporal/__init__.py | 11 +- .../durable_exec/temporal/_agent.py | 31 +++-- .../temporal/_function_toolset.py | 30 +++-- .../durable_exec/temporal/_mcp_server.py | 60 +++++---- .../durable_exec/temporal/_model.py | 32 +++-- .../durable_exec/temporal/_run_context.py | 34 ++--- .../durable_exec/temporal/_toolset.py | 16 ++- ...l_agent_with_unserializable_deps_type.yaml | 95 ++++++++++++++ tests/test_temporal.py | 121 +++++------------- 13 files changed, 270 insertions(+), 187 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_temporal_agent_with_unserializable_deps_type.yaml diff --git a/docs/temporal.md b/docs/temporal.md index 6eb214f43c..d7e8d253e4 100644 --- a/docs/temporal.md +++ b/docs/temporal.md @@ -148,14 +148,14 @@ When `TemporalAgent` dynamically creates activities for the wrapped agent's mode Other than that, any agent and toolset will just work! -## Agent Run Dependencies +## Agent Run Context and Dependencies -By default, a [dependencies](dependencies.md) object provided to [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] will not be available on the [`RunContext`][pydantic_ai.tools.RunContext] that's passed to tool functions and the [event stream handler](#streaming), as it's not guaranteed to be serializable. +As workflows and activities run in separate processes, any values passed between them need to be serializable. As these payloads are stored in the workflow execution event history, Temporal limits their size to 2MB. 
-You can use the `run_context_type` parameter on the `TemporalAgent` constructor to specify a different run context type that will be used to serialize and deserialize the run context for use inside a Temporal activity: +To account for these limitations, tool functions and the [event stream handler](#streaming) running inside activities receive a limited version of the agent's [`RunContext`][pydantic_ai.tools.RunContext], and it's your responsibility to make sure that the [dependencies](dependencies.md) object provided to [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] can be serialized using Pydantic. -- If the `Agent`'s `deps_type` is a JSON-serializable dictionary, like a `TypedDict`, you can use [`TemporalRunContextWithDeps`][pydantic_ai.durable_exec.temporal.TemporalRunContextWithDeps]. -- If not, create a [`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. +Specifically, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `retry`, and `run_step` fields are available by default, and trying to access `model`, `usage`, `prompt`, `messages`, or `tracer` will raise an error. +If you need one or more of these attributes to be available inside activities, you can create a [`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass with custom `serialize_run_context` and `deserialize_run_context` class methods and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent] as `run_context_type`. 
## Streaming @@ -166,7 +166,7 @@ The event stream handler function will receive the agent [run context][pydantic_ As the streaming model request activity, workflow, and workflow execution call all take place in separate processes, passing data between them requires some care: -- To get data from the workflow call site or workflow to the event stream handler, you can use a [dependencies object](#agent-run-dependencies). +- To get data from the workflow call site or workflow to the event stream handler, you can use a [dependencies object](#agent-run-context-and-dependencies). - To get data from the event stream handler to the workflow, workflow call site, or a frontend, you need to use an external system that the event stream handler can write to and the event consumer can read from, like a message queue. You can use the dependency object to make sure the same connection string or other unique ID is available in all the places that need it. ## Activity Configuration diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 0b25b4c2a8..de96eabe24 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -402,6 +402,11 @@ def name(self, value: str | None) -> None: """Set the name of the agent, used for logging.""" self._name = value + @property + def deps_type(self) -> type: + """The type of dependencies used by the agent.""" + return self._deps_type + @property def output_type(self) -> OutputSpec[OutputDataT]: """The type of data output by agent runs, used to validate the data returned by the model, defaults to `str`.""" diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 82df047f73..8a2c685c19 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -93,6 +93,12 @@ def name(self, value: str | None) -> None: """Set the name of the 
agent, used for logging.""" raise NotImplementedError + @property + @abstractmethod + def deps_type(self) -> type: + """The type of dependencies used by the agent.""" + raise NotImplementedError + @property @abstractmethod def output_type(self) -> OutputSpec[OutputDataT]: diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index bd39e4bded..7554f26a2c 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -43,6 +43,10 @@ def name(self) -> str | None: def name(self, value: str | None) -> None: self.wrapped.name = value + @property + def deps_type(self) -> type: + return self.wrapped.deps_type + @property def output_type(self) -> OutputSpec[OutputDataT]: return self.wrapped.output_type diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py index 4f1e299942..bb68fb9a67 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py @@ -5,6 +5,7 @@ from dataclasses import replace from typing import Any, Callable +from pydantic.errors import PydanticUserError from temporalio.client import ClientConfig, Plugin as ClientPlugin from temporalio.contrib.pydantic import PydanticPayloadConverter, pydantic_data_converter from temporalio.converter import DefaultPayloadConverter @@ -14,7 +15,7 @@ from ...exceptions import UserError from ._agent import TemporalAgent from ._logfire import LogfirePlugin -from ._run_context import TemporalRunContext, TemporalRunContextWithDeps +from ._run_context import TemporalRunContext from ._toolset import TemporalWrapperToolset __all__ = [ @@ -23,7 +24,6 @@ 'LogfirePlugin', 'AgentPlugin', 'TemporalRunContext', - 'TemporalRunContextWithDeps', 'TemporalWrapperToolset', ] @@ -61,8 +61,11 @@ def configure_worker(self, config: WorkerConfig) -> WorkerConfig: ), ) - # 
pydantic_ai.exceptions.UserError is not retryable - config['workflow_failure_exception_types'] = [*config.get('workflow_failure_exception_types', []), UserError] # pyright: ignore[reportUnknownMemberType] + config['workflow_failure_exception_types'] = [ + *config.get('workflow_failure_exception_types', []), # pyright: ignore[reportUnknownMemberType] + UserError, + PydanticUserError, + ] return super().configure_worker(config) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index c1a26d6d2c..ae37c0b5e2 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -6,6 +6,8 @@ from datetime import timedelta from typing import Any, Callable, Literal, overload +from pydantic.errors import PydanticUserError +from pydantic_core import PydanticSerializationError from temporalio import workflow from temporalio.common import RetryPolicy from temporalio.workflow import ActivityConfig @@ -46,16 +48,17 @@ def __init__( model_activity_config: ActivityConfig | None = None, toolset_activity_config: dict[str, ActivityConfig] | None = None, tool_activity_config: dict[str, dict[str, ActivityConfig | Literal[False]]] | None = None, - run_context_type: type[TemporalRunContext] = TemporalRunContext, + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], temporalize_toolset_func: Callable[ [ - AbstractToolset[Any], + AbstractToolset[AgentDepsT], str, ActivityConfig, dict[str, ActivityConfig | Literal[False]], - type[TemporalRunContext], + type[AgentDepsT], + type[TemporalRunContext[AgentDepsT]], ], - AbstractToolset[Any], + AbstractToolset[AgentDepsT], ] = temporalize_toolset, ): """Wrap an agent to enable it to be used inside a Temporal workflow, by automatically offloading model requests, tool calls, and MCP server communication to Temporal activities. 
@@ -74,10 +77,8 @@ def __init__( If a tool does not use IO, you can specify `False` to disable using an activity. Note that the tool is required to be defined as an `async` function as non-async tools are run in threads which are non-deterministic and thus not supported outside of activities. run_context_type: The `TemporalRunContext` subclass to use to serialize and deserialize the run context for use inside a Temporal activity. - By default, only the `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. + By default, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute. - If `deps` is a JSON-serializable dictionary, like a `TypedDict`, you can use `TemporalRunContextWithDeps` to make the `deps` attribute available as well. - If `deps` is of a different type, create a `TemporalRunContext` subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. temporalize_toolset_func: Optional function to use to prepare "leaf" toolsets (i.e. those that implement their own tool listing and calling) for Temporal by wrapping them in a `TemporalWrapperToolset` that moves methods that require IO to Temporal activities. If not provided, only `FunctionToolset` and `MCPServer` will be prepared for Temporal. The function takes the toolset, the activity name prefix, the toolset-specific activity config, the tool-specific activity configs and the run context type. 
@@ -87,9 +88,13 @@ def __init__( # start_to_close_timeout is required activity_config = activity_config or ActivityConfig(start_to_close_timeout=timedelta(seconds=60)) - # pydantic_ai.exceptions.UserError is not retryable + # `pydantic_ai.exceptions.UserError` and `pydantic.errors.PydanticUserError` are not retryable retry_policy = activity_config.get('retry_policy') or RetryPolicy() - retry_policy.non_retryable_error_types = [*(retry_policy.non_retryable_error_types or []), UserError.__name__] + retry_policy.non_retryable_error_types = [ + *(retry_policy.non_retryable_error_types or []), + UserError.__name__, + PydanticUserError.__name__, + ] activity_config['retry_policy'] = retry_policy model_activity_config = model_activity_config or {} @@ -104,6 +109,8 @@ def __init__( activity_name_prefix = f'agent__{self.name}' + deps_type = wrapped.deps_type + activities: list[Callable[..., Any]] = [] if not isinstance(wrapped.model, Model): raise UserError( @@ -114,6 +121,7 @@ def __init__( wrapped.model, activity_name_prefix=activity_name_prefix, activity_config=activity_config | model_activity_config, + deps_type=deps_type, run_context_type=run_context_type, event_stream_handler=event_stream_handler or wrapped.event_stream_handler, ) @@ -131,6 +139,7 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset activity_name_prefix, activity_config | toolset_activity_config.get(id, {}), tool_activity_config.get(id, {}), + deps_type, run_context_type, ) if isinstance(toolset, TemporalWrapperToolset): @@ -175,6 +184,10 @@ def _temporal_overrides(self) -> Iterator[None]: token = self._temporal_overrides_active.set(True) try: yield + except PydanticSerializationError as e: + raise UserError( + "The `deps` object failed to be serialized. Temporal requires all objects that are passed to activities to be serializable using Pydantic's `TypeAdapter`." 
+ ) from e finally: self._temporal_overrides_active.reset(token) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py index 0a7b038042..6c161cb390 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py @@ -7,8 +7,8 @@ from temporalio import activity, workflow from temporalio.workflow import ActivityConfig -from pydantic_ai._run_context import RunContext from pydantic_ai.exceptions import UserError +from pydantic_ai.tools import AgentDepsT, RunContext from pydantic_ai.toolsets import FunctionToolset, ToolsetTool from pydantic_ai.toolsets.function import FunctionToolsetTool @@ -24,25 +24,25 @@ class _CallToolParams: serialized_run_context: Any -class TemporalFunctionToolset(TemporalWrapperToolset): +class TemporalFunctionToolset(TemporalWrapperToolset[AgentDepsT]): def __init__( self, - toolset: FunctionToolset, + toolset: FunctionToolset[AgentDepsT], *, activity_name_prefix: str, activity_config: ActivityConfig, tool_activity_config: dict[str, ActivityConfig | Literal[False]], - run_context_type: type[TemporalRunContext] = TemporalRunContext, + deps_type: type[AgentDepsT], + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], ): super().__init__(toolset) self.activity_config = activity_config self.tool_activity_config = tool_activity_config self.run_context_type = run_context_type - @activity.defn(name=f'{activity_name_prefix}__toolset__{self.id}__call_tool') - async def call_tool_activity(params: _CallToolParams) -> Any: + async def call_tool_activity(params: _CallToolParams, deps: AgentDepsT) -> Any: name = params.name - ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context) + ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) try: tool = (await 
toolset.get_tools(ctx))[name] except KeyError as e: # pragma: no cover @@ -53,13 +53,20 @@ async def call_tool_activity(params: _CallToolParams) -> Any: return await self.wrapped.call_tool(name, params.tool_args, ctx, tool) - self.call_tool_activity = call_tool_activity + # Set type hint explicitly so that Temporal can take care of serialization and deserialization + call_tool_activity.__annotations__['deps'] = deps_type + + self.call_tool_activity = activity.defn(name=f'{activity_name_prefix}__toolset__{self.id}__call_tool')( + call_tool_activity + ) @property def temporal_activities(self) -> list[Callable[..., Any]]: return [self.call_tool_activity] - async def call_tool(self, name: str, tool_args: dict[str, Any], ctx: RunContext, tool: ToolsetTool) -> Any: + async def call_tool( + self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT] + ) -> Any: if not workflow.in_workflow(): return await super().call_tool(name, tool_args, ctx, tool) @@ -77,6 +84,9 @@ async def call_tool(self, name: str, tool_args: dict[str, Any], ctx: RunContext, serialized_run_context = self.run_context_type.serialize_run_context(ctx) return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.call_tool_activity, - arg=_CallToolParams(name=name, tool_args=tool_args, serialized_run_context=serialized_run_context), + args=[ + _CallToolParams(name=name, tool_args=tool_args, serialized_run_context=serialized_run_context), + ctx.deps, + ], **tool_activity_config, ) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py index 5c7b8c81d4..d10216bcf2 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_mcp_server.py @@ -8,10 +8,9 @@ from temporalio.workflow import ActivityConfig from typing_extensions import Self -from pydantic_ai._run_context 
import RunContext from pydantic_ai.exceptions import UserError from pydantic_ai.mcp import MCPServer, ToolResult -from pydantic_ai.tools import ToolDefinition +from pydantic_ai.tools import AgentDepsT, RunContext, ToolDefinition from pydantic_ai.toolsets.abstract import ToolsetTool from ._run_context import TemporalRunContext @@ -33,7 +32,7 @@ class _CallToolParams: tool_def: ToolDefinition -class TemporalMCPServer(TemporalWrapperToolset): +class TemporalMCPServer(TemporalWrapperToolset[AgentDepsT]): def __init__( self, server: MCPServer, @@ -41,7 +40,8 @@ def __init__( activity_name_prefix: str, activity_config: ActivityConfig, tool_activity_config: dict[str, ActivityConfig | Literal[False]], - run_context_type: type[TemporalRunContext] = TemporalRunContext, + deps_type: type[AgentDepsT], + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], ): super().__init__(server) self.activity_config = activity_config @@ -57,19 +57,22 @@ def __init__( self.run_context_type = run_context_type - @activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__get_tools') - async def get_tools_activity(params: _GetToolsParams) -> dict[str, ToolDefinition]: - run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + async def get_tools_activity(params: _GetToolsParams, deps: AgentDepsT) -> dict[str, ToolDefinition]: + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) tools = await self.wrapped.get_tools(run_context) # ToolsetTool is not serializable as it holds a SchemaValidator (which is also the same for every MCP tool so unnecessary to pass along the wire every time), # so we just return the ToolDefinitions and wrap them in ToolsetTool outside of the activity. 
return {name: tool.tool_def for name, tool in tools.items()} - self.get_tools_activity = get_tools_activity + # Set type hint explicitly so that Temporal can take care of serialization and deserialization + get_tools_activity.__annotations__['deps'] = deps_type - @activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__call_tool') - async def call_tool_activity(params: _CallToolParams) -> ToolResult: - run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + self.get_tools_activity = activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__get_tools')( + get_tools_activity + ) + + async def call_tool_activity(params: _CallToolParams, deps: AgentDepsT) -> ToolResult: + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) return await self.wrapped.call_tool( params.name, params.tool_args, @@ -77,9 +80,14 @@ async def call_tool_activity(params: _CallToolParams) -> ToolResult: self.tool_for_tool_def(params.tool_def), ) - self.call_tool_activity = call_tool_activity + # Set type hint explicitly so that Temporal can take care of serialization and deserialization + call_tool_activity.__annotations__['deps'] = deps_type + + self.call_tool_activity = activity.defn(name=f'{activity_name_prefix}__mcp_server__{self.id}__call_tool')( + call_tool_activity + ) - def tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool: + def tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool[AgentDepsT]: assert isinstance(self.wrapped, MCPServer) return self.wrapped.tool_for_tool_def(tool_def) @@ -95,14 +103,17 @@ async def __aenter__(self) -> Self: async def __aexit__(self, *args: Any) -> bool | None: return None - async def get_tools(self, ctx: RunContext[Any]) -> dict[str, ToolsetTool[Any]]: + async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: if not workflow.in_workflow(): return await super().get_tools(ctx) 
serialized_run_context = self.run_context_type.serialize_run_context(ctx) tool_defs = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.get_tools_activity, - arg=_GetToolsParams(serialized_run_context=serialized_run_context), + args=[ + _GetToolsParams(serialized_run_context=serialized_run_context), + ctx.deps, + ], **self.activity_config, ) return {name: self.tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} @@ -111,8 +122,8 @@ async def call_tool( self, name: str, tool_args: dict[str, Any], - ctx: RunContext[Any], - tool: ToolsetTool[Any], + ctx: RunContext[AgentDepsT], + tool: ToolsetTool[AgentDepsT], ) -> ToolResult: if not workflow.in_workflow(): return await super().call_tool(name, tool_args, ctx, tool) @@ -121,11 +132,14 @@ async def call_tool( serialized_run_context = self.run_context_type.serialize_run_context(ctx) return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.call_tool_activity, - arg=_CallToolParams( - name=name, - tool_args=tool_args, - serialized_run_context=serialized_run_context, - tool_def=tool.tool_def, - ), + args=[ + _CallToolParams( + name=name, + tool_args=tool_args, + serialized_run_context=serialized_run_context, + tool_def=tool.tool_def, + ), + ctx.deps, + ], **tool_activity_config, ) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py index 07398afb07..6607aec077 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_model.py @@ -10,7 +10,6 @@ from temporalio import activity, workflow from temporalio.workflow import ActivityConfig -from pydantic_ai._run_context import RunContext from pydantic_ai.agent import EventStreamHandler from pydantic_ai.exceptions import UserError from pydantic_ai.messages import ( @@ -21,6 +20,7 @@ from pydantic_ai.models import Model, 
ModelRequestParameters, StreamedResponse from pydantic_ai.models.wrapper import WrapperModel from pydantic_ai.settings import ModelSettings +from pydantic_ai.tools import AgentDepsT, RunContext from pydantic_ai.usage import Usage from ._run_context import TemporalRunContext @@ -67,7 +67,8 @@ def __init__( *, activity_name_prefix: str, activity_config: ActivityConfig, - run_context_type: type[TemporalRunContext] = TemporalRunContext, + deps_type: type[AgentDepsT], + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], event_stream_handler: EventStreamHandler[Any] | None = None, ): super().__init__(model) @@ -81,12 +82,11 @@ async def request_activity(params: _RequestParams) -> ModelResponse: self.request_activity = request_activity - @activity.defn(name=f'{activity_name_prefix}__model_request_stream') - async def request_stream_activity(params: _RequestParams) -> ModelResponse: + async def request_stream_activity(params: _RequestParams, deps: AgentDepsT) -> ModelResponse: # An error is raised in `request_stream` if no `event_stream_handler` is set. 
assert self.event_stream_handler is not None - run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context) + run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) async with self.wrapped.request_stream( params.messages, params.model_settings, params.model_request_parameters, run_context ) as streamed_response: @@ -96,7 +96,12 @@ async def request_stream_activity(params: _RequestParams) -> ModelResponse: pass return streamed_response.get() - self.request_stream_activity = request_stream_activity + # Set type hint explicitly so that Temporal can take care of serialization and deserialization + request_stream_activity.__annotations__['deps'] = deps_type + + self.request_stream_activity = activity.defn(name=f'{activity_name_prefix}__model_request_stream')( + request_stream_activity + ) @property def temporal_activities(self) -> list[Callable[..., Any]]: @@ -149,12 +154,15 @@ async def request_stream( serialized_run_context = self.run_context_type.serialize_run_context(run_context) response = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType] activity=self.request_stream_activity, - arg=_RequestParams( - messages=messages, - model_settings=model_settings, - model_request_parameters=model_request_parameters, - serialized_run_context=serialized_run_context, - ), + args=[ + _RequestParams( + messages=messages, + model_settings=model_settings, + model_request_parameters=model_request_parameters, + serialized_run_context=serialized_run_context, + ), + run_context.deps, + ], **self.activity_config, ) yield TemporalStreamedResponse(model_request_parameters, response) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py index 222e3f6bcc..e3bd7f6232 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py +++ 
b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py @@ -2,26 +2,23 @@ from typing import Any -from pydantic_ai._run_context import RunContext from pydantic_ai.exceptions import UserError +from pydantic_ai.tools import AgentDepsT, RunContext -class TemporalRunContext(RunContext[Any]): +class TemporalRunContext(RunContext[AgentDepsT]): """The [`RunContext`][pydantic_ai.tools.RunContext] subclass to use to serialize and deserialize the run context for use inside a Temporal activity. - By default, only the `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. + By default, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `retry` and `run_step` attributes will be available. To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent]. - - If `deps` is a JSON-serializable dictionary, like a `TypedDict`, you can use [`TemporalRunContextWithDeps`][pydantic_ai.durable_exec.temporal.TemporalRunContextWithDeps] to make the `deps` attribute available as well. - If `deps` is of a different type, create a `TemporalRunContext` subclass with custom `serialize_run_context` and `deserialize_run_context` class methods. 
""" - def __init__(self, **kwargs: Any): - self.__dict__ = kwargs + def __init__(self, deps: AgentDepsT, **kwargs: Any): + self.__dict__ = {**kwargs, 'deps': deps} setattr( self, '__dataclass_fields__', - {name: field for name, field in RunContext.__dataclass_fields__.items() if name in kwargs}, + {name: field for name, field in RunContext.__dataclass_fields__.items() if name in self.__dict__}, ) def __getattribute__(self, name: str) -> Any: @@ -29,7 +26,7 @@ def __getattribute__(self, name: str) -> Any: return super().__getattribute__(name) except AttributeError as e: # pragma: no cover if name in RunContext.__dataclass_fields__: - raise AttributeError( + raise UserError( f'{self.__class__.__name__!r} object has no attribute {name!r}. ' 'To make the attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute and pass it to `TemporalAgent`.' ) @@ -48,19 +45,6 @@ def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: } @classmethod - def deserialize_run_context(cls, ctx: dict[str, Any]) -> TemporalRunContext: + def deserialize_run_context(cls, ctx: dict[str, Any], deps: AgentDepsT) -> TemporalRunContext[AgentDepsT]: """Deserialize the run context from a `dict[str, Any]`.""" - return cls(**ctx) - - -class TemporalRunContextWithDeps(TemporalRunContext): - """[`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass that includes JSON-serializable dictionary `deps`, like a `TypedDict`.""" - - @classmethod - def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]: - if not isinstance(ctx.deps, dict): - raise UserError( - '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary, like a `TypedDict`. 
' - 'To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' - ) - return {**super().serialize_run_context(ctx), 'deps': ctx.deps} # pyright: ignore[reportUnknownMemberType] + return cls(**ctx, deps=deps) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py index afa1551630..8a7acac368 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py @@ -5,8 +5,8 @@ from temporalio.workflow import ActivityConfig -from pydantic_ai._run_context import AgentDepsT from pydantic_ai.mcp import MCPServer +from pydantic_ai.tools import AgentDepsT from pydantic_ai.toolsets.abstract import AbstractToolset from pydantic_ai.toolsets.function import FunctionToolset from pydantic_ai.toolsets.wrapper import WrapperToolset @@ -14,7 +14,7 @@ from ._run_context import TemporalRunContext -class TemporalWrapperToolset(WrapperToolset[Any], ABC): +class TemporalWrapperToolset(WrapperToolset[AgentDepsT], ABC): @property def id(self) -> str: # An error is raised in `TemporalAgent` if no `id` is set. @@ -34,12 +34,13 @@ def visit_and_replace( def temporalize_toolset( - toolset: AbstractToolset[Any], + toolset: AbstractToolset[AgentDepsT], activity_name_prefix: str, activity_config: ActivityConfig, tool_activity_config: dict[str, ActivityConfig | Literal[False]], - run_context_type: type[TemporalRunContext] = TemporalRunContext, -) -> AbstractToolset[Any]: + deps_type: type[AgentDepsT], + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], +) -> AbstractToolset[AgentDepsT]: """Temporalize a toolset. Args: @@ -47,7 +48,8 @@ def temporalize_toolset( activity_name_prefix: Prefix for Temporal activity names. activity_config: The Temporal activity config to use. 
tool_activity_config: The Temporal activity config to use for specific tools identified by tool name. - run_context_type: The type of run context to use to serialize and deserialize the run context. + deps_type: The type of agent's dependencies object. It needs to be serializable using Pydantic's `TypeAdapter`. + run_context_type: The `TemporalRunContext` (sub)class that's used to serialize and deserialize the run context. """ if isinstance(toolset, FunctionToolset): from ._function_toolset import TemporalFunctionToolset @@ -57,6 +59,7 @@ def temporalize_toolset( activity_name_prefix=activity_name_prefix, activity_config=activity_config, tool_activity_config=tool_activity_config, + deps_type=deps_type, run_context_type=run_context_type, ) elif isinstance(toolset, MCPServer): @@ -67,6 +70,7 @@ def temporalize_toolset( activity_name_prefix=activity_name_prefix, activity_config=activity_config, tool_activity_config=tool_activity_config, + deps_type=deps_type, run_context_type=run_context_type, ) else: diff --git a/tests/cassettes/test_temporal/test_temporal_agent_with_unserializable_deps_type.yaml b/tests/cassettes/test_temporal/test_temporal_agent_with_unserializable_deps_type.yaml new file mode 100644 index 0000000000..de8c761758 --- /dev/null +++ b/tests/cassettes/test_temporal/test_temporal_agent_with_unserializable_deps_type.yaml @@ -0,0 +1,95 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '279' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: What is the model name? 
+ role: user + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: '' + name: get_model_name + parameters: + additionalProperties: false + properties: {} + type: object + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1064' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '1141' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: tool_calls + index: 0 + logprobs: null + message: + annotations: [] + content: null + refusal: null + role: assistant + tool_calls: + - function: + arguments: '{}' + name: get_model_name + id: call_wB0C4FAOjxYgTNJrQT9NzzZ9 + type: function + created: 1755036404 + id: chatcmpl-C3rQisW29iISecZ6NMn4FrseeO3A9 + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_07871e2ad8 + usage: + completion_tokens: 11 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 38 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 49 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 8ff155d4b3..50288bf756 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -4,11 +4,11 @@ import re from collections.abc import AsyncIterable, AsyncIterator, Iterator from contextlib import contextmanager -from dataclasses import asdict, dataclass +from dataclasses import dataclass from datetime import timedelta -from typing import Any, Literal +from typing import Literal -from 
typing_extensions import TypedDict +from pydantic import BaseModel from pydantic_ai import Agent, RunContext from pydantic_ai.direct import model_request_stream @@ -27,7 +27,7 @@ ToolCallPartDelta, ToolReturnPart, ) -from pydantic_ai.models import cached_async_http_client +from pydantic_ai.models import Model, cached_async_http_client from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets import DeferredToolset, FunctionToolset @@ -47,8 +47,6 @@ LogfirePlugin, PydanticAIPlugin, TemporalAgent, - TemporalRunContext, - TemporalRunContextWithDeps, ) from pydantic_ai.durable_exec.temporal._function_toolset import TemporalFunctionToolset from pydantic_ai.durable_exec.temporal._mcp_server import TemporalMCPServer @@ -138,6 +136,10 @@ def workflow_raises(exc_type: type[Exception], exc_message: str) -> Iterator[Non TEMPORAL_PORT = 7243 TASK_QUEUE = 'pydantic-ai-agent-task-queue' +BASE_ACTIVITY_CONFIG = ActivityConfig( + start_to_close_timeout=timedelta(seconds=60), + retry_policy=RetryPolicy(maximum_attempts=1), +) @pytest.fixture(scope='module') @@ -174,13 +176,7 @@ async def client_with_logfire(temporal_env: WorkflowEnvironment) -> Client: simple_agent = Agent(model, name='simple_agent') # This needs to be done before the `TemporalAgent` is bound to the workflow. 
-simple_temporal_agent = TemporalAgent( - simple_agent, - activity_config=ActivityConfig( - start_to_close_timeout=timedelta(seconds=60), - retry_policy=RetryPolicy(maximum_attempts=1), - ), -) +simple_temporal_agent = TemporalAgent(simple_agent, activity_config=BASE_ACTIVITY_CONFIG) @workflow.defn @@ -207,7 +203,7 @@ async def test_simple_agent_run_in_workflow(allow_model_requests: None, client: assert output == snapshot('The capital of Mexico is Mexico City.') -class Deps(TypedDict): +class Deps(BaseModel): country: str @@ -221,7 +217,7 @@ async def event_stream_handler( async def get_country(ctx: RunContext[Deps]) -> str: - return ctx.deps['country'] + return ctx.deps.country def get_weather(city: str) -> str: @@ -256,10 +252,7 @@ class Response: # This needs to be done before the `TemporalAgent` is bound to the workflow. complex_temporal_agent = TemporalAgent( complex_agent, - activity_config=ActivityConfig( - start_to_close_timeout=timedelta(seconds=60), - retry_policy=RetryPolicy(maximum_attempts=1), - ), + activity_config=BASE_ACTIVITY_CONFIG, model_activity_config=ActivityConfig(start_to_close_timeout=timedelta(seconds=90)), toolset_activity_config={ 'country': ActivityConfig(start_to_close_timeout=timedelta(seconds=120)), @@ -275,7 +268,6 @@ class Response: 'get_weather': ActivityConfig(start_to_close_timeout=timedelta(seconds=180)), }, }, - run_context_type=TemporalRunContextWithDeps, ) @@ -1169,6 +1161,7 @@ async def test_temporal_agent_override_deps_in_workflow(allow_model_requests: No # This needs to be done before the `TemporalAgent` is bound to the workflow. 
temporal_agent_with_sync_tool_activity_disabled = TemporalAgent( agent_with_sync_tool, + activity_config=BASE_ACTIVITY_CONFIG, tool_activity_config={ '': { 'get_weather': False, @@ -1256,103 +1249,47 @@ async def test_temporal_model_stream_direct(client: Client): ) -@dataclass -class DataclassDeps: - country: str - +unserializable_deps_agent = Agent(model, name='unserializable_deps_agent', deps_type=Model) -agent_with_dataclass_deps = Agent(model, name='agent_with_dataclass_deps', deps_type=DataclassDeps) - -@agent_with_dataclass_deps.tool -async def get_country_from_deps(ctx: RunContext[DataclassDeps]) -> str: - return ctx.deps.country +@unserializable_deps_agent.tool +async def get_model_name(ctx: RunContext[Model]) -> str: + return ctx.deps.model_name # This needs to be done before the `TemporalAgent` is bound to the workflow. -temporal_agent_with_dataclass_deps = TemporalAgent( - agent_with_dataclass_deps, - run_context_type=TemporalRunContextWithDeps, -) +unserializable_deps_temporal_agent = TemporalAgent(unserializable_deps_agent, activity_config=BASE_ACTIVITY_CONFIG) @workflow.defn -class AgentWorkflowWithDataclassDeps: +class UnserializableDepsAgentWorkflow: @workflow.run - async def run(self, prompt: str, deps: DataclassDeps) -> str: - result = await temporal_agent_with_dataclass_deps.run(prompt, deps=deps) - return result.output # pragma: no cover + async def run(self, prompt: str) -> str: + result = await unserializable_deps_temporal_agent.run(prompt, deps=unserializable_deps_temporal_agent.model) + return result.output -async def test_temporal_agent_with_non_dict_deps(allow_model_requests: None, client: Client): +async def test_temporal_agent_with_unserializable_deps_type(allow_model_requests: None, client: Client): async with Worker( client, task_queue=TASK_QUEUE, - workflows=[AgentWorkflowWithDataclassDeps], - plugins=[AgentPlugin(temporal_agent_with_dataclass_deps)], + workflows=[UnserializableDepsAgentWorkflow], + 
plugins=[AgentPlugin(unserializable_deps_temporal_agent)], ): with workflow_raises( UserError, snapshot( - '`TemporalRunContextWithDeps` requires the `deps` object to be a JSON-serializable dictionary, like a `TypedDict`. To support `deps` of a different type, pass a `TemporalRunContext` subclass to `TemporalAgent` with custom `serialize_run_context` and `deserialize_run_context` class methods.' + "The `deps` object failed to be serialized. Temporal requires all objects that are passed to activities to be serializable using Pydantic's `TypeAdapter`." ), ): await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - AgentWorkflowWithDataclassDeps.run, - args=[ - 'What is the capital of the country?', - DataclassDeps(country='Mexico'), - ], - id=AgentWorkflowWithDataclassDeps.__name__, + UnserializableDepsAgentWorkflow.run, + args=['What is the model name?'], + id=UnserializableDepsAgentWorkflow.__name__, task_queue=TASK_QUEUE, ) -class TemporalRunContextWithDataclassDeps(TemporalRunContext): - @classmethod - def serialize_run_context(cls, ctx: RunContext[DataclassDeps]) -> dict[str, Any]: - return {**super().serialize_run_context(ctx), 'deps': asdict(ctx.deps)} - - @classmethod - def deserialize_run_context(cls, ctx: dict[str, Any]) -> TemporalRunContext: - deps = DataclassDeps(**ctx.pop('deps', {})) - return cls(**ctx, deps=deps) - - -# This needs to be done before the `TemporalAgent` is bound to the workflow. 
-temporal_agent_with_dataclass_deps_as_dict = TemporalAgent( - agent_with_dataclass_deps, - run_context_type=TemporalRunContextWithDataclassDeps, -) - - -@workflow.defn -class AgentWorkflowWithDataclassDepsAsDict: - @workflow.run - async def run(self, prompt: str, deps: DataclassDeps) -> str: - result = await temporal_agent_with_dataclass_deps_as_dict.run(prompt, deps=deps) - return result.output - - -async def test_temporal_agent_with_dataclass_deps_as_dict(allow_model_requests: None, client: Client): - async with Worker( - client, - task_queue=TASK_QUEUE, - workflows=[AgentWorkflowWithDataclassDepsAsDict], - plugins=[AgentPlugin(temporal_agent_with_dataclass_deps_as_dict)], - ): - output = await client.execute_workflow( # pyright: ignore[reportUnknownMemberType] - AgentWorkflowWithDataclassDepsAsDict.run, - args=[ - 'What is the capital of the country?', - DataclassDeps(country='Mexico'), - ], - id=AgentWorkflowWithDataclassDepsAsDict.__name__, - task_queue=TASK_QUEUE, - ) - assert output == snapshot('The capital of Mexico is Mexico City.') - - async def test_logfire_plugin(client: Client): def setup_logfire(send_to_logfire: bool = True, metrics: Literal[False] | None = None) -> Logfire: instance = logfire.configure(local=True, metrics=metrics) From f0958d95b1ee6a80a2ace67f29342cc2c3d9a43a Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 22:57:18 +0000 Subject: [PATCH 28/30] Add beta note to docs --- docs/temporal.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/temporal.md b/docs/temporal.md index d7e8d253e4..214ac3328a 100644 --- a/docs/temporal.md +++ b/docs/temporal.md @@ -1,5 +1,8 @@ # Durable Execution with Temporal +!!! note + Durable execution support is in beta and the public interface is subject to change based on user feedback. We expect it to be stable by the release of Pydantic AI v1 at the end of August. 
Questions and feedback are welcome in [GitHub issues](https://github.com/pydantic/pydantic-ai/issues) and the [`#pydantic-ai` Slack channel](https://logfire.pydantic.dev/docs/join-slack/). + Pydantic AI allows you to build durable agents that never lose their progress and handle long-running, asynchronous, and human-in-the-loop workflows with production-grade reliability. Durable agents have full support for [streaming](agents.md#streaming-all-events) and [MCP](mcp/client.md), with the added benefit of fault tolerance. [Temporal](https://temporal.io) is a popular [durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) platform that's natively supported by Pydantic AI. From 2908487e1b8d06c8ca6e9933a39c52d622a1698a Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 23:00:44 +0000 Subject: [PATCH 29/30] Fix test coverage --- .../pydantic_ai/durable_exec/temporal/_agent.py | 6 ++---- tests/test_temporal.py | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index ae37c0b5e2..23d8b42366 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -109,8 +109,6 @@ def __init__( activity_name_prefix = f'agent__{self.name}' - deps_type = wrapped.deps_type - activities: list[Callable[..., Any]] = [] if not isinstance(wrapped.model, Model): raise UserError( @@ -121,7 +119,7 @@ def __init__( wrapped.model, activity_name_prefix=activity_name_prefix, activity_config=activity_config | model_activity_config, - deps_type=deps_type, + deps_type=self.deps_type, run_context_type=run_context_type, event_stream_handler=event_stream_handler or wrapped.event_stream_handler, ) @@ -139,7 +137,7 @@ def temporalize_toolset(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset activity_name_prefix, 
activity_config | toolset_activity_config.get(id, {}), tool_activity_config.get(id, {}), - deps_type, + self.deps_type, run_context_type, ) if isinstance(toolset, TemporalWrapperToolset): diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 50288bf756..1154e0fa7c 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -1254,7 +1254,7 @@ async def test_temporal_model_stream_direct(client: Client): @unserializable_deps_agent.tool async def get_model_name(ctx: RunContext[Model]) -> str: - return ctx.deps.model_name + return ctx.deps.model_name # pragma: no cover # This needs to be done before the `TemporalAgent` is bound to the workflow. @@ -1266,7 +1266,7 @@ class UnserializableDepsAgentWorkflow: @workflow.run async def run(self, prompt: str) -> str: result = await unserializable_deps_temporal_agent.run(prompt, deps=unserializable_deps_temporal_agent.model) - return result.output + return result.output # pragma: no cover async def test_temporal_agent_with_unserializable_deps_type(allow_model_requests: None, client: Client): From e9195e871f94bfb8809757bf674bc4189d117231 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 12 Aug 2025 23:07:13 +0000 Subject: [PATCH 30/30] Fix test coverage --- pydantic_ai_slim/pydantic_ai/models/wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/models/wrapper.py b/pydantic_ai_slim/pydantic_ai/models/wrapper.py index 9818ad603b..4c91991cc1 100644 --- a/pydantic_ai_slim/pydantic_ai/models/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/models/wrapper.py @@ -64,4 +64,4 @@ def settings(self) -> ModelSettings | None: return self.wrapped.settings def __getattr__(self, item: str): - return getattr(self.wrapped, item) # pragma: no cover + return getattr(self.wrapped, item)