Skip to content

Commit c626eb9

Browse files
committed
fix: ensure RunState serialization compatibility with openai-agents-js
1 parent b38b2f7 commit c626eb9

File tree

6 files changed

+828
-89
lines changed

6 files changed

+828
-89
lines changed

examples/agent_patterns/human_in_the_loop.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ async def main():
104104
with open("result.json") as f:
105105
stored_state_json = json.load(f)
106106

107-
state = RunState.from_json(agent, stored_state_json)
107+
state = await RunState.from_json(agent, stored_state_json)
108108

109109
# Process each interruption
110110
for interruption in result.interruptions:

src/agents/_run_impl.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,9 @@ class SingleStepResult:
240240
tool_output_guardrail_results: list[ToolOutputGuardrailResult]
241241
"""Tool output guardrail results from this step."""
242242

243+
processed_response: ProcessedResponse | None = None
244+
"""The processed model response. This is needed for resuming from interruptions."""
245+
243246
@property
244247
def generated_items(self) -> list[RunItem]:
245248
"""Items generated during the agent run (i.e. everything generated after
@@ -331,6 +334,7 @@ async def execute_tools_and_side_effects(
331334
next_step=NextStepInterruption(interruptions=interruptions),
332335
tool_input_guardrail_results=tool_input_guardrail_results,
333336
tool_output_guardrail_results=tool_output_guardrail_results,
337+
processed_response=processed_response,
334338
)
335339

336340
new_step_items.extend([result.run_item for result in approved_function_results])

src/agents/result.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
)
3030

3131
if TYPE_CHECKING:
32-
from ._run_impl import QueueCompleteSentinel
32+
from ._run_impl import ProcessedResponse, QueueCompleteSentinel
3333
from .agent import Agent
3434
from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
3535

@@ -116,6 +116,8 @@ def last_response_id(self) -> str | None:
116116
@dataclass
117117
class RunResult(RunResultBase):
118118
_last_agent: Agent[Any]
119+
_last_processed_response: ProcessedResponse | None = field(default=None, repr=False)
120+
"""The last processed model response. This is needed for resuming from interruptions."""
119121

120122
@property
121123
def last_agent(self) -> Agent[Any]:
@@ -162,6 +164,7 @@ def to_state(self) -> Any:
162164
state._model_responses = self.raw_responses
163165
state._input_guardrail_results = self.input_guardrail_results
164166
state._output_guardrail_results = self.output_guardrail_results
167+
state._last_processed_response = self._last_processed_response
165168

166169
# If there are interruptions, set the current step
167170
if self.interruptions:
@@ -202,6 +205,9 @@ class RunResultStreaming(RunResultBase):
202205
is_complete: bool = False
203206
"""Whether the agent has finished running."""
204207

208+
_last_processed_response: ProcessedResponse | None = field(default=None, repr=False)
209+
"""The last processed model response. This is needed for resuming from interruptions."""
210+
205211
# Queues that the background run_loop writes to
206212
_event_queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel] = field(
207213
default_factory=asyncio.Queue, repr=False
@@ -443,6 +449,7 @@ def to_state(self) -> Any:
443449
state._input_guardrail_results = self.input_guardrail_results
444450
state._output_guardrail_results = self.output_guardrail_results
445451
state._current_turn = self.current_turn
452+
state._last_processed_response = self._last_processed_response
446453

447454
# If there are interruptions, set the current step
448455
if self.interruptions:

0 commit comments

Comments (0)