From 5a7973c00c0a02073fae3c81b77736b1a03753fd Mon Sep 17 00:00:00 2001 From: Vayoa <89143290+vayoa@users.noreply.github.com> Date: Mon, 14 Jul 2025 00:25:14 +0300 Subject: [PATCH 1/4] Update delegation and messaging mechanisms --- .env.example | 1 + afk/agents/__init__.py | 12 ++++---- afk/agents/analyst.py | 21 +++++++------- afk/agents/base.py | 19 ++++++------- afk/agents/pm.py | 21 +++++++------- afk/agents/qa.py | 21 +++++++------- afk/cap/models.py | 1 + afk/cap/orchestrator.py | 53 +++++++++++++++++++++++++++++++++-- afk/config.py | 1 + afk/main.py | 17 ++++++----- docs/custom_agent_protocol.md | 3 ++ tests/test_models.py | 1 + tests/test_orchestrator.py | 28 ++++++++++-------- 13 files changed, 130 insertions(+), 69 deletions(-) diff --git a/.env.example b/.env.example index 7460c7b..8f4bd6e 100644 --- a/.env.example +++ b/.env.example @@ -1 +1,2 @@ GROQ_API_KEY= +AFK_INCOMING_ROLE=user diff --git a/afk/agents/__init__.py b/afk/agents/__init__.py index 6c58199..1fe768c 100644 --- a/afk/agents/__init__.py +++ b/afk/agents/__init__.py @@ -1,15 +1,15 @@ from .base import BaseAgent, load_chain_of_thought_prompt -from .pm import PMAgent, pm_manifest -from .analyst import AnalystAgent, analyst_manifest -from .qa import QAAgent, qa_manifest +from .pm import PMAgent, create_pm_manifest +from .analyst import AnalystAgent, create_analyst_manifest +from .qa import QAAgent, create_qa_manifest __all__ = [ "BaseAgent", "PMAgent", "AnalystAgent", "QAAgent", - "pm_manifest", - "analyst_manifest", - "qa_manifest", + "create_pm_manifest", + "create_analyst_manifest", + "create_qa_manifest", "load_chain_of_thought_prompt", ] diff --git a/afk/agents/analyst.py b/afk/agents/analyst.py index 99a5657..0d7781b 100644 --- a/afk/agents/analyst.py +++ b/afk/agents/analyst.py @@ -16,16 +16,17 @@ "Critique your findings and produce a thorough markdown report before delegating to the QA agent for verification." 
) -analyst_manifest = AgentManifest( - id="analyst", - system_prompt=ANALYST_SYSTEM_PROMPT, - allowed_agents=["qa"], - delegate_rules={"qa": DelegateRule(when_to_delegate="When analysis is complete")}, - overseer="qa", - mcp_bindings=ANALYST_BINDINGS, -) +def create_analyst_manifest(agent_id: str) -> AgentManifest: + return AgentManifest( + id=agent_id, + system_prompt=ANALYST_SYSTEM_PROMPT, + allowed_agents=["qa"], + delegate_rules={"qa": DelegateRule(when_to_delegate="When analysis is complete")}, + overseer="qa", + mcp_bindings=ANALYST_BINDINGS, + ) class AnalystAgent(BaseAgent): - def __init__(self, orchestrator: Optional[Any] = None) -> None: - super().__init__(analyst_manifest, orchestrator) + def __init__(self, agent_id: str = "analyst", orchestrator: Optional[Any] = None) -> None: + super().__init__(create_analyst_manifest(agent_id), orchestrator, agent_type="analyst") diff --git a/afk/agents/base.py b/afk/agents/base.py index 3fbfc1b..cbd0f85 100644 --- a/afk/agents/base.py +++ b/afk/agents/base.py @@ -216,10 +216,10 @@ class BaseAgent: "parameters": { "type": "object", "properties": { - "recipient_id": {"type": "string"}, + "agent_type": {"type": "string"}, "context_summary": {"type": "string"}, }, - "required": ["recipient_id", "context_summary"], + "required": ["agent_type", "context_summary"], "additionalProperties": False, }, }, @@ -227,9 +227,13 @@ class BaseAgent: ] def __init__( - self, manifest: AgentManifest, orchestrator: Optional[Any] = None + self, + manifest: AgentManifest, + orchestrator: Optional[Any] = None, + agent_type: Optional[str] = None, ) -> None: self.manifest = manifest + self.agent_type = agent_type or manifest.id self.orchestrator = orchestrator self.session = Session() self.history: List[Dict[str, str]] = [] @@ -518,16 +522,11 @@ def _handle_cap_function(self, name: str, args: Dict[str, Any]) -> str: self.orchestrator.dispatch_message(msg) return "message sent" elif name == "delegate_task": - from ..cap.models import DelegationNotice - - notice = DelegationNotice( - task_id=self.orchestrator.next_task_id(), + return self.orchestrator.delegate_task( sender_id=self.manifest.id, - recipient_id=args["recipient_id"], + agent_type=args["agent_type"], context_summary=args["context_summary"], ) - self.orchestrator.dispatch_message(notice) - return "task delegated" return "" # ------------------------------------------------------------------ diff --git a/afk/agents/pm.py b/afk/agents/pm.py index 57ac321..72acd00 100644 --- a/afk/agents/pm.py +++ b/afk/agents/pm.py @@ -17,16 +17,17 @@ "Delegate analysis to the analyst agent when ready and oversee results via the QA agent." 
) -pm_manifest = AgentManifest( - id="pm", - system_prompt=PM_SYSTEM_PROMPT, - allowed_agents=["analyst"], - delegate_rules={"analyst": DelegateRule(when_to_delegate="When tasks require analysis")}, - overseer="qa", - mcp_bindings=PM_BINDINGS, -) +def create_pm_manifest(agent_id: str) -> AgentManifest: + return AgentManifest( + id=agent_id, + system_prompt=PM_SYSTEM_PROMPT, + allowed_agents=["analyst"], + delegate_rules={"analyst": DelegateRule(when_to_delegate="When tasks require analysis")}, + overseer="qa", + mcp_bindings=PM_BINDINGS, + ) class PMAgent(BaseAgent): - def __init__(self, orchestrator: Optional[Any] = None) -> None: - super().__init__(pm_manifest, orchestrator) + def __init__(self, agent_id: str = "pm", orchestrator: Optional[Any] = None) -> None: + super().__init__(create_pm_manifest(agent_id), orchestrator, agent_type="pm") diff --git a/afk/agents/qa.py b/afk/agents/qa.py index b4c266b..182b0bd 100644 --- a/afk/agents/qa.py +++ b/afk/agents/qa.py @@ -15,16 +15,17 @@ "Communicate any issues back to the analyst and confirm when the analysis looks sound before responding to the PM." ) -qa_manifest = AgentManifest( - id="qa", - system_prompt=QA_SYSTEM_PROMPT, - allowed_agents=[], - delegate_rules={}, - overseer=None, - mcp_bindings=QA_BINDINGS, -) +def create_qa_manifest(agent_id: str) -> AgentManifest: + return AgentManifest( + id=agent_id, + system_prompt=QA_SYSTEM_PROMPT, + allowed_agents=[], + delegate_rules={}, + overseer=None, + mcp_bindings=QA_BINDINGS, + ) class QAAgent(BaseAgent): - def __init__(self, orchestrator: Optional[Any] = None) -> None: - super().__init__(qa_manifest, orchestrator) + def __init__(self, agent_id: str = "qa", orchestrator: Optional[Any] = None) -> None: + super().__init__(create_qa_manifest(agent_id), orchestrator, agent_type="qa") diff --git a/afk/cap/models.py b/afk/cap/models.py index a6bca65..c986e97 100644 --- a/afk/cap/models.py +++ b/afk/cap/models.py @@ -38,6 +38,7 @@ class DelegationNotice(BaseMessage): """Handover message for delegation.""" type: Literal["delegation_notice"] = "delegation_notice" + agent_type: str context_summary: str diff --git a/afk/cap/orchestrator.py b/afk/cap/orchestrator.py index a401f45..544354c 100644 --- a/afk/cap/orchestrator.py +++ b/afk/cap/orchestrator.py @@ -2,11 +2,15 @@ from __future__ import annotations -from typing import Dict, Iterator, List, Tuple +from typing import Dict, Iterator, List, Tuple, Callable +import uuid + +from ..config import settings import openai from ..agents.base import BaseAgent, StreamChunk +from ..agents import PMAgent, AnalystAgent, QAAgent from .models import BaseMessage, DelegationNotice @@ -15,13 +19,20 @@ class Orchestrator: def __init__(self) -> None: self.agents: Dict[str, BaseAgent] = {} + self.agent_types: Dict[str, str] = {} self.task_counter = 0 self.queue: List[BaseMessage] = [] self.user_messages: List[BaseMessage] = [] + self.agent_builders: Dict[str, Callable[[str, "Orchestrator"], BaseAgent]] = { + "pm": PMAgent, + "analyst": AnalystAgent, + "qa": QAAgent, + } def register_agent(self, agent: BaseAgent) -> None: """Register an agent instance.""" self.agents[agent.manifest.id] = agent + self.agent_types[agent.manifest.id] = agent.agent_type def next_task_id(self) -> str: self.task_counter += 1 @@ -36,6 +47,7 @@ def handle_user_message(self, content: str) -> None: task_id=self.next_task_id(), sender_id="user", recipient_id=recipient, + agent_type="pm", context_summary=content, ) self.dispatch_message(notice) @@ -47,6 +59,27 @@ def dispatch_message(self, message: 
BaseMessage) -> None: else: self.queue.append(message) + def _create_agent(self, agent_type: str, agent_id: str) -> BaseAgent: + builder = self.agent_builders.get(agent_type) + if not builder: + raise ValueError(f"Unknown agent type: {agent_type}") + return builder(agent_id, self) + + def delegate_task(self, sender_id: str, agent_type: str, context_summary: str) -> str: + """Spawn a new agent and dispatch a delegation notice.""" + agent_id = uuid.uuid4().hex[:5] + agent = self._create_agent(agent_type, agent_id) + self.register_agent(agent) + notice = DelegationNotice( + task_id=self.next_task_id(), + sender_id=sender_id, + recipient_id=agent_id, + context_summary=context_summary, + agent_type=agent_type, + ) + self.dispatch_message(notice) + return agent_id + def run(self) -> Iterator[Tuple[str, StreamChunk]]: """Process queued messages yielding agent outputs.""" while self.queue: @@ -56,6 +89,20 @@ def run(self) -> Iterator[Tuple[str, StreamChunk]]: continue content = msg.content or getattr(msg, "context_summary", "") + rollback_len = len(agent.history) + + if msg.sender_id != "user": + sender_type = self.agent_types.get(msg.sender_id, "unknown") + prefix = ( + f"Agent {sender_type} (id: {msg.sender_id}) sent the following message:\n" + ) + role = settings.incoming_role + agent.history.append({"role": role, "content": prefix + content}) + content = "" + else: + agent.history.append({"role": "user", "content": content}) + content = "" + keep_pinging = True in_thought = False orig_thinking = False @@ -64,7 +111,6 @@ def run(self) -> Iterator[Tuple[str, StreamChunk]]: ended = False had_post_end = False call_seen = False - history_len = len(agent.history) try: for chunk in agent.stream(content): c = dict(chunk) @@ -84,10 +130,11 @@ def run(self) -> Iterator[Tuple[str, StreamChunk]]: call_seen = True except openai.OpenAIError as e: self.queue.insert(0, msg) - agent.history = agent.history[:history_len] + agent.history = agent.history[:rollback_len] yield agent.manifest.id, {"error": str(e), "body": getattr(e, "body", None)} return + rollback_len = len(agent.history) if call_seen: keep_pinging = True content = "" diff --git a/afk/config.py b/afk/config.py index 723de08..7a601ac 100644 --- a/afk/config.py +++ b/afk/config.py @@ -17,6 +17,7 @@ class Settings: def __init__(self) -> None: self.debug = get_env("AFK_DEBUG", "false").lower() == "true" self.default_model = get_env("AFK_MODEL", "llama-3.3-70b-versatile") + self.incoming_role = get_env("AFK_INCOMING_ROLE", "user") settings = Settings() diff --git a/afk/main.py b/afk/main.py index 1c697a2..6160309 100644 --- a/afk/main.py +++ b/afk/main.py @@ -13,7 +13,7 @@ sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) from afk.cap.orchestrator import Orchestrator -from afk.agents import PMAgent, AnalystAgent, QAAgent +from afk.agents import PMAgent def run() -> None: @@ -22,9 +22,7 @@ def run() -> None: if "orchestrator" not in st.session_state: orch = Orchestrator() - orch.register_agent(PMAgent(orch)) - orch.register_agent(AnalystAgent(orch)) - orch.register_agent(QAAgent(orch)) + orch.register_agent(PMAgent(orchestrator=orch)) st.session_state.orchestrator = orch st.session_state.messages = [] @@ -182,14 +180,15 @@ def start_agent(agent_id: str) -> None: if current_agent is not None: st.session_state.messages.append( { - "role": current_agent, + "role": orchestrator.agent_types.get(current_agent, current_agent), "content": main_buf, "thinking": thought_buf, - "avatar": AVATARS.get(current_agent), + "avatar": 
AVATARS.get(orchestrator.agent_types.get(current_agent, current_agent)), } ) current_agent = agent_id - container = st.chat_message(agent_id, avatar=AVATARS.get(agent_id)) + agent_type = orchestrator.agent_types.get(agent_id, agent_id) + container = st.chat_message(agent_type, avatar=AVATARS.get(agent_type)) body = container.container() main_placeholder = body.empty() exp_container = None @@ -268,10 +267,10 @@ def start_agent(agent_id: str) -> None: if current_agent is not None and not had_error: st.session_state.messages.append( { - "role": current_agent, + "role": orchestrator.agent_types.get(current_agent, current_agent), "content": main_buf, "thinking": thought_buf, - "avatar": AVATARS.get(current_agent), + "avatar": AVATARS.get(orchestrator.agent_types.get(current_agent, current_agent)), } ) diff --git a/docs/custom_agent_protocol.md b/docs/custom_agent_protocol.md index e7c00b0..3d8378d 100644 --- a/docs/custom_agent_protocol.md +++ b/docs/custom_agent_protocol.md @@ -37,6 +37,8 @@ This clear separation of roles ensures each agent focuses on its domain (plannin Agents and users communicate freely using the unified **Message** format. Any active participant—whether an agent instance or the user—may send a message to any other participant at any time. There are no protocol-enforced restrictions on who can message whom. This supports open-ended clarifications, brainstorming, feedback loops, and interactive question-and-answer flows among multiple agents or between agents and the user. +Incoming messages are added to the recipient agent's history with a prefix of the form `Agent {agent_type} (id: {id}) sent the following message:`. The role used (`assistant` or `user`) is controlled by the `AFK_INCOMING_ROLE` environment variable. + ### Delegation Delegation is a controlled sub-process distinct from general messaging. Only agents listed in an agent’s `allowed_agents` may receive a `delegation_notice` from that agent. Furthermore, delegation occurs only when the agent’s `delegate_rules` conditions are met. 
Key points: @@ -206,6 +208,7 @@ All communications use a unified **Message** envelope, with specialized variants "task_id": "string", "sender_id": "string", "recipient_id": "string", + "agent_type": "string", "context_summary": "string", "attachments": [ /* same as Base Message */ diff --git a/tests/test_models.py b/tests/test_models.py index 40ca53c..fc7a6be 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -16,6 +16,7 @@ def test_delegation_notice(): sender_id="orchestrator", recipient_id="pm", context_summary="test", + agent_type="pm", ) assert notice.type == "delegation_notice" diff --git a/tests/test_orchestrator.py b/tests/test_orchestrator.py index 3cb207c..b951867 100644 --- a/tests/test_orchestrator.py +++ b/tests/test_orchestrator.py @@ -17,7 +17,7 @@ def create_agent(agent_id, orchestrator): delegate_rules={"b": DelegateRule(when_to_delegate="always")} if agent_id == "a" else {}, mcp_bindings=[], ) - return BaseAgent(manifest, orchestrator) + return BaseAgent(manifest, orchestrator, agent_type=agent_id) class FakeChunk: @@ -30,7 +30,13 @@ def test_orchestrator_flow(monkeypatch): a = create_agent("a", orch) b = create_agent("b", orch) orch.register_agent(a) - orch.register_agent(b) + + def create_b(agent_type, agent_id): + assert agent_type == "b" + b.manifest.id = agent_id + return b + + monkeypatch.setattr(orch, "_create_agent", create_b) call_a = 0 call_b = 0 @@ -40,7 +46,7 @@ def fake_create_a(**_): call_a += 1 if call_a == 1: yield FakeChunk(content="ping") - yield FakeChunk(tool_calls=[{"function": {"name": "delegate_task", "arguments": '{"recipient_id":"b","context_summary":"do"}'}}]) + yield FakeChunk(tool_calls=[{"function": {"name": "delegate_task", "arguments": '{"agent_type":"b","context_summary":"do"}'}}]) else: if False: yield # empty generator @@ -60,8 +66,8 @@ def fake_create_b(**_): orch.handle_user_message("start") outputs = list(orch.run()) - assert any(agent_id == "a" for agent_id, _ in outputs) - assert any(agent_id == "b" for agent_id, _ in outputs) + assert any(agent_id == a.manifest.id for agent_id, _ in outputs) + assert any(agent_id == b.manifest.id for agent_id, _ in outputs) assert orch.user_messages a.shutdown() @@ -88,7 +94,7 @@ def fake_create(**_): monkeypatch.setattr(agent.client.chat.completions, "create", fake_create) - orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi")) + orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a")) outputs = list(orch.run()) # ensure the orchestrator called the model twice assert call_count == 2 @@ -117,7 +123,7 @@ def fake_create(**_): monkeypatch.setattr(agent.client.chat.completions, "create", fake_create) - orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi")) + orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a")) outputs = list(orch.run()) assert call_count == 2 @@ -147,7 +153,7 @@ def fake_create(**_): monkeypatch.setattr(agent.client.chat.completions, "create", fake_create) - orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi")) + orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a")) outputs = list(orch.run()) assert call_count == 2 @@ -167,7 +173,7 @@ def fake_stream(**_): 
monkeypatch.setattr(agent.client.chat.completions, "create", lambda **_: fake_stream()) - orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi")) + orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a")) outputs = list(orch.run()) assert any(c.get("status") == "end_thinking" for _, c in outputs) @@ -198,7 +204,7 @@ def fake_create(**_): monkeypatch.setattr(agent.client.chat.completions, "create", fake_create) - orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi")) + orch.dispatch_message(DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a")) outputs = list(orch.run()) assert any(c.get("call") and c.get("thinking") for _, c in outputs) @@ -219,7 +225,7 @@ def raise_error(_): monkeypatch.setattr(agent, "stream", raise_error) orch.dispatch_message( - DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi") + DelegationNotice(task_id="T1", sender_id="user", recipient_id="a", context_summary="hi", agent_type="a") ) gen = orch.run() agent_id, chunk = next(gen) From cedf70df5190798610993e1f9066c717666a9f93 Mon Sep 17 00:00:00 2001 From: Vayoa <89143290+vayoa@users.noreply.github.com> Date: Mon, 14 Jul 2025 10:29:20 +0300 Subject: [PATCH 2/4] Fix infinite loop after delegation --- afk/cap/orchestrator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/afk/cap/orchestrator.py b/afk/cap/orchestrator.py index 544354c..5d4733c 100644 --- a/afk/cap/orchestrator.py +++ b/afk/cap/orchestrator.py @@ -147,6 +147,7 @@ def run(self) -> Iterator[Tuple[str, StreamChunk]]: keep_pinging = True content = "" in_thought = True + orig_thinking = False else: content = "" in_thought = False From f98a6d862e1880f2b01087de817ab0ccd1d1bf35 Mon Sep 17 00:00:00 2001 From: Vayoa <89143290+vayoa@users.noreply.github.com> Date: Mon, 14 Jul 2025 14:37:47 +0300 Subject: [PATCH 3/4] move reping logic to base agent --- afk/agents/base.py | 53 ++++++++++++++++++++++++++++++++++++++ afk/cap/orchestrator.py | 57 ++++++----------------------------------- 2 files changed, 61 insertions(+), 49 deletions(-) diff --git a/afk/agents/base.py b/afk/agents/base.py index cbd0f85..ea15ada 100644 --- a/afk/agents/base.py +++ b/afk/agents/base.py @@ -495,6 +495,59 @@ def stream(self, user_message: str) -> Iterator[StreamChunk]: if assistant_content: self.history.append({"role": "assistant", "content": assistant_content}) + def respond(self, user_message: str) -> Iterator[StreamChunk]: + """Stream a response, automatically continuing until complete.""" + keep_pinging = True + in_thought = False + orig_thinking = False + content = user_message + + while keep_pinging: + keep_pinging = False + ended = False + had_post_end = False + call_seen = False + rollback_len = len(self.history) + try: + for chunk in self.stream(content): + c = dict(chunk) + orig_thinking = c.get("thinking", orig_thinking) + if c.get("status") == "start_thinking": + in_thought = True + if c.get("status") == "end_thinking": + ended = True + elif ended and (c.get("text") or c.get("call")): + had_post_end = True + if c.get("text"): + in_thought = False + if in_thought: + c["thinking"] = True + yield c + if c.get("call"): + call_seen = True + except openai.OpenAIError: + self.history = self.history[:rollback_len] + raise + + rollback_len = len(self.history) + if call_seen: + keep_pinging = True + content = "" + 
in_thought = True + elif ended and not had_post_end: + keep_pinging = True + content = "" + in_thought = True + elif orig_thinking: + keep_pinging = True + content = "" + in_thought = True + orig_thinking = False + else: + content = "" + in_thought = False + orig_thinking = False + # ------------------------------------------------------------------ # IPython helpers # ------------------------------------------------------------------ diff --git a/afk/cap/orchestrator.py b/afk/cap/orchestrator.py index 5d4733c..e153b6e 100644 --- a/afk/cap/orchestrator.py +++ b/afk/cap/orchestrator.py @@ -103,52 +103,11 @@ def run(self) -> Iterator[Tuple[str, StreamChunk]]: agent.history.append({"role": "user", "content": content}) content = "" - keep_pinging = True - in_thought = False - orig_thinking = False - while keep_pinging: - keep_pinging = False - ended = False - had_post_end = False - call_seen = False - try: - for chunk in agent.stream(content): - c = dict(chunk) - orig_thinking = c.get("thinking", orig_thinking) - if c.get("status") == "start_thinking": - in_thought = True - if c.get("status") == "end_thinking": - ended = True - elif ended and (c.get("text") or c.get("call")): - had_post_end = True - if c.get("text"): - in_thought = False - if in_thought: - c["thinking"] = True - yield agent.manifest.id, c - if c.get("call"): - call_seen = True - except openai.OpenAIError as e: - self.queue.insert(0, msg) - agent.history = agent.history[:rollback_len] - yield agent.manifest.id, {"error": str(e), "body": getattr(e, "body", None)} - return - - rollback_len = len(agent.history) - if call_seen: - keep_pinging = True - content = "" - in_thought = True - elif ended and not had_post_end: - keep_pinging = True - content = "" - in_thought = True - elif orig_thinking: - keep_pinging = True - content = "" - in_thought = True - orig_thinking = False - else: - content = "" - in_thought = False - orig_thinking = False + try: + for chunk in agent.respond(content): + yield agent.manifest.id, chunk + except openai.OpenAIError as e: + self.queue.insert(0, msg) + agent.history = agent.history[:rollback_len] + yield agent.manifest.id, {"error": str(e), "body": getattr(e, "body", None)} + return From eee870270cb61e918a0d98515a1fc9169ae9ace7 Mon Sep 17 00:00:00 2001 From: Vayoa <89143290+vayoa@users.noreply.github.com> Date: Mon, 14 Jul 2025 15:05:14 +0300 Subject: [PATCH 4/4] Fix streaming lag --- afk/agents/base.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/afk/agents/base.py b/afk/agents/base.py index ea15ada..0839caa 100644 --- a/afk/agents/base.py +++ b/afk/agents/base.py @@ -496,7 +496,7 @@ def stream(self, user_message: str) -> Iterator[StreamChunk]: self.history.append({"role": "assistant", "content": assistant_content}) def respond(self, user_message: str) -> Iterator[StreamChunk]: - """Stream a response, automatically continuing until complete.""" + """Stream a response and automatically re-ping until complete.""" keep_pinging = True in_thought = False orig_thinking = False @@ -509,8 +509,7 @@ def respond(self, user_message: str) -> Iterator[StreamChunk]: call_seen = False rollback_len = len(self.history) try: - for chunk in self.stream(content): - c = dict(chunk) + for c in self.stream(content): orig_thinking = c.get("thinking", orig_thinking) if c.get("status") == "start_thinking": in_thought = True
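
For reference, a minimal usage sketch of the delegation flow after this series. It is illustrative only: Orchestrator, PMAgent, register_agent, handle_user_message, delegate_task, agent_builders, agent_types, and run are the names introduced or touched by the patches above, while the surrounding script, the sample strings, and the direct call to delegate_task (normally triggered by an agent's delegate_task tool call) are assumptions made for the example. Running it end to end would also require a valid GROQ_API_KEY.

from afk.cap.orchestrator import Orchestrator
from afk.agents import PMAgent

orch = Orchestrator()
orch.register_agent(PMAgent(orchestrator=orch))  # only the PM is registered up front

# A user message is queued as a delegation notice for the PM agent.
orch.handle_user_message("Analyse the latest report")

# When an agent invokes the delegate_task tool, the orchestrator now spawns the
# target agent on demand instead of requiring it to be pre-registered.
# Calling it directly here only to show the effect:
analyst_id = orch.delegate_task(
    sender_id="pm",
    agent_type="analyst",  # resolved through orch.agent_builders
    context_summary="Summarise the findings",
)
assert analyst_id in orch.agents                  # registered under a short random id
assert orch.agent_types[analyst_id] == "analyst"

# run() streams (agent_id, chunk) pairs; messages coming from other agents are
# appended to the recipient's history with the
# "Agent {agent_type} (id: {id}) sent the following message:" prefix, using the
# role configured by AFK_INCOMING_ROLE (default "user").
for agent_id, chunk in orch.run():
    print(agent_id, chunk)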