From 1e38b73d0fd9ee01f45bd4b1bd4e4f532836e8cc Mon Sep 17 00:00:00 2001
From: jmatejcz
Date: Fri, 17 Oct 2025 11:12:43 +0200
Subject: [PATCH 1/2] refactor: params to plan prompt

---
 .../rai/agents/langchain/core/megamind.py | 68 +++++++++++++++----
 1 file changed, 53 insertions(+), 15 deletions(-)

diff --git a/src/rai_core/rai/agents/langchain/core/megamind.py b/src/rai_core/rai/agents/langchain/core/megamind.py
index 860820d6a..467ef6c14 100644
--- a/src/rai_core/rai/agents/langchain/core/megamind.py
+++ b/src/rai_core/rai/agents/langchain/core/megamind.py
@@ -209,12 +209,45 @@ def get_initial_megamind_state(task: str):
     )
 
 
+@dataclass
+class PlanPrompts:
+    """Configurable prompts for the planning step."""
+
+    objective_template: str = "You are given objective to complete: {original_task}"
+    steps_done_header: str = "Steps that were already done successfully:\n"
+    next_step_prompt: str = "\nBased on that outcome and past steps come up with the next step and delegate it to selected agent."
+    first_step_prompt: str = (
+        "\nCome up with the first step and delegate it to selected agent."
+    )
+    completion_prompt: str = (
+        "\n\nWhen you decide that the objective is completed return response to user."
+    )
+
+    @classmethod
+    def default(cls):
+        """Return default prompts."""
+        return cls()
+
+    @classmethod
+    def custom(cls, **kwargs):
+        """Create custom prompts with overrides."""
+        default = cls.default()
+        for key, value in kwargs.items():
+            if hasattr(default, key):
+                setattr(default, key, value)
+        return default
+
+
 def plan_step(
     megamind_agent: BaseChatModel,
     state: MegamindState,
+    prompts: Optional[PlanPrompts] = None,
     context_providers: Optional[List[ContextProvider]] = None,
 ) -> MegamindState:
     """Initial planning step."""
+    if prompts is None:
+        prompts = PlanPrompts.default()
+
     if "original_task" not in state:
         state["original_task"] = state["messages"][0].content[0]["text"]
     if "steps_done" not in state:
@@ -222,14 +255,18 @@ def plan_step(
     if "step" not in state:
         state["step"] = None
 
-    megamind_prompt = f"You are given objective to complete: {state['original_task']}"
+    megamind_prompt = prompts.objective_template.format(
+        original_task=state["original_task"]
+    )
     if context_providers:
         for provider in context_providers:
             megamind_prompt += provider.get_context()
         megamind_prompt += "\n"
+
+    # Add completed steps if any
     if state["steps_done"]:
         megamind_prompt += "\n\n"
-        megamind_prompt += "Steps that were already done successfully:\n"
+        megamind_prompt += prompts.steps_done_header
         steps_done = "\n".join(
             [f"{i + 1}. {step}" for i, step in enumerate(state["steps_done"])]
         )
@@ -239,22 +276,17 @@ def plan_step(
     if state["step"]:
         if not state["step_success"]:
             raise ValueError("Step success should be specified at this point")
-
-        megamind_prompt += "\nBased on that outcome and past steps come up with the next step and delegate it to selected agent."
+        megamind_prompt += prompts.next_step_prompt
     else:
-        megamind_prompt += "\n"
-        megamind_prompt += (
-            "Come up with the fist step and delegate it to selected agent."
-        )
+        megamind_prompt += prompts.first_step_prompt
+
+    megamind_prompt += prompts.completion_prompt
 
-    megamind_prompt += "\n\n"
-    megamind_prompt += (
-        "When you decide that the objective is completed return response to user."
-    )
 
     messages = [
         HumanMultimodalMessage(content=megamind_prompt),
     ]
+
     # NOTE (jmatejcz) the response of megamind isnt appended to messages
     # as Command from handoff instantly transitions to next node
     megamind_agent.invoke({"messages": messages})
@@ -265,7 +297,8 @@ def create_megamind(
     megamind_llm: BaseChatModel,
     executors: List[Executor],
     megamind_system_prompt: Optional[str] = None,
-    task_planning_prompt: Optional[str] = None,
+    anylyzer_prompt: Optional[str] = None,
+    plan_prompts: Optional[PlanPrompts] = None,
     context_providers: List[ContextProvider] = [],
 ) -> CompiledStateGraph:
     """Create a megamind langchain agent
@@ -292,7 +325,7 @@ def create_megamind(
             llm=executor.llm,
             tools=executor.tools,
             system_prompt=executor.system_prompt,
-            planning_prompt=task_planning_prompt,
+            planning_prompt=anylyzer_prompt,
         )
 
         handoff_tools.append(
@@ -325,7 +358,12 @@ def create_megamind(
 
     graph = StateGraph(MegamindState).add_node(
         "megamind",
-        partial(plan_step, megamind_agent, context_providers=context_providers),
+        partial(
+            plan_step,
+            megamind_agent,
+            context_providers=context_providers,
+            prompts=plan_prompts,
+        ),
     )
     for agent_name, agent in executor_agents.items():
         graph.add_node(agent_name, agent)

From 560861d30345fe63e73e7e744f694347959147d6 Mon Sep 17 00:00:00 2001
From: jmatejcz
Date: Fri, 17 Oct 2025 12:15:08 +0200
Subject: [PATCH 2/2] chore: version bump

---
 src/rai_core/pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rai_core/pyproject.toml b/src/rai_core/pyproject.toml
index 5bbe774b8..68195dc5a 100644
--- a/src/rai_core/pyproject.toml
+++ b/src/rai_core/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rai_core"
-version = "2.5.4"
+version = "2.6.4"
 description = "Core functionality for RAI framework"
 authors = ["Maciej Majek ", "Bartłomiej Boczek ", "Kajetan Rachwał "]
 readme = "README.md"
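Below is a minimal usage sketch (illustrative only, not part of either commit) of the PlanPrompts hook added in PATCH 1/2. Only PlanPrompts.custom, the anylyzer_prompt/plan_prompts parameters, and create_megamind come from the diff above; the import path, the planner LLM, and the executor list are assumed placeholders.

    # Illustrative sketch; assumes PlanPrompts and create_megamind are importable
    # from the module modified in PATCH 1/2. planner_llm and executors are
    # placeholders prepared elsewhere (a BaseChatModel and a List[Executor]).
    from rai.agents.langchain.core.megamind import PlanPrompts, create_megamind

    # Override only selected prompt fragments; the remaining fields keep their
    # defaults (PlanPrompts.custom silently skips unknown keys via hasattr).
    prompts = PlanPrompts.custom(
        objective_template="Objective for this mission: {original_task}",
        completion_prompt="\n\nReport back to the operator once the objective is met.",
    )

    graph = create_megamind(
        megamind_llm=planner_llm,  # placeholder chat model
        executors=executors,       # placeholder executor definitions
        plan_prompts=prompts,      # omit to fall back to PlanPrompts.default()
    )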