From ccc45b9efbfebee405357477a718eb77ea3fd7ac Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Tue, 20 Jun 2023 14:28:25 -0700 Subject: [PATCH 1/2] openai functions --- .env.template | 6 ++++- autogpt/agent/agent.py | 42 ++++++++++++++++++++++++++--- autogpt/command_decorator.py | 12 ++++++++- autogpt/config/ai_config.py | 2 +- autogpt/config/config.py | 2 ++ autogpt/json_utils/utilities.py | 11 +++++--- autogpt/llm/chat.py | 12 ++++++--- autogpt/llm/utils/__init__.py | 6 +++++ autogpt/models/command.py | 8 +++--- autogpt/models/command_argument.py | 9 +++++++ autogpt/models/command_function.py | 18 +++++++++++++ autogpt/prompts/generator.py | 23 +++++++++++++--- tests/unit/test_commands.py | 28 +++++++++---------- tests/unit/test_prompt_generator.py | 5 ++-- 14 files changed, 148 insertions(+), 36 deletions(-) create mode 100644 autogpt/models/command_argument.py create mode 100644 autogpt/models/command_function.py diff --git a/.env.template b/.env.template index 067452457937..4eb344a7b8b7 100644 --- a/.env.template +++ b/.env.template @@ -25,10 +25,14 @@ OPENAI_API_KEY=your-openai-api-key ## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml) # PROMPT_SETTINGS_FILE=prompt_settings.yaml -## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url +## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url # the following is an example: # OPENAI_API_BASE_URL=http://localhost:443/v1 +## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling +# the following is an example: +# OPENAI_FUNCTIONS=False + ## AUTHORISE COMMAND KEY - Key to authorise commands # AUTHORISE_COMMAND_KEY=y diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 7537233efc0f..879c7c8e0644 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -10,7 +10,6 @@ from autogpt.json_utils.utilities import extract_json_from_response, validate_json from autogpt.llm.chat import chat_with_ai from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS -from autogpt.llm.utils import count_string_tokens from autogpt.log_cycle.log_cycle import ( FULL_MESSAGE_HISTORY_FILE_NAME, NEXT_ACTION_FILE_NAME, @@ -20,6 +19,7 @@ from autogpt.logs import logger, print_assistant_thoughts from autogpt.memory.message_history import MessageHistory from autogpt.memory.vector import VectorMemory +from autogpt.models.command_function import CommandFunction from autogpt.models.command_registry import CommandRegistry from autogpt.speech import say_text from autogpt.spinner import Spinner @@ -138,7 +138,8 @@ def signal_handler(signum, frame): self.system_prompt, self.triggering_prompt, self.fast_token_limit, - self.config.fast_llm_model, + self.get_functions_from_commands(), + self.config.fast_llm_model ) try: @@ -275,7 +276,7 @@ def signal_handler(signum, frame): agent=self, ) result = f"Command {command_name} returned: " f"{command_result}" - + from autogpt.llm.utils import count_string_tokens result_tlength = count_string_tokens( str(command_result), self.config.fast_llm_model ) @@ -314,3 +315,38 @@ def _resolve_pathlike_command_args(self, command_args): self.workspace.get_path(command_args[pathlike]) ) return command_args + + + def get_functions_from_commands(self) -> list[CommandFunction]: + """Get functions from the 
commands. "functions" in this context refers to OpenAI functions + see https://platform.openai.com/docs/guides/gpt/function-calling + """ + functions = [] + if not self.config.openai_functions: + return functions + for command in self.command_registry.commands.values(): + properties = {} + required = [] + + for argument in command.arguments: + properties[argument.name] = { + "type": argument.type, + "description": argument.description, + } + if argument.required: + required.append(argument.name) + + parameters = { + "type": "object", + "properties": properties, + "required": required, + } + functions.append( + CommandFunction( + name=command.name, + description=command.description, + parameters=parameters, + ) + ) + + return functions diff --git a/autogpt/command_decorator.py b/autogpt/command_decorator.py index 1edd766ec4dd..d86e41096b22 100644 --- a/autogpt/command_decorator.py +++ b/autogpt/command_decorator.py @@ -3,6 +3,7 @@ from autogpt.config import Config from autogpt.models.command import Command +from autogpt.models.command_argument import CommandArgument # Unique identifier for auto-gpt commands AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command" @@ -18,11 +19,20 @@ def command( """The command decorator is used to create Command objects from ordinary functions.""" def decorator(func: Callable[..., Any]) -> Command: + typed_arguments = [ + CommandArgument( + name=arg_name, + description=argument.get("description"), + type=argument.get("type", "string"), + required=argument.get("required", False), + ) + for arg_name, argument in arguments.items() + ] cmd = Command( name=name, description=description, method=func, - signature=arguments, + arguments=typed_arguments, enabled=enabled, disabled_reason=disabled_reason, ) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index 6b9e15f181b7..3c645abe36f3 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -164,5 +164,5 @@ def construct_full_prompt( if self.api_budget > 0.0: full_prompt += f"\nIt takes money to let you run. 
Your API budget is ${self.api_budget:.3f}" self.prompt_generator = prompt_generator - full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}" + full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}" return full_prompt diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 5e0999b15666..d032f8224eab 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -88,6 +88,8 @@ def __init__(self) -> None: if self.openai_organization is not None: openai.organization = self.openai_organization + self.openai_functions = os.getenv("OPENAI_FUNCTIONS", "False") == "True" + self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") # ELEVENLABS_VOICE_1_ID is deprecated and included for backwards-compatibility self.elevenlabs_voice_id = os.getenv( diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index 4fbf0c0578bf..95a406f566fa 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -29,11 +29,16 @@ def extract_json_from_response(response_content: str) -> dict: def llm_response_schema( - schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT, + config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT ) -> dict[str, Any]: filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json") with open(filename, "r") as f: - return json.load(f) + + json_schema = json.load(f) + if config.openai_functions: + del json_schema["properties"]["command"] + json_schema["required"] = ["thoughts"] + return json_schema def validate_json( @@ -47,7 +52,7 @@ def validate_json( Returns: bool: Whether the json_object is valid or not """ - schema = llm_response_schema(schema_name) + schema = llm_response_schema(config, schema_name) validator = Draft7Validator(schema) if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path): diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index 0a088d061be8..2ab319c4d2f1 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -1,7 +1,9 @@ from __future__ import annotations import time -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List + +from autogpt.models.command_function import CommandFunction if TYPE_CHECKING: from autogpt.agent.agent import Agent @@ -21,7 +23,8 @@ def chat_with_ai( system_prompt: str, triggering_prompt: str, token_limit: int, - model: str | None = None, + functions: List[CommandFunction], + model: str | None = None ): """ Interact with the OpenAI API, sending the prompt, user input, @@ -94,6 +97,7 @@ def chat_with_ai( current_tokens_used += count_message_tokens([user_input_msg], model) current_tokens_used += 500 # Reserve space for new_summary_message + current_tokens_used += 500 # Reserve space for the openai functions TODO improve # Add Messages until the token limit is reached or there are no more messages to add. 
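# The flat 500-token reservation above is a stopgap (the "TODO improve" marks
# it as such). A tighter budget could measure the serialized definitions
# instead -- a sketch only, reusing count_string_tokens and the
# CommandFunction.__dict__ property from this patch series, and assuming the
# JSON dump roughly approximates the tokens the API spends on function
# definitions:
#
#     import json
#     from autogpt.llm.utils import count_string_tokens
#
#     if functions:
#         current_tokens_used += count_string_tokens(
#             json.dumps([function.__dict__ for function in functions]), model
#         )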
for cycle in reversed(list(agent.history.per_cycle(agent.config))): @@ -193,11 +197,13 @@ def chat_with_ai( assistant_reply = create_chat_completion( prompt=message_sequence, config=agent.config, - max_tokens=tokens_remaining, + functions=functions, + max_tokens=tokens_remaining ) # Update full message history agent.history.append(user_input_msg) + assistant_reply = "okfdsnfjdhsfndjsh" agent.history.add("assistant", assistant_reply, "ai_response") return assistant_reply diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index 3b0d3e17608e..44ff80ca63e6 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -11,6 +11,7 @@ from ..base import ChatSequence, Message from ..providers import openai as iopenai from .token_counter import * +from ...models.command_function import CommandFunction def call_ai_function( @@ -88,6 +89,7 @@ def create_text_completion( def create_chat_completion( prompt: ChatSequence, config: Config, + functions: Optional[List[CommandFunction]] = [], model: Optional[str] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, @@ -134,6 +136,10 @@ def create_chat_completion( chat_completion_kwargs[ "deployment_id" ] = config.get_azure_deployment_id_for_model(model) + if functions: + chat_completion_kwargs["functions"] = [ + function.__dict__ for function in functions + ] response = iopenai.create_chat_completion( messages=prompt.raw(), diff --git a/autogpt/models/command.py b/autogpt/models/command.py index f88bbcae6081..aa0883a34f88 100644 --- a/autogpt/models/command.py +++ b/autogpt/models/command.py @@ -9,7 +9,7 @@ class Command: Attributes: name (str): The name of the command. description (str): A brief description of what the command does. - signature (str): The signature of the function that the command executes. Defaults to None. + arguments (str): The arguments of the function that the command executes. Defaults to None. 
""" def __init__( @@ -17,14 +17,14 @@ def __init__( name: str, description: str, method: Callable[..., Any], - signature: Dict[str, Dict[str, Any]], + arguments: Dict[str, Dict[str, Any]], enabled: bool | Callable[[Config], bool] = True, disabled_reason: Optional[str] = None, ): self.name = name self.description = description self.method = method - self.signature = signature + self.arguments = arguments self.enabled = enabled self.disabled_reason = disabled_reason @@ -38,4 +38,4 @@ def __call__(self, *args, **kwargs) -> Any: return self.method(*args, **kwargs) def __str__(self) -> str: - return f"{self.name}: {self.description}, args: {self.signature}" + return f"{self.name}: {self.description}, args: {self.arguments}" diff --git a/autogpt/models/command_argument.py b/autogpt/models/command_argument.py new file mode 100644 index 000000000000..c70c0880d6e2 --- /dev/null +++ b/autogpt/models/command_argument.py @@ -0,0 +1,9 @@ +class CommandArgument: + def __init__(self, name: str, type: str, description: str, required: bool): + self.name = name + self.type = type + self.description = description + self.required = required + + def __repr__(self): + return f"CommandArgument('{self.name}', '{self.type}', '{self.description}', {self.required})" diff --git a/autogpt/models/command_function.py b/autogpt/models/command_function.py new file mode 100644 index 000000000000..7cd85e453ab3 --- /dev/null +++ b/autogpt/models/command_function.py @@ -0,0 +1,18 @@ +from typing import Any + + +class CommandFunction: + """Represents a "function" in OpenAI, which is mapped to a Command in Auto-GPT""" + + def __init__(self, name: str, description: str, parameters: dict[str, Any]): + self.name = name + self.description = description + self.parameters = parameters + + @property + def __dict__(self) -> dict: + return { + "name": self.name, + "description": self.description, + "parameters": self.parameters, + } diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py index 2a0334bf45a7..09c09bff4831 100644 --- a/autogpt/prompts/generator.py +++ b/autogpt/prompts/generator.py @@ -1,7 +1,10 @@ """ A module for generating custom prompt strings.""" +import json from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from autogpt.config import Config from autogpt.json_utils.utilities import llm_response_schema +from autogpt.models.command import Command if TYPE_CHECKING: from autogpt.models.command_registry import CommandRegistry @@ -127,7 +130,7 @@ def _generate_numbered_list(self, items: List[Any], item_type="list") -> str: else: return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - def generate_prompt_string(self) -> str: + def generate_prompt_string(self, config: Config) -> str: """ Generate a prompt string based on the constraints, commands, resources, and performance evaluations. 
@@ -137,11 +140,23 @@ def generate_prompt_string(self) -> str: """ return ( f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - "Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" + f"{generate_prompt_string(self, self.commands, config)}" "Performance Evaluation:\n" f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" "Respond with only valid JSON conforming to the following schema: \n" - f"{llm_response_schema()}\n" + f"{json.dumps(llm_response_schema(config))}\n" ) + +def generate_prompt_string(self, commands: List[Command], config: Config) -> str: + """ + Generate a prompt string based on the constraints, commands, resources, + and performance evaluations. + + Returns: + str: The generated prompt string. + """ + if config.openai_functions: + return "" + return ("Commands:\n" + f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n") diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index cb3f539acec2..2730a3bd7fc7 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -8,7 +8,7 @@ from autogpt.models.command import Command from autogpt.models.command_registry import CommandRegistry -SIGNATURE = "(arg1: int, arg2: str) -> str" +ARGUMENTS = "(arg1: int, arg2: str) -> str" class TestCommand: @@ -26,13 +26,13 @@ def test_command_creation(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) assert cmd.name == "example" assert cmd.description == "Example command" assert cmd.method == self.example_command_method - assert cmd.signature == "(arg1: int, arg2: str) -> str" + assert cmd.arguments == "(arg1: int, arg2: str) -> str" def test_command_call(self): """Test that Command(*args) calls and returns the result of method(*args).""" @@ -41,7 +41,7 @@ def test_command_call(self): name="example", description="Example command", method=self.example_command_method, - signature={ + arguments={ "prompt": { "type": "string", "description": "The prompt used to generate the image", @@ -58,21 +58,21 @@ def test_command_call_with_invalid_arguments(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) with pytest.raises(TypeError): cmd(arg1="invalid", does_not_exist="test") - def test_command_custom_signature(self): - custom_signature = "custom_arg1: int, custom_arg2: str" + def test_command_custom_arguments(self): + custom_arguments = "custom_arg1: int, custom_arg2: str" cmd = Command( name="example", description="Example command", method=self.example_command_method, - signature=custom_signature, + arguments=custom_arguments, ) - assert cmd.signature == custom_signature + assert cmd.arguments == custom_arguments class TestCommandRegistry: @@ -87,7 +87,7 @@ def test_register_command(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) registry.register(cmd) @@ -102,7 +102,7 @@ def test_unregister_command(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) registry.register(cmd) @@ -117,7 +117,7 @@ def test_get_command(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) registry.register(cmd) @@ 
-139,7 +139,7 @@ def test_call_command(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) registry.register(cmd) @@ -161,7 +161,7 @@ def test_get_command_prompt(self): name="example", description="Example command", method=self.example_command_method, - signature=SIGNATURE, + arguments=ARGUMENTS, ) registry.register(cmd) diff --git a/tests/unit/test_prompt_generator.py b/tests/unit/test_prompt_generator.py index 1fa1754d744d..4b8fada2ae90 100644 --- a/tests/unit/test_prompt_generator.py +++ b/tests/unit/test_prompt_generator.py @@ -1,5 +1,6 @@ from unittest import TestCase +from autogpt.config import Config from autogpt.prompts.generator import PromptGenerator @@ -59,7 +60,7 @@ def test_add_performance_evaluation(self): self.generator.add_performance_evaluation(evaluation) self.assertIn(evaluation, self.generator.performance_evaluation) - def test_generate_prompt_string(self): + def test_generate_prompt_string(self, config: Config): """ Test if the generate_prompt_string() method generates a prompt string with all the added constraints, commands, resources, and evaluations. @@ -94,7 +95,7 @@ def test_generate_prompt_string(self): self.generator.add_performance_evaluation(evaluation) # Generate the prompt string and verify its correctness - prompt_string = self.generator.generate_prompt_string() + prompt_string = self.generator.generate_prompt_string(config) self.assertIsNotNone(prompt_string) # Check if all constraints, commands, resources, and evaluations are present in the prompt string From 7be73082bcca5ab7275f2d4b6694d11f869fe58c Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Tue, 20 Jun 2023 16:30:07 -0700 Subject: [PATCH 2/2] Implement openai functions feature flag --- autogpt/agent/agent.py | 12 +- autogpt/agent/agent_manager.py | 8 +- autogpt/app.py | 24 +- autogpt/json_utils/utilities.py | 1 - autogpt/llm/chat.py | 8 +- autogpt/llm/utils/__init__.py | 20 +- autogpt/memory/message_history.py | 2 +- autogpt/models/chat_completion_response.py | 4 + autogpt/processing/text.py | 7 +- autogpt/prompts/generator.py | 23 +- autogpt/setup.py | 2 +- tests/unit/test_agent_manager.py | 5 +- tests/unit/test_message_history.py | 10 +- tests/unit/test_prompt_generator.py | 256 ++++++++++++--------- 14 files changed, 227 insertions(+), 155 deletions(-) create mode 100644 autogpt/models/chat_completion_response.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 879c7c8e0644..5e96c29aad92 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -139,11 +139,13 @@ def signal_handler(signum, frame): self.triggering_prompt, self.fast_token_limit, self.get_functions_from_commands(), - self.config.fast_llm_model + self.config.fast_llm_model, ) try: - assistant_reply_json = extract_json_from_response(assistant_reply) + assistant_reply_json = extract_json_from_response( + assistant_reply.content + ) validate_json(assistant_reply_json, self.config) except json.JSONDecodeError as e: logger.error(f"Exception while validating assistant reply JSON: {e}") @@ -161,7 +163,9 @@ def signal_handler(signum, frame): print_assistant_thoughts( self.ai_name, assistant_reply_json, self.config ) - command_name, arguments = get_command(assistant_reply_json) + command_name, arguments = get_command( + assistant_reply_json, assistant_reply, self.config + ) if self.config.speak_mode: say_text(f"I want to execute {command_name}") @@ -277,6 +281,7 @@ def signal_handler(signum, frame): ) result = f"Command 
{command_name} returned: " f"{command_result}" from autogpt.llm.utils import count_string_tokens + result_tlength = count_string_tokens( str(command_result), self.config.fast_llm_model ) @@ -316,7 +321,6 @@ def _resolve_pathlike_command_args(self, command_args): ) return command_args - def get_functions_from_commands(self) -> list[CommandFunction]: """Get functions from the commands. "functions" in this context refers to OpenAI functions see https://platform.openai.com/docs/guides/gpt/function-calling diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 1f1c8a1de05f..eaecbf3b41a8 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -41,7 +41,9 @@ def create_agent( if plugin_messages := plugin.pre_instruction(messages.raw()): messages.extend([Message(**raw_msg) for raw_msg in plugin_messages]) # Start GPT instance - agent_reply = create_chat_completion(prompt=messages, config=self.config) + agent_reply = create_chat_completion( + prompt=messages, config=self.config + ).content messages.add("assistant", agent_reply) @@ -92,7 +94,9 @@ def message_agent(self, key: str | int, message: str) -> str: messages.extend([Message(**raw_msg) for raw_msg in plugin_messages]) # Start GPT instance - agent_reply = create_chat_completion(prompt=messages, config=self.config) + agent_reply = create_chat_completion( + prompt=messages, config=self.config + ).content messages.add("assistant", agent_reply) diff --git a/autogpt/app.py b/autogpt/app.py index 78e3a4dd206e..4f1c3a9a436e 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -3,6 +3,8 @@ from typing import Dict from autogpt.agent.agent import Agent +from autogpt.config import Config +from autogpt.models.chat_completion_response import ChatCompletionResponse def is_valid_int(value: str) -> bool: @@ -21,11 +23,13 @@ def is_valid_int(value: str) -> bool: return False -def get_command(response_json: Dict): +def get_command( + assistant_reply_json: Dict, assistant_reply: ChatCompletionResponse, config: Config +): """Parse the response and return the command name and arguments Args: - response_json (json): The response from the AI + assistant_reply_json (json): The response from the AI Returns: tuple: The command name and arguments @@ -35,14 +39,22 @@ def get_command(response_json: Dict): Exception: If any other error occurs """ + if config.openai_functions: + assistant_reply_json["command"] = { + "name": assistant_reply.function_call.name, + "args": json.loads(assistant_reply.function_call.arguments), + } try: - if "command" not in response_json: + if "command" not in assistant_reply_json: return "Error:", "Missing 'command' object in JSON" - if not isinstance(response_json, dict): - return "Error:", f"'response_json' object is not dictionary {response_json}" + if not isinstance(assistant_reply_json, dict): + return ( + "Error:", + f"'assistant_reply_json' object is not dictionary {assistant_reply_json}", + ) - command = response_json["command"] + command = assistant_reply_json["command"] if not isinstance(command, dict): return "Error:", "'command' object is not a dictionary" diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index 95a406f566fa..b78d6322ed7f 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -33,7 +33,6 @@ def llm_response_schema( ) -> dict[str, Any]: filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json") with open(filename, "r") as f: - json_schema = json.load(f) if config.openai_functions: del 
json_schema["properties"]["command"] diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index 2ab319c4d2f1..5952da58b84a 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -24,7 +24,7 @@ def chat_with_ai( triggering_prompt: str, token_limit: int, functions: List[CommandFunction], - model: str | None = None + model: str | None = None, ): """ Interact with the OpenAI API, sending the prompt, user input, @@ -198,12 +198,12 @@ def chat_with_ai( prompt=message_sequence, config=agent.config, functions=functions, - max_tokens=tokens_remaining + max_tokens=tokens_remaining, ) # Update full message history agent.history.append(user_input_msg) - assistant_reply = "okfdsnfjdhsfndjsh" - agent.history.add("assistant", assistant_reply, "ai_response") + + agent.history.add("assistant", assistant_reply.content, "ai_response") return assistant_reply diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index 44ff80ca63e6..d083bb80a7ad 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -7,11 +7,12 @@ from autogpt.config import Config from autogpt.logs import logger +from ...models.chat_completion_response import ChatCompletionResponse +from ...models.command_function import CommandFunction from ..api_manager import ApiManager from ..base import ChatSequence, Message from ..providers import openai as iopenai from .token_counter import * -from ...models.command_function import CommandFunction def call_ai_function( @@ -53,7 +54,7 @@ def call_ai_function( Message("user", arg_str), ], ) - return create_chat_completion(prompt=prompt, temperature=0) + return create_chat_completion(prompt=prompt, temperature=0).content def create_text_completion( @@ -93,7 +94,7 @@ def create_chat_completion( model: Optional[str] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, -) -> str: +) -> ChatCompletionResponse: """Create a chat completion using the OpenAI API Args: @@ -147,19 +148,20 @@ def create_chat_completion( ) logger.debug(f"Response: {response}") - resp = "" - if not hasattr(response, "error"): - resp = response.choices[0].message["content"] - else: + if hasattr(response, "error"): logger.error(response.error) raise RuntimeError(response.error) + first_message = response.choices[0].message + content = first_message["content"] + function_call = first_message.get("function_call", {}) + for plugin in config.plugins: if not plugin.can_handle_on_response(): continue - resp = plugin.on_response(resp) + content = plugin.on_response(content) - return resp + return ChatCompletionResponse(content=content, function_call=function_call) def check_model( diff --git a/autogpt/memory/message_history.py b/autogpt/memory/message_history.py index 4dba13dd8923..f3e1dc30c946 100644 --- a/autogpt/memory/message_history.py +++ b/autogpt/memory/message_history.py @@ -228,7 +228,7 @@ def summarize_batch(self, new_events_batch, config): PROMPT_SUMMARY_FILE_NAME, ) - self.summary = create_chat_completion(prompt, config) + self.summary = create_chat_completion(prompt, config).content self.agent.log_cycle_handler.log_cycle( self.agent.ai_name, diff --git a/autogpt/models/chat_completion_response.py b/autogpt/models/chat_completion_response.py new file mode 100644 index 000000000000..c2b241533a5e --- /dev/null +++ b/autogpt/models/chat_completion_response.py @@ -0,0 +1,4 @@ +class ChatCompletionResponse: + def __init__(self, content: str, function_call: dict[str, str]): + self.content = content + self.function_call = function_call diff --git 
a/autogpt/processing/text.py b/autogpt/processing/text.py
index 78eabf45bbb5..30caa5970cbd 100644
--- a/autogpt/processing/text.py
+++ b/autogpt/processing/text.py
@@ -114,8 +114,8 @@ def summarize_text(

     logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")

     summary = create_chat_completion(
-        summarization_prompt, config, temperature=0, max_tokens=500
-    )
+        prompt=summarization_prompt, config=config, temperature=0, max_tokens=500
+    ).content

     logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")

     return summary.strip(), None
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
index 09c09bff4831..8c76b4330d5b 100644
--- a/autogpt/prompts/generator.py
+++ b/autogpt/prompts/generator.py
@@ -148,15 +148,18 @@ def generate_prompt_string(self, config: Config) -> str:
         f"{json.dumps(llm_response_schema(config))}\n"
     )

+
 def generate_prompt_string(self, commands: List[Command], config: Config) -> str:
-    """
-    Generate a prompt string based on the constraints, commands, resources,
-    and performance evaluations.
+    """
+    Generate a prompt string based on the constraints, commands, resources,
+    and performance evaluations.

-    Returns:
-        str: The generated prompt string.
+ """ + if config.openai_functions: + return "" + return ( + "Commands:\n" + f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" + ) diff --git a/autogpt/setup.py b/autogpt/setup.py index 2fe8b3a9f1e8..f17a91e05ec1 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -185,7 +185,7 @@ def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig: ], ), config, - ) + ).content # Debug LLM Output logger.debug(f"AI Config Generator Raw Output: {output}") diff --git a/tests/unit/test_agent_manager.py b/tests/unit/test_agent_manager.py index a372b7260dad..d9b99efea47b 100644 --- a/tests/unit/test_agent_manager.py +++ b/tests/unit/test_agent_manager.py @@ -2,6 +2,7 @@ from autogpt.agent.agent_manager import AgentManager from autogpt.llm.chat import create_chat_completion +from autogpt.models.chat_completion_response import ChatCompletionResponse @pytest.fixture @@ -32,7 +33,9 @@ def mock_create_chat_completion(mocker): "autogpt.agent.agent_manager.create_chat_completion", wraps=create_chat_completion, ) - mock_create_chat_completion.return_value = "irrelevant" + mock_create_chat_completion.return_value = ChatCompletionResponse( + content="irrelevant", function_call={} + ) return mock_create_chat_completion diff --git a/tests/unit/test_message_history.py b/tests/unit/test_message_history.py index 14b60895ecf2..6e6c2d2a4e5d 100644 --- a/tests/unit/test_message_history.py +++ b/tests/unit/test_message_history.py @@ -11,6 +11,7 @@ from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS from autogpt.llm.utils import count_string_tokens from autogpt.memory.message_history import MessageHistory +from autogpt.models.chat_completion_response import ChatCompletionResponse @pytest.fixture @@ -45,10 +46,13 @@ def test_message_history_batch_summary(mocker, agent, config): message_count = 0 # Setting the mock output and inputs - mock_summary_text = "I executed browse_website command for each of the websites returned from Google search, but none of them have any job openings." + mock_summary_response = ChatCompletionResponse( + content="I executed browse_website command for each of the websites returned from Google search, but none of them have any job openings.", + function_call={}, + ) mock_summary = mocker.patch( "autogpt.memory.message_history.create_chat_completion", - return_value=mock_summary_text, + return_value=mock_summary_response, ) system_prompt = 'You are AIJobSearcher, an AI designed to search for job openings for software engineer role\nYour decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. Find any job openings for software engineers online\n2. Go through each of the websites and job openings to summarize their requirements and URL, and skip that if you already visit the website\n\nIt takes money to let you run. Your API budget is $5.000\n\nConstraints:\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n3. No user assistance\n4. Exclusively use the commands listed in double quotes e.g. "command name"\n\nCommands:\n1. google_search: Google Search, args: "query": ""\n2. browse_website: Browse Website, args: "url": "", "question": ""\n3. 
task_complete: Task Complete (Shutdown), args: "reason": ""\n\nResources:\n1. Internet access for searches and information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n2. Constructively self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions and strategies to refine your approach.\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou should only respond in JSON format as described below \nResponse Format: \n{\n "thoughts": {\n "text": "thought",\n "reasoning": "reasoning",\n "plan": "- short bulleted\\n- list that conveys\\n- long-term plan",\n "criticism": "constructive self-criticism",\n "speak": "thoughts summary to say to user"\n },\n "command": {\n "name": "command name",\n "args": {\n "arg name": "value"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads' @@ -139,6 +143,6 @@ def test_message_history_batch_summary(mocker, agent, config): assert new_summary_message == Message( role="system", content="This reminds you of these events from your past: \n" - + mock_summary_text, + + mock_summary_response.content, type=None, ) diff --git a/tests/unit/test_prompt_generator.py b/tests/unit/test_prompt_generator.py index 4b8fada2ae90..c5ffaf78cd12 100644 --- a/tests/unit/test_prompt_generator.py +++ b/tests/unit/test_prompt_generator.py @@ -1,116 +1,152 @@ -from unittest import TestCase - -from autogpt.config import Config from autogpt.prompts.generator import PromptGenerator -class TestPromptGenerator(TestCase): +def test_add_constraint(): + """ + Test if the add_constraint() method adds a constraint to the generator's constraints list. + """ + constraint = "Constraint1" + generator = PromptGenerator() + generator.add_constraint(constraint) + assert constraint in generator.constraints + + +def test_add_command(): + """ + Test if the add_command() method adds a command to the generator's commands list. + """ + command_label = "Command Label" + command_name = "command_name" + args = {"arg1": "value1", "arg2": "value2"} + generator = PromptGenerator() + generator.add_command(command_label, command_name, args) + command = { + "label": command_label, + "name": command_name, + "args": args, + "function": None, + } + assert command in generator.commands + + +def test_add_resource(): + """ + Test if the add_resource() method adds a resource to the generator's resources list. + """ + resource = "Resource1" + generator = PromptGenerator() + generator.add_resource(resource) + assert resource in generator.resources + + +def test_add_performance_evaluation(): + """ + Test if the add_performance_evaluation() method adds an evaluation to the generator's + performance_evaluation list. + """ + evaluation = "Evaluation1" + generator = PromptGenerator() + generator.add_performance_evaluation(evaluation) + assert evaluation in generator.performance_evaluation + + +def test_generate_prompt_string(config): + """ + Test if the generate_prompt_string() method generates a prompt string with all the added + constraints, commands, resources, and evaluations. 
+ """ + + # Define the test data + constraints = ["Constraint1", "Constraint2"] + commands = [ + { + "label": "Command1", + "name": "command_name1", + "args": {"arg1": "value1"}, + }, + { + "label": "Command2", + "name": "command_name2", + "args": {}, + }, + ] + resources = ["Resource1", "Resource2"] + evaluations = ["Evaluation1", "Evaluation2"] + + # Add test data to the generator + generator = PromptGenerator() + for constraint in constraints: + generator.add_constraint(constraint) + for command in commands: + generator.add_command(command["label"], command["name"], command["args"]) + for resource in resources: + generator.add_resource(resource) + for evaluation in evaluations: + generator.add_performance_evaluation(evaluation) + + # Generate the prompt string and verify its correctness + prompt_string = generator.generate_prompt_string(config) + assert prompt_string is not None + + # Check if all constraints, commands, resources, and evaluations are present in the prompt string + for constraint in constraints: + assert constraint in prompt_string + for command in commands: + assert command["name"] in prompt_string + for key, value in command["args"].items(): + assert f'"{key}": "{value}"' in prompt_string + for resource in resources: + assert resource in prompt_string + for evaluation in evaluations: + assert evaluation in prompt_string + + +def test_generate_prompt_string(config): """ - Test cases for the PromptGenerator class, which is responsible for generating - prompts for the AI with constraints, commands, resources, and performance evaluations. + Test if the generate_prompt_string() method generates a prompt string with all the added + constraints, commands, resources, and evaluations. """ - @classmethod - def setUpClass(cls): - """ - Set up the initial state for each test method by creating an instance of PromptGenerator. - """ - cls.generator = PromptGenerator() - - # Test whether the add_constraint() method adds a constraint to the generator's constraints list - def test_add_constraint(self): - """ - Test if the add_constraint() method adds a constraint to the generator's constraints list. - """ - constraint = "Constraint1" - self.generator.add_constraint(constraint) - self.assertIn(constraint, self.generator.constraints) - - # Test whether the add_command() method adds a command to the generator's commands list - def test_add_command(self): - """ - Test if the add_command() method adds a command to the generator's commands list. - """ - command_label = "Command Label" - command_name = "command_name" - args = {"arg1": "value1", "arg2": "value2"} - self.generator.add_command(command_label, command_name, args) - command = { - "label": command_label, - "name": command_name, - "args": args, - "function": None, - } - self.assertIn(command, self.generator.commands) - - def test_add_resource(self): - """ - Test if the add_resource() method adds a resource to the generator's resources list. - """ - resource = "Resource1" - self.generator.add_resource(resource) - self.assertIn(resource, self.generator.resources) - - def test_add_performance_evaluation(self): - """ - Test if the add_performance_evaluation() method adds an evaluation to the generator's - performance_evaluation list. 
- """ - evaluation = "Evaluation1" - self.generator.add_performance_evaluation(evaluation) - self.assertIn(evaluation, self.generator.performance_evaluation) - - def test_generate_prompt_string(self, config: Config): - """ - Test if the generate_prompt_string() method generates a prompt string with all the added - constraints, commands, resources, and evaluations. - """ - # Define the test data - constraints = ["Constraint1", "Constraint2"] - commands = [ - { - "label": "Command1", - "name": "command_name1", - "args": {"arg1": "value1"}, - }, - { - "label": "Command2", - "name": "command_name2", - "args": {}, - }, - ] - resources = ["Resource1", "Resource2"] - evaluations = ["Evaluation1", "Evaluation2"] - - # Add test data to the generator - for constraint in constraints: - self.generator.add_constraint(constraint) - for command in commands: - self.generator.add_command( - command["label"], command["name"], command["args"] - ) - for resource in resources: - self.generator.add_resource(resource) - for evaluation in evaluations: - self.generator.add_performance_evaluation(evaluation) - - # Generate the prompt string and verify its correctness - prompt_string = self.generator.generate_prompt_string(config) - self.assertIsNotNone(prompt_string) - - # Check if all constraints, commands, resources, and evaluations are present in the prompt string - for constraint in constraints: - self.assertIn(constraint, prompt_string) - for command in commands: - self.assertIn(command["name"], prompt_string) - for key, value in command["args"].items(): - self.assertIn(f'"{key}": "{value}"', prompt_string) - for resource in resources: - self.assertIn(resource, prompt_string) - for evaluation in evaluations: - self.assertIn(evaluation, prompt_string) - - self.assertIn("constraints", prompt_string.lower()) - self.assertIn("commands", prompt_string.lower()) - self.assertIn("resources", prompt_string.lower()) - self.assertIn("performance evaluation", prompt_string.lower()) + # Define the test data + constraints = ["Constraint1", "Constraint2"] + commands = [ + { + "label": "Command1", + "name": "command_name1", + "args": {"arg1": "value1"}, + }, + { + "label": "Command2", + "name": "command_name2", + "args": {}, + }, + ] + resources = ["Resource1", "Resource2"] + evaluations = ["Evaluation1", "Evaluation2"] + + # Add test data to the generator + generator = PromptGenerator() + for constraint in constraints: + generator.add_constraint(constraint) + for command in commands: + generator.add_command(command["label"], command["name"], command["args"]) + for resource in resources: + generator.add_resource(resource) + for evaluation in evaluations: + generator.add_performance_evaluation(evaluation) + + # Generate the prompt string and verify its correctness + prompt_string = generator.generate_prompt_string(config) + assert prompt_string is not None + + # Check if all constraints, commands, resources, and evaluations are present in the prompt string + for constraint in constraints: + assert constraint in prompt_string + for command in commands: + assert command["name"] in prompt_string + for key, value in command["args"].items(): + assert f'"{key}": "{value}"' in prompt_string + for resource in resources: + assert resource in prompt_string + for evaluation in evaluations: + assert evaluation in prompt_string