diff --git a/.env.template b/.env.template index fe217a43ea1d..05f2c1abf4d3 100644 --- a/.env.template +++ b/.env.template @@ -13,6 +13,9 @@ ## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml) # AI_SETTINGS_FILE=ai_settings.yaml +## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml) +# PROMPT_SETTINGS_FILE=prompt_settings.yaml + ## AUTHORISE COMMAND KEY - Key to authorise commands # AUTHORISE_COMMAND_KEY=y ## EXIT_KEY - Key to exit AUTO-GPT diff --git a/autogpt/cli.py b/autogpt/cli.py index d9294516f255..3b45b50174f3 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -15,6 +15,11 @@ "-C", help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.", ) +@click.option( + "--prompt-settings", + "-P", + help="Specifies which prompt_settings.yaml file to use.", +) @click.option( "-l", "--continuous-limit", @@ -66,6 +71,7 @@ def main( continuous: bool, continuous_limit: int, ai_settings: str, + prompt_settings: str, skip_reprompt: bool, speak: bool, debug: bool, @@ -91,6 +97,7 @@ def main( continuous, continuous_limit, ai_settings, + prompt_settings, skip_reprompt, speak, debug, diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 7ee0df8b9530..daf12397ffd5 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -38,6 +38,9 @@ def __init__(self) -> None: self.disabled_command_categories = [] self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") + self.prompt_settings_file = os.getenv( + "PROMPT_SETTINGS_FILE", "prompt_settings.yaml" + ) self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) diff --git a/autogpt/config/prompt_config.py b/autogpt/config/prompt_config.py new file mode 100644 index 000000000000..3f562c95f43a --- /dev/null +++ 
b/autogpt/config/prompt_config.py @@ -0,0 +1,53 @@ +# sourcery skip: do-not-use-staticmethod +""" +A module that contains the PromptConfig class object that contains the configuration +""" +import yaml +from colorama import Fore + +from autogpt import utils +from autogpt.config.config import Config +from autogpt.logs import logger + +CFG = Config() + + +class PromptConfig: + """ + A class object that contains the configuration information for the prompt, which will be used by the prompt generator + + Attributes: + constraints (list): Constraints list for the prompt generator. + resources (list): Resources list for the prompt generator. + performance_evaluations (list): Performance evaluation list for the prompt generator. + """ + + def __init__( + self, + config_file: str = CFG.prompt_settings_file, + ) -> None: + """ + Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from + yaml file if yaml file exists, + else raises error. + + Parameters: + config_file (str): Path to the prompt settings yaml file whose + constraints, resources and performance_evaluations lists are + loaded (defaults to the file set in the main config).
+ Returns: + None + """ + # Validate file + (validated, message) = utils.validate_yaml_file(config_file) + if not validated: + logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) + logger.double_check() + exit(1) + + with open(config_file, encoding="utf-8") as file: + config_params = yaml.load(file, Loader=yaml.FullLoader) + + self.constraints = config_params.get("constraints", []) + self.resources = config_params.get("resources", []) + self.performance_evaluations = config_params.get("performance_evaluations", []) diff --git a/autogpt/configurator.py b/autogpt/configurator.py index 84000e576558..0fddd1fa8d3c 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -14,6 +14,7 @@ def create_config( continuous: bool, continuous_limit: int, ai_settings_file: str, + prompt_settings_file: str, skip_reprompt: bool, speak: bool, debug: bool, @@ -30,6 +31,7 @@ def create_config( continuous (bool): Whether to run in continuous mode continuous_limit (int): The number of times to run in continuous mode ai_settings_file (str): The path to the ai_settings.yaml file + prompt_settings_file (str): The path to the prompt_settings.yaml file skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script speak (bool): Whether to enable speak mode debug (bool): Whether to enable debug mode @@ -112,6 +114,19 @@ def create_config( CFG.ai_settings_file = file CFG.skip_reprompt = True + if prompt_settings_file: + file = prompt_settings_file + + # Validate file + (validated, message) = utils.validate_yaml_file(file) + if not validated: + logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) + logger.double_check() + exit(1) + + logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file) + CFG.prompt_settings_file = file + if browser_name: CFG.selenium_web_browser = browser_name diff --git a/autogpt/main.py b/autogpt/main.py index d07a0993cbe0..792496095af2 100644 --- a/autogpt/main.py +++ b/autogpt/main.py 
@@ -27,6 +27,7 @@ def run_auto_gpt( continuous: bool, continuous_limit: int, ai_settings: str, + prompt_settings: str, skip_reprompt: bool, speak: bool, debug: bool, @@ -50,6 +51,7 @@ def run_auto_gpt( continuous, continuous_limit, ai_settings, + prompt_settings, skip_reprompt, speak, debug, diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index c1d8a2b58aeb..e69fc4b79942 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -2,6 +2,7 @@ from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config +from autogpt.config.prompt_config import PromptConfig from autogpt.llm import ApiManager from autogpt.logs import logger from autogpt.prompts.generator import PromptGenerator @@ -27,46 +28,21 @@ def build_default_prompt_generator() -> PromptGenerator: # Initialize the PromptGenerator object prompt_generator = PromptGenerator() + # Initialize the PromptConfig object and load the file set in the main config (default: prompt_settings.yaml) + prompt_config = PromptConfig(CFG.prompt_settings_file) + # Add constraints to the PromptGenerator object - prompt_generator.add_constraint( - "~4000 word limit for short term memory. Your short term memory is short, so" - " immediately save important information to files." - ) - prompt_generator.add_constraint( - "If you are unsure how you previously did something or want to recall past" - " events, thinking about similar events will help you remember." - ) - prompt_generator.add_constraint("No user assistance") - prompt_generator.add_constraint( - "Exclusively use the commands listed below e.g. command_name" - ) + for constraint in prompt_config.constraints: + prompt_generator.add_constraint(constraint) # Add resources to the PromptGenerator object - prompt_generator.add_resource( - "Internet access for searches and information gathering."
- ) - prompt_generator.add_resource("Long Term memory management.") - prompt_generator.add_resource( - "GPT-3.5 powered Agents for delegation of simple tasks." - ) - prompt_generator.add_resource("File output.") + for resource in prompt_config.resources: + prompt_generator.add_resource(resource) # Add performance evaluations to the PromptGenerator object - prompt_generator.add_performance_evaluation( - "Continuously review and analyze your actions to ensure you are performing to" - " the best of your abilities." - ) - prompt_generator.add_performance_evaluation( - "Constructively self-criticize your big-picture behavior constantly." - ) - prompt_generator.add_performance_evaluation( - "Reflect on past decisions and strategies to refine your approach." - ) - prompt_generator.add_performance_evaluation( - "Every command has a cost, so be smart and efficient. Aim to complete tasks in" - " the least number of steps." - ) - prompt_generator.add_performance_evaluation("Write all code to a file.") + for performance_evaluation in prompt_config.performance_evaluations: + prompt_generator.add_performance_evaluation(performance_evaluation) + return prompt_generator diff --git a/docs/usage.md b/docs/usage.md index 4a0c88705d28..011f5f8a512e 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -23,10 +23,13 @@ Running with `--help` lists all the possible command line arguments you can pass Here are some common arguments you can use when running Auto-GPT: * Run Auto-GPT with a different AI Settings file - - :::shell - ./run.sh --ai-settings - + ``` shell + ./run.sh --ai-settings + ``` +* Run Auto-GPT with a different Prompt Settings file + ``` shell + ./run.sh --prompt-settings + ``` * Specify a memory backend :::shell diff --git a/prompt_settings.yaml b/prompt_settings.yaml new file mode 100644 index 000000000000..b8e7c0d2dce9 --- /dev/null +++ b/prompt_settings.yaml @@ -0,0 +1,19 @@ +constraints: [ + '~4000 word limit for short term memory. 
Your short term memory is short, so immediately save important information to files.', + 'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.', + 'No user assistance', + 'Exclusively use the commands listed below e.g. command_name' +] +resources: [ + 'Internet access for searches and information gathering.', + 'Long Term memory management.', + 'GPT-3.5 powered Agents for delegation of simple tasks.', + 'File output.' +] +performance_evaluations: [ + 'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.', + 'Constructively self-criticize your big-picture behavior constantly.', + 'Reflect on past decisions and strategies to refine your approach.', + 'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.', + 'Write all code to a file.' +] diff --git a/tests/test_prompt_config.py b/tests/test_prompt_config.py new file mode 100644 index 000000000000..eacb0cf4facc --- /dev/null +++ b/tests/test_prompt_config.py @@ -0,0 +1,48 @@ +from autogpt.config.prompt_config import PromptConfig + +""" +Test cases for the PromptConfig class, which handles loading the Prompts configuration +settings from a YAML file.
+""" + + +def test_prompt_config_loading(tmp_path): + """Test if the prompt configuration loads correctly""" + + yaml_content = """ +constraints: +- A test constraint +- Another test constraint +- A third test constraint +resources: +- A test resource +- Another test resource +- A third test resource +performance_evaluations: +- A test performance evaluation +- Another test performance evaluation +- A third test performance evaluation +""" + config_file = tmp_path / "test_prompt_settings.yaml" + config_file.write_text(yaml_content) + + prompt_config = PromptConfig(config_file) + + assert len(prompt_config.constraints) == 3 + assert prompt_config.constraints[0] == "A test constraint" + assert prompt_config.constraints[1] == "Another test constraint" + assert prompt_config.constraints[2] == "A third test constraint" + assert len(prompt_config.resources) == 3 + assert prompt_config.resources[0] == "A test resource" + assert prompt_config.resources[1] == "Another test resource" + assert prompt_config.resources[2] == "A third test resource" + assert len(prompt_config.performance_evaluations) == 3 + assert prompt_config.performance_evaluations[0] == "A test performance evaluation" + assert ( + prompt_config.performance_evaluations[1] + == "Another test performance evaluation" + ) + assert ( + prompt_config.performance_evaluations[2] + == "A third test performance evaluation" + )