Make prompt parameters configurable (#3375)
Co-authored-by: Nicholas Tindle <[email protected]>
Co-authored-by: k-boikov <[email protected]>
3 people authored May 17, 2023
1 parent 1c399e6 commit 42a5a0c
Showing 10 changed files with 168 additions and 39 deletions.
3 changes: 3 additions & 0 deletions .env.template
@@ -13,6 +13,9 @@
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y
## EXIT_KEY - Key to exit AUTO-GPT
7 changes: 7 additions & 0 deletions autogpt/cli.py
@@ -15,6 +15,11 @@
"-C",
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
"--prompt-settings",
"-P",
help="Specifies which prompt_settings.yaml file to use.",
)
@click.option(
"-l",
"--continuous-limit",
@@ -66,6 +71,7 @@ def main(
continuous: bool,
continuous_limit: int,
ai_settings: str,
prompt_settings: str,
skip_reprompt: bool,
speak: bool,
debug: bool,
Expand All @@ -91,6 +97,7 @@ def main(
continuous,
continuous_limit,
ai_settings,
prompt_settings,
skip_reprompt,
speak,
debug,
3 changes: 3 additions & 0 deletions autogpt/config/config.py
@@ -38,6 +38,9 @@ def __init__(self) -> None:
self.disabled_command_categories = []

self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
self.prompt_settings_file = os.getenv(
"PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
)
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
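With this change, the prompt settings path can also be overridden through the environment. A minimal sketch, assuming the process environment is otherwise already set up for Auto-GPT, that no `Config` instance has been created yet, and using a hypothetical `my_prompt_settings.yaml` purely for illustration:

```python
import os

from autogpt.config.config import Config

# Hypothetical file name, for illustration only; the file must exist if
# Auto-GPT later tries to load it.
os.environ["PROMPT_SETTINGS_FILE"] = "my_prompt_settings.yaml"

config = Config()
print(config.prompt_settings_file)  # -> my_prompt_settings.yaml
```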
53 changes: 53 additions & 0 deletions autogpt/config/prompt_config.py
@@ -0,0 +1,53 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.config.config import Config
from autogpt.logs import logger

CFG = Config()


class PromptConfig:
"""
A class that contains the configuration information for the prompt, which will be used by the prompt generator
Attributes:
constraints (list): Constraints list for the prompt generator.
resources (list): Resources list for the prompt generator.
performance_evaluations (list): Performance evaluation list for the prompt generator.
"""

def __init__(
self,
config_file: str = CFG.prompt_settings_file,
) -> None:
"""
Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
yaml file if yaml file exists,
else raises error.
Parameters:
constraints (list): Constraints list for the prompt generator.
resources (list): Resources list for the prompt generator.
performance_evaluations (list): Performance evaluation list for the prompt generator.
Returns:
None
"""
# Validate file
(validated, message) = utils.validate_yaml_file(config_file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)

with open(config_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)

self.constraints = config_params.get("constraints", [])
self.resources = config_params.get("resources", [])
self.performance_evaluations = config_params.get("performance_evaluations", [])
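For context, a minimal sketch of loading the new settings class on its own, assuming it is run from the repository root so the default `prompt_settings.yaml` added in this commit is found, and that the environment is configured well enough for `Config()` (created at import time in this module) to initialize:

```python
from autogpt.config.prompt_config import PromptConfig

# Load the defaults shipped in prompt_settings.yaml, or pass a custom path.
prompt_config = PromptConfig("prompt_settings.yaml")

for constraint in prompt_config.constraints:
    print(constraint)
print(prompt_config.resources)
print(prompt_config.performance_evaluations)
```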
15 changes: 15 additions & 0 deletions autogpt/configurator.py
@@ -14,6 +14,7 @@ def create_config(
continuous: bool,
continuous_limit: int,
ai_settings_file: str,
prompt_settings_file: str,
skip_reprompt: bool,
speak: bool,
debug: bool,
@@ -30,6 +31,7 @@
continuous (bool): Whether to run in continuous mode
continuous_limit (int): The number of times to run in continuous mode
ai_settings_file (str): The path to the ai_settings.yaml file
prompt_settings_file (str): The path to the prompt_settings.yaml file
skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
speak (bool): Whether to enable speak mode
debug (bool): Whether to enable debug mode
@@ -112,6 +114,19 @@ def create_config(
CFG.ai_settings_file = file
CFG.skip_reprompt = True

if prompt_settings_file:
file = prompt_settings_file

# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)

logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
CFG.prompt_settings_file = file

if browser_name:
CFG.selenium_web_browser = browser_name

2 changes: 2 additions & 0 deletions autogpt/main.py
@@ -27,6 +27,7 @@ def run_auto_gpt(
continuous: bool,
continuous_limit: int,
ai_settings: str,
prompt_settings: str,
skip_reprompt: bool,
speak: bool,
debug: bool,
@@ -50,6 +51,7 @@
continuous,
continuous_limit,
ai_settings,
prompt_settings,
skip_reprompt,
speak,
debug,
46 changes: 11 additions & 35 deletions autogpt/prompts/prompt.py
@@ -2,6 +2,7 @@

from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.llm import ApiManager
from autogpt.logs import logger
from autogpt.prompts.generator import PromptGenerator
@@ -27,46 +28,21 @@ def build_default_prompt_generator() -> PromptGenerator:
# Initialize the PromptGenerator object
prompt_generator = PromptGenerator()

# Initialize the PromptConfig object and load the file set in the main config (default: prompt_settings.yaml)
prompt_config = PromptConfig(CFG.prompt_settings_file)

# Add constraints to the PromptGenerator object
prompt_generator.add_constraint(
"~4000 word limit for short term memory. Your short term memory is short, so"
" immediately save important information to files."
)
prompt_generator.add_constraint(
"If you are unsure how you previously did something or want to recall past"
" events, thinking about similar events will help you remember."
)
prompt_generator.add_constraint("No user assistance")
prompt_generator.add_constraint(
"Exclusively use the commands listed below e.g. command_name"
)
for constraint in prompt_config.constraints:
prompt_generator.add_constraint(constraint)

# Add resources to the PromptGenerator object
prompt_generator.add_resource(
"Internet access for searches and information gathering."
)
prompt_generator.add_resource("Long Term memory management.")
prompt_generator.add_resource(
"GPT-3.5 powered Agents for delegation of simple tasks."
)
prompt_generator.add_resource("File output.")
for resource in prompt_config.resources:
prompt_generator.add_resource(resource)

# Add performance evaluations to the PromptGenerator object
prompt_generator.add_performance_evaluation(
"Continuously review and analyze your actions to ensure you are performing to"
" the best of your abilities."
)
prompt_generator.add_performance_evaluation(
"Constructively self-criticize your big-picture behavior constantly."
)
prompt_generator.add_performance_evaluation(
"Reflect on past decisions and strategies to refine your approach."
)
prompt_generator.add_performance_evaluation(
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
" the least number of steps."
)
prompt_generator.add_performance_evaluation("Write all code to a file.")
for performance_evaluation in prompt_config.performance_evaluations:
prompt_generator.add_performance_evaluation(performance_evaluation)

return prompt_generator


11 changes: 7 additions & 4 deletions docs/usage.md
@@ -23,10 +23,13 @@ Running with `--help` lists all the possible command line arguments you can pass
Here are some common arguments you can use when running Auto-GPT:

* Run Auto-GPT with a different AI Settings file

:::shell
./run.sh --ai-settings <filename>

``` shell
./run.sh --ai-settings <filename>
```
* Run Auto-GPT with a different Prompt Settings file
``` shell
./run.sh --prompt-settings <filename>
```
* Specify a memory backend

:::shell
19 changes: 19 additions & 0 deletions prompt_settings.yaml
@@ -0,0 +1,19 @@
constraints: [
'~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.',
'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.',
'No user assistance',
'Exclusively use the commands listed below e.g. command_name'
]
resources: [
'Internet access for searches and information gathering.',
'Long Term memory management.',
'GPT-3.5 powered Agents for delegation of simple tasks.',
'File output.'
]
performance_evaluations: [
'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
'Constructively self-criticize your big-picture behavior constantly.',
'Reflect on past decisions and strategies to refine your approach.',
'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
'Write all code to a file.'
]
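If you customize these lists, a quick sanity check before pointing Auto-GPT at the file can reuse the same helpers as the code above. A sketch, assuming a hypothetical `custom_prompt_settings.yaml` and the same environment caveats as in the earlier examples:

```python
from autogpt import utils
from autogpt.config.prompt_config import PromptConfig

# validate_yaml_file returns a (validated, message) tuple, as used in
# autogpt/configurator.py above.
validated, message = utils.validate_yaml_file("custom_prompt_settings.yaml")
print(message)

if validated:
    prompt_config = PromptConfig("custom_prompt_settings.yaml")
    print(f"{len(prompt_config.constraints)} constraints loaded")
```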
48 changes: 48 additions & 0 deletions tests/test_prompt_config.py
@@ -0,0 +1,48 @@
from autogpt.config.prompt_config import PromptConfig

"""
Test cases for the PromptConfig class, which loads the prompt configuration
settings from a YAML file.
"""


def test_prompt_config_loading(tmp_path):
"""Test if the prompt configuration loads correctly"""

yaml_content = """
constraints:
- A test constraint
- Another test constraint
- A third test constraint
resources:
- A test resource
- Another test resource
- A third test resource
performance_evaluations:
- A test performance evaluation
- Another test performance evaluation
- A third test performance evaluation
"""
config_file = tmp_path / "test_prompt_settings.yaml"
config_file.write_text(yaml_content)

prompt_config = PromptConfig(config_file)

assert len(prompt_config.constraints) == 3
assert prompt_config.constraints[0] == "A test constraint"
assert prompt_config.constraints[1] == "Another test constraint"
assert prompt_config.constraints[2] == "A third test constraint"
assert len(prompt_config.resources) == 3
assert prompt_config.resources[0] == "A test resource"
assert prompt_config.resources[1] == "Another test resource"
assert prompt_config.resources[2] == "A third test resource"
assert len(prompt_config.performance_evaluations) == 3
assert prompt_config.performance_evaluations[0] == "A test performance evaluation"
assert (
prompt_config.performance_evaluations[1]
== "Another test performance evaluation"
)
assert (
prompt_config.performance_evaluations[2]
== "A third test performance evaluation"
)

1 comment on commit 42a5a0c: @vercel (bot) commented on May 17, 2023.