Skip to content

Commit

Permalink
Release v0.4.3 (#4802)
Browse files Browse the repository at this point in the history
  • Loading branch information
lc0rp authored Jun 28, 2023
2 parents 3e697d5 + 4452283 commit 80151dd
Show file tree
Hide file tree
Showing 108 changed files with 2,422 additions and 3,059 deletions.
6 changes: 5 additions & 1 deletion .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,14 @@ OPENAI_API_KEY=your-openai-api-key
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
# OPENAI_FUNCTIONS=False

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

Expand Down
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ __pycache__/
build/
develop-eggs/
dist/
plugins/
/plugins/
plugins_config.yaml
downloads/
eggs/
Expand Down
23 changes: 9 additions & 14 deletions BULLETIN.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,20 +8,15 @@ Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core
Check out the contribution guide on our wiki:
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing

# 🚀 v0.4.1 Release 🚀
Two weeks and 50+ pull requests have passed since v0.4.0, and we are happy to announce the release of v0.4.1!

Highlights and notable changes since v0.4.0:
- The .env.template is more readable and better explains the purpose of each environment variable.
- More dependable search
- The CUSTOM_SEARCH_ENGINE_ID variable has been replaced by GOOGLE_CUSTOM_SEARCH_ENGINE_ID, make sure you update it.
- Better read_file
- More reliable python code execution
- Lots of JSON error fixes
- Directory-based plugins

## Further fixes and changes 🛠️
Under the hood, we've done a bunch of work improving architectures and streamlining code. Most of that won't be user-visible
# 🚀 v0.4.3 Release 🚀
We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
for future compatibility with OpenAI's function calling feature.

Key Highlights:
- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
## Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases
91 changes: 16 additions & 75 deletions autogpt/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,25 +5,22 @@

from colorama import Fore, Style

from autogpt.commands.command import CommandRegistry
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import chat_with_ai, create_chat_completion
from autogpt.llm.chat import chat_with_ai
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import count_string_tokens
from autogpt.log_cycle.log_cycle import (
FULL_MESSAGE_HISTORY_FILE_NAME,
NEXT_ACTION_FILE_NAME,
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
SUPERVISOR_FEEDBACK_FILE_NAME,
USER_INPUT_FILE_NAME,
LogCycleHandler,
)
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.logs import logger, print_assistant_thoughts, remove_ansi_escape
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
Expand Down Expand Up @@ -145,8 +142,10 @@ def signal_handler(signum, frame):
)

try:
assistant_reply_json = extract_json_from_response(assistant_reply)
validate_json(assistant_reply_json)
assistant_reply_json = extract_json_from_response(
assistant_reply.content
)
validate_json(assistant_reply_json, self.config)
except json.JSONDecodeError as e:
logger.error(f"Exception while validating assistant reply JSON: {e}")
assistant_reply_json = {}
Expand All @@ -161,9 +160,11 @@ def signal_handler(signum, frame):
# Get command name and arguments
try:
print_assistant_thoughts(
self.ai_name, assistant_reply_json, self.config.speak_mode
self.ai_name, assistant_reply_json, self.config
)
command_name, arguments = get_command(
assistant_reply_json, assistant_reply, self.config
)
command_name, arguments = get_command(assistant_reply_json)
if self.config.speak_mode:
say_text(f"I want to execute {command_name}")

Expand All @@ -184,7 +185,7 @@ def signal_handler(signum, frame):
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
)

Expand All @@ -200,32 +201,16 @@ def signal_handler(signum, frame):
)
while True:
if self.config.chat_messages_enabled:
console_input = clean_input("Waiting for your response...")
console_input = clean_input(
self.config, "Waiting for your response..."
)
else:
console_input = clean_input(
Fore.MAGENTA + "Input:" + Style.RESET_ALL
self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().strip() == self.config.authorise_key:
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().strip() == "s":
logger.typewriter_log(
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
thoughts = assistant_reply_json.get("thoughts", {})
self_feedback_resp = self.get_self_feedback(
thoughts, self.config.fast_llm_model
)
logger.typewriter_log(
f"SELF FEEDBACK: {self_feedback_resp}",
Fore.YELLOW,
"",
)
user_input = self_feedback_resp
command_name = "self_feedback"
break
elif console_input.lower().strip() == "":
logger.warn("Invalid input format.")
continue
Expand Down Expand Up @@ -281,8 +266,6 @@ def signal_handler(signum, frame):
result = f"Could not execute command: {arguments}"
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
elif command_name == "self_feedback":
result = f"Self feedback: {user_input}"
else:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_command():
Expand Down Expand Up @@ -335,45 +318,3 @@ def _resolve_pathlike_command_args(self, command_args):
self.workspace.get_path(command_args[pathlike])
)
return command_args

def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
    """Ask the LLM to critique the agent's own thought process.

    Joins the 'thoughts', 'reasoning', and 'plan' entries of the given
    dictionary into one message, sends it to the given model via
    create_chat_completion(), and logs both the outgoing prompt and the
    model's reply through the log-cycle handler.

    Args:
        thoughts: Dictionary that may contain 'thoughts', 'reasoning',
            'plan', and 'criticism' entries; missing keys are treated
            as empty strings.
        llm_model: Name of the chat model used to generate the feedback.

    Returns:
        The feedback text produced by the model.
    """
    ai_role = self.ai_config.ai_role

    feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."

    # Concatenate the thought elements in the original order:
    # thoughts, then reasoning, then plan (no separators).
    combined_thoughts = "".join(
        thoughts.get(key, "") for key in ("thoughts", "reasoning", "plan")
    )

    prompt = ChatSequence.for_model(llm_model)
    prompt.add("user", feedback_prompt + combined_thoughts)

    def _log(payload, file_name):
        # Record one artifact of this self-feedback cycle.
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            payload,
            file_name,
        )

    _log(prompt.raw(), PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME)
    feedback = create_chat_completion(prompt)
    _log(feedback, SUPERVISOR_FEEDBACK_FILE_NAME)
    return feedback
24 changes: 14 additions & 10 deletions autogpt/agent/agent_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,12 @@
class AgentManager(metaclass=Singleton):
"""Agent manager for managing GPT agents"""

def __init__(self):
def __init__(self, config: Config):
self.next_key = 0
self.agents: dict[
int, tuple[str, list[Message], str]
] = {} # key, (task, full_message_history, model)
self.cfg = Config()
self.config = config

# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
Expand All @@ -35,18 +35,20 @@ def create_agent(
"""
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])

for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction(messages.raw()):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(
prompt=messages, config=self.config
).content

messages.add("assistant", agent_reply)

plugins_reply = ""
for i, plugin in enumerate(self.cfg.plugins):
for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
Expand All @@ -62,7 +64,7 @@ def create_agent(

self.agents[key] = (task, list(messages), model)

for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)
Expand All @@ -85,19 +87,21 @@ def message_agent(self, key: str | int, message: str) -> str:
messages = ChatSequence.for_model(model, messages)
messages.add("user", message)

for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])

# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(
prompt=messages, config=self.config
).content

messages.add("assistant", agent_reply)

plugins_reply = agent_reply
for i, plugin in enumerate(self.cfg.plugins):
for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
Expand All @@ -107,7 +111,7 @@ def message_agent(self, key: str | int, message: str) -> str:
if plugins_reply and plugins_reply != "":
messages.add("assistant", plugins_reply)

for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)
Expand Down
Loading

0 comments on commit 80151dd

Please sign in to comment.