Skip to content

Commit

Permalink
Release v0.4.4 (#4906)
Browse files Browse the repository at this point in the history
  • Loading branch information
Pwuts authored Jul 11, 2023
2 parents 5318535 + 46f31cb commit 2240033
Show file tree
Hide file tree
Showing 124 changed files with 4,950 additions and 755 deletions.
12 changes: 8 additions & 4 deletions .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -58,15 +58,19 @@ OPENAI_API_KEY=your-openai-api-key
## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml


################################################################################
### LLM MODELS
################################################################################

## SMART_LLM_MODEL - Smart language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-3.5-turbo
## SMART_LLM - Smart language model (Default: gpt-4)
# SMART_LLM=gpt-4

## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM_MODEL=gpt-3.5-turbo
## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-ada-002
Expand Down
3 changes: 2 additions & 1 deletion .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
.github/workflows/ @Significant-Gravitas/Auto-GPT-Source
.github/workflows/ @Significant-Gravitas/maintainers
autogpt/core @collijk
6 changes: 3 additions & 3 deletions .github/ISSUE_TEMPLATE/1.bug.yml
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,8 @@ body:
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
"The log files are located in the folder 'logs' inside the main auto-gpt folder."
- type: input
- type: textarea
attributes:
label: Upload Activity Log Content
description: |
Expand All @@ -152,7 +152,7 @@ body:
validations:
required: false

- type: input
- type: textarea
attributes:
label: Upload Error Log Content
description: |
Expand Down
13 changes: 9 additions & 4 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -108,22 +108,27 @@ jobs:
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
cd tests/Auto-GPT-test-cassettes
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin ${{ github.event.pull_request.base.ref }}
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }}
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '${{ github.event.pull_request.base.ref }}'."
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '${{ github.event.pull_request.base.ref }}'."
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ repos:
types: [ python ]
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt --without-integration --without-slow-integration
entry: pytest --cov=autogpt tests/unit
language: system
pass_filenames: false
always_run: true
43 changes: 25 additions & 18 deletions BULLETIN.md
Original file line number Diff line number Diff line change
@@ -1,22 +1,29 @@
# Website and Documentation Site 📰📖
Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
The documentation also has a place here, at *https://docs.agpt.co*
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.

# For contributors 👷🏼
Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D.
# v0.4.4 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
## GPT-4 is back!
Following OpenAI's recent GPT-4 GA announcement, the SMART_LLM .env setting
now defaults to GPT-4, and Auto-GPT will use GPT-4 by default in its main loop.

Check out the contribution guide on our wiki:
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
### !! High Costs Warning !! 💰💀🚨
GPT-4 costs ~20x more than GPT-3.5-turbo.
Please take note of this before using SMART_LLM. You can use `--gpt3only`
or `--gpt4only` to force the use of GPT-3.5-turbo or GPT-4, respectively,
at runtime.

# 🚀 v0.4.3 Release 🚀
We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
for future compatibility with OpenAI's function calling feature.
## Re-arch v1 preview release!
We've released a preview version of the re-arch code, under `autogpt/core`.
This is a major milestone for us, and we're excited to continue working on it.
We look forward to your feedback. Follow the process here:
https://github.com/Significant-Gravitas/Auto-GPT/issues/4770.

Key Highlights:
- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
## Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases
## Other highlights
Other fixes include plugins regressions, Azure config and security patches.

Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.
32 changes: 16 additions & 16 deletions autogpt/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import signal
import sys
from datetime import datetime
from pathlib import Path

from colorama import Fore, Style

Expand Down Expand Up @@ -64,7 +65,7 @@ def __init__(
ai_config: AIConfig,
system_prompt: str,
triggering_prompt: str,
workspace_directory: str,
workspace_directory: str | Path,
config: Config,
):
self.ai_name = ai_name
Expand All @@ -80,13 +81,11 @@ def __init__(
self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
self.cycle_count = 0
self.log_cycle_handler = LogCycleHandler()
self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(
config.fast_llm_model
).max_tokens
self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens

def start_interaction_loop(self):
# Avoid circular imports
from autogpt.app import execute_command, get_command
from autogpt.app import execute_command, extract_command

# Interaction Loop
self.cycle_count = 0
Expand Down Expand Up @@ -137,8 +136,8 @@ def signal_handler(signum, frame):
self,
self.system_prompt,
self.triggering_prompt,
self.fast_token_limit,
self.config.fast_llm_model,
self.smart_token_limit,
self.config.smart_llm,
)

try:
Expand All @@ -162,11 +161,11 @@ def signal_handler(signum, frame):
print_assistant_thoughts(
self.ai_name, assistant_reply_json, self.config
)
command_name, arguments = get_command(
command_name, arguments = extract_command(
assistant_reply_json, assistant_reply, self.config
)
if self.config.speak_mode:
say_text(f"I want to execute {command_name}")
say_text(f"I want to execute {command_name}", self.config)

arguments = self._resolve_pathlike_command_args(arguments)

Expand Down Expand Up @@ -195,8 +194,9 @@ def signal_handler(signum, frame):
# to exit
self.user_input = ""
logger.info(
"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
"'n' to exit program, or enter feedback for "
f"Enter '{self.config.authorise_key}' to authorise command, "
f"'{self.config.authorise_key} -N' to run N continuous commands, "
f"'{self.config.exit_key}' to exit program, or enter feedback for "
f"{self.ai_name}..."
)
while True:
Expand Down Expand Up @@ -224,8 +224,8 @@ def signal_handler(signum, frame):
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
logger.warn(
"Invalid input format. Please enter 'y -n' where n is"
" the number of continuous tasks."
f"Invalid input format. Please enter '{self.config.authorise_key} -n' "
"where n is the number of continuous tasks."
)
continue
break
Expand Down Expand Up @@ -281,12 +281,12 @@ def signal_handler(signum, frame):
result = f"Command {command_name} returned: " f"{command_result}"

result_tlength = count_string_tokens(
str(command_result), self.config.fast_llm_model
str(command_result), self.config.smart_llm
)
memory_tlength = count_string_tokens(
str(self.history.summary_message()), self.config.fast_llm_model
str(self.history.summary_message()), self.config.smart_llm
)
if result_tlength + memory_tlength + 600 > self.fast_token_limit:
if result_tlength + memory_tlength + 600 > self.smart_token_limit:
result = f"Failure: command {command_name} returned too much output. \
Do not execute this command again with the same arguments."

Expand Down
40 changes: 9 additions & 31 deletions autogpt/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def is_valid_int(value: str) -> bool:
return False


def get_command(
def extract_command(
assistant_reply_json: Dict, assistant_reply: ChatModelResponse, config: Config
):
"""Parse the response and return the command name and arguments
Expand Down Expand Up @@ -78,21 +78,6 @@ def get_command(
return "Error:", str(e)


def map_command_synonyms(command_name: str):
"""Takes the original command name given by the AI, and checks if the
string matches a list of common/known hallucinations
"""
synonyms = [
("write_file", "write_to_file"),
("create_file", "write_to_file"),
("search", "google"),
]
for seen_command, actual_command_name in synonyms:
if command_name == seen_command:
return actual_command_name
return command_name


def execute_command(
command_name: str,
arguments: dict[str, str],
Expand All @@ -109,28 +94,21 @@ def execute_command(
str: The result of the command
"""
try:
cmd = agent.command_registry.commands.get(command_name)
# Execute a native command with the same name or alias, if it exists
if command := agent.command_registry.get_command(command_name):
return command(**arguments, agent=agent)

# If the command is found, call it with the provided arguments
if cmd:
return cmd(**arguments, agent=agent)

# TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name.lower())

# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
# Handle non-native commands (e.g. from plugins)
for command in agent.ai_config.prompt_generator.commands:
if (
command_name == command["label"].lower()
or command_name == command["name"].lower()
):
return command["function"](**arguments)
return (
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
" list for available commands and only respond in the specified JSON"
" format."

raise RuntimeError(
f"Cannot execute '{command_name}': unknown command."
" Do not try to use this command again."
)
except Exception as e:
return f"Error: {str(e)}"
24 changes: 24 additions & 0 deletions autogpt/cli.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
"""Main script for the autogpt package."""
from typing import Optional

import click


Expand Down Expand Up @@ -65,6 +67,22 @@
is_flag=True,
help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
"--ai-name",
type=str,
help="AI name override",
)
@click.option(
"--ai-role",
type=str,
help="AI role override",
)
@click.option(
"--ai-goal",
type=str,
multiple=True,
help="AI goal override; may be used multiple times to pass multiple goals",
)
@click.pass_context
def main(
ctx: click.Context,
Expand All @@ -83,6 +101,9 @@ def main(
skip_news: bool,
workspace_directory: str,
install_plugin_deps: bool,
ai_name: Optional[str],
ai_role: Optional[str],
ai_goal: tuple[str],
) -> None:
"""
Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.
Expand All @@ -109,6 +130,9 @@ def main(
skip_news,
workspace_directory,
install_plugin_deps,
ai_name,
ai_role,
ai_goal,
)


Expand Down
2 changes: 2 additions & 0 deletions autogpt/command_decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ def command(
parameters: dict[str, CommandParameterSpec],
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
aliases: list[str] = [],
) -> Callable[..., Any]:
"""The command decorator is used to create Command objects from ordinary functions."""

Expand All @@ -40,6 +41,7 @@ def decorator(func: Callable[..., Any]) -> Command:
parameters=typed_parameters,
enabled=enabled,
disabled_reason=disabled_reason,
aliases=aliases,
)

@functools.wraps(func)
Expand Down
1 change: 1 addition & 0 deletions autogpt/commands/file_operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,7 @@ def ingest_file(
"required": True,
},
},
aliases=["write_file", "create_file"],
)
def write_to_file(filename: str, text: str, agent: Agent) -> str:
"""Write text to a file
Expand Down
Loading

0 comments on commit 2240033

Please sign in to comment.