Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor(platform/rest): Update REST API to use standard FastAPI structure #8519

Merged
778 changes: 100 additions & 678 deletions autogpt_platform/backend/backend/server/rest_api.py

Large diffs are not rendered by default.

539 changes: 539 additions & 0 deletions autogpt_platform/backend/backend/server/routers/v1.py

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,7 @@ async def block_autogen_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
input_data = {"input": "Write me a block that writes a string into a file."}
response = server.agent_server.execute_graph(
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ async def reddit_marketing_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
input_data = {"subreddit": "AutoGPT"}
response = server.agent_server.execute_graph(
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
Expand Down
2 changes: 1 addition & 1 deletion autogpt_platform/backend/backend/usecases/sample.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ async def sample_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = server.agent_server.execute_graph(
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
Expand Down
24 changes: 14 additions & 10 deletions autogpt_platform/backend/backend/util/test.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import logging
import time

from backend.data import db
Expand All @@ -6,9 +7,10 @@
from backend.data.model import CREDENTIALS_FIELD_NAME
from backend.data.user import create_default_user
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.server.rest_api import AgentServer, get_user_id
from backend.server.rest_api import AgentServer
from backend.server.utils import get_user_id

log = print
log = logging.getLogger(__name__)


class SpinTestServer:
Expand Down Expand Up @@ -57,17 +59,19 @@ async def wait_execution(
timeout: int = 20,
) -> list:
async def is_execution_completed():
status = await AgentServer().get_graph_run_status(
status = await AgentServer().test_get_graph_run_status(
graph_id, graph_exec_id, user_id
)
log.info(f"Execution status: {status}")
if status == ExecutionStatus.FAILED:
log.info("Execution failed")
raise Exception("Execution failed")
return status == ExecutionStatus.COMPLETED

# Wait for the executions to complete
for i in range(timeout):
if await is_execution_completed():
return await AgentServer().get_graph_run_node_execution_results(
return await AgentServer().test_get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
time.sleep(1)
Expand All @@ -79,23 +83,23 @@ def execute_block_test(block: Block):
prefix = f"[Test-{block.name}]"

if not block.test_input or not block.test_output:
log(f"{prefix} No test data provided")
log.info(f"{prefix} No test data provided")
return
if not isinstance(block.test_input, list):
block.test_input = [block.test_input]
if not isinstance(block.test_output, list):
block.test_output = [block.test_output]

output_index = 0
log(f"{prefix} Executing {len(block.test_input)} tests...")
log.info(f"{prefix} Executing {len(block.test_input)} tests...")
prefix = " " * 4 + prefix

for mock_name, mock_obj in (block.test_mock or {}).items():
log(f"{prefix} mocking {mock_name}...")
log.info(f"{prefix} mocking {mock_name}...")
if hasattr(block, mock_name):
setattr(block, mock_name, mock_obj)
else:
log(f"{prefix} mock {mock_name} not found in block")
log.info(f"{prefix} mock {mock_name} not found in block")

extra_exec_kwargs = {}

Expand All @@ -107,7 +111,7 @@ def execute_block_test(block: Block):
extra_exec_kwargs[CREDENTIALS_FIELD_NAME] = block.test_credentials

for input_data in block.test_input:
log(f"{prefix} in: {input_data}")
log.info(f"{prefix} in: {input_data}")

for output_name, output_data in block.execute(input_data, **extra_exec_kwargs):
if output_index >= len(block.test_output):
Expand All @@ -125,7 +129,7 @@ def compare(data, expected_data):
is_matching = False

mark = "✅" if is_matching else "❌"
log(f"{prefix} {mark} comparing `{data}` vs `{expected_data}`")
log.info(f"{prefix} {mark} comparing `{data}` vs `{expected_data}`")
if not is_matching:
raise ValueError(
f"{prefix}: wrong output {data} vs {expected_data}"
Expand Down
26 changes: 20 additions & 6 deletions autogpt_platform/backend/test/conftest.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,20 @@
import logging

import pytest

from backend.util.test import SpinTestServer

# NOTE: You can run tests with --log-cli-level=INFO to see the logs
# Set up logging
logger = logging.getLogger(__name__)

# Create console handler with formatting
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)


@pytest.fixture(scope="session")
async def server():
Expand All @@ -12,7 +25,7 @@ async def server():
@pytest.fixture(scope="session", autouse=True)
async def graph_cleanup(server):
created_graph_ids = []
original_create_graph = server.agent_server.create_graph
original_create_graph = server.agent_server.test_create_graph
Swiftyos marked this conversation as resolved.
Show resolved Hide resolved

async def create_graph_wrapper(*args, **kwargs):
created_graph = await original_create_graph(*args, **kwargs)
Expand All @@ -22,13 +35,14 @@ async def create_graph_wrapper(*args, **kwargs):
return created_graph

try:
server.agent_server.create_graph = create_graph_wrapper
server.agent_server.test_create_graph = create_graph_wrapper
yield # This runs the test function
finally:
server.agent_server.create_graph = original_create_graph
server.agent_server.test_create_graph = original_create_graph

# Delete the created graphs and assert they were deleted
for graph_id, user_id in created_graph_ids:
resp = await server.agent_server.delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
if user_id:
resp = await server.agent_server.test_delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
18 changes: 9 additions & 9 deletions autogpt_platform/backend/test/data/test_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,15 @@ async def test_graph_creation(server: SpinTestServer):
create_graph = CreateGraph(graph=graph)

try:
await server.agent_server.create_graph(create_graph, False, DEFAULT_USER_ID)
await server.agent_server.test_create_graph(create_graph, DEFAULT_USER_ID)
assert False, "Should not be able to connect nodes from different subgraphs"
except ValueError as e:
assert "different subgraph" in str(e)

# Change node_1 <-> node_3 link to node_1 <-> node_2 (input for subgraph_1)
graph.links[0].sink_id = "node_2"
created_graph = await server.agent_server.create_graph(
create_graph, False, DEFAULT_USER_ID
created_graph = await server.agent_server.test_create_graph(
create_graph, DEFAULT_USER_ID
)

assert UUID(created_graph.id)
Expand Down Expand Up @@ -102,8 +102,8 @@ async def test_get_input_schema(server: SpinTestServer):
)

create_graph = CreateGraph(graph=graph)
created_graph = await server.agent_server.create_graph(
create_graph, False, DEFAULT_USER_ID
created_graph = await server.agent_server.test_create_graph(
create_graph, DEFAULT_USER_ID
)

input_schema = created_graph.get_input_schema()
Expand Down Expand Up @@ -138,8 +138,8 @@ async def test_get_input_schema_none_required(server: SpinTestServer):
)

create_graph = CreateGraph(graph=graph)
created_graph = await server.agent_server.create_graph(
create_graph, False, DEFAULT_USER_ID
created_graph = await server.agent_server.test_create_graph(
create_graph, DEFAULT_USER_ID
)

input_schema = created_graph.get_input_schema()
Expand Down Expand Up @@ -180,8 +180,8 @@ async def test_get_input_schema_with_linked_blocks(server: SpinTestServer):
)

create_graph = CreateGraph(graph=graph)
created_graph = await server.agent_server.create_graph(
create_graph, False, DEFAULT_USER_ID
created_graph = await server.agent_server.test_create_graph(
create_graph, DEFAULT_USER_ID
)

input_schema = created_graph.get_input_schema()
Expand Down
39 changes: 33 additions & 6 deletions autogpt_platform/backend/test/executor/test_manager.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import logging

import pytest
from prisma.models import User

Expand All @@ -9,9 +11,12 @@
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.test import SpinTestServer, wait_execution

logger = logging.getLogger(__name__)

Swiftyos marked this conversation as resolved.
Show resolved Hide resolved

async def create_graph(s: SpinTestServer, g: graph.Graph, u: User) -> graph.Graph:
return await s.agent_server.create_graph(CreateGraph(graph=g), False, u.id)
logger.info(f"Creating graph for user {u.id}")
return await s.agent_server.test_create_graph(CreateGraph(graph=g), u.id)


async def execute_graph(
Expand All @@ -21,12 +26,20 @@ async def execute_graph(
input_data: dict,
num_execs: int = 4,
) -> str:
logger.info(f"Executing graph {test_graph.id} for user {test_user.id}")
logger.info(f"Input data: {input_data}")

# --- Test adding new executions --- #
response = agent_server.execute_graph(test_graph.id, input_data, test_user.id)
response = await agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
graph_exec_id = response["id"]
logger.info(f"Created execution with ID: {graph_exec_id}")

# Execution queue should be empty
logger.info("Waiting for execution to complete...")
result = await wait_execution(test_user.id, test_graph.id, graph_exec_id)
logger.info(f"Execution completed with {len(result)} results")
assert result and len(result) == num_execs
return graph_exec_id

Expand All @@ -37,7 +50,8 @@ async def assert_sample_graph_executions(
test_user: User,
graph_exec_id: str,
):
executions = await agent_server.get_graph_run_node_execution_results(
logger.info(f"Checking execution results for graph {test_graph.id}")
executions = await agent_server.test_get_graph_run_node_execution_results(
test_graph.id,
graph_exec_id,
test_user.id,
Expand All @@ -57,6 +71,7 @@ async def assert_sample_graph_executions(

# Executing StoreValueBlock
exec = executions[0]
logger.info(f"Checking first StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert (
Expand All @@ -69,6 +84,7 @@ async def assert_sample_graph_executions(

# Executing StoreValueBlock
exec = executions[1]
logger.info(f"Checking second StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert (
Expand All @@ -81,6 +97,7 @@ async def assert_sample_graph_executions(

# Executing FillTextTemplateBlock
exec = executions[2]
logger.info(f"Checking FillTextTemplateBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert exec.output_data == {"output": ["Hello, World!!!"]}
Expand All @@ -95,6 +112,7 @@ async def assert_sample_graph_executions(

# Executing PrintToConsoleBlock
exec = executions[3]
logger.info(f"Checking PrintToConsoleBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
assert exec.output_data == {"status": ["printed"]}
Expand All @@ -104,6 +122,7 @@ async def assert_sample_graph_executions(

@pytest.mark.asyncio(scope="session")
async def test_agent_execution(server: SpinTestServer):
logger.info("Starting test_agent_execution")
test_user = await create_test_user()
test_graph = await create_graph(server, create_test_graph(), test_user)
data = {"input_1": "Hello", "input_2": "World"}
Expand All @@ -117,6 +136,7 @@ async def test_agent_execution(server: SpinTestServer):
await assert_sample_graph_executions(
server.agent_server, test_graph, test_user, graph_exec_id
)
logger.info("Completed test_agent_execution")


@pytest.mark.asyncio(scope="session")
Expand All @@ -132,6 +152,7 @@ async def test_input_pin_always_waited(server: SpinTestServer):
// key
StoreValueBlock2
"""
logger.info("Starting test_input_pin_always_waited")
nodes = [
graph.Node(
block_id=StoreValueBlock().id,
Expand Down Expand Up @@ -172,14 +193,16 @@ async def test_input_pin_always_waited(server: SpinTestServer):
server.agent_server, test_graph, test_user, {}, 3
)

executions = await server.agent_server.get_graph_run_node_execution_results(
logger.info("Checking execution results")
executions = await server.agent_server.test_get_graph_run_node_execution_results(
test_graph.id, graph_exec_id, test_user.id
)
assert len(executions) == 3
# FindInDictionaryBlock should wait for the input pin to be provided,
# Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"}
assert executions[2].status == execution.ExecutionStatus.COMPLETED
assert executions[2].output_data == {"output": ["value2"]}
logger.info("Completed test_input_pin_always_waited")


@pytest.mark.asyncio(scope="session")
Expand All @@ -197,6 +220,7 @@ async def test_static_input_link_on_graph(server: SpinTestServer):
And later, another output is produced on input pin `b`, which is a static link,
this input will complete the input of those three incomplete executions.
"""
logger.info("Starting test_static_input_link_on_graph")
nodes = [
graph.Node(block_id=StoreValueBlock().id, input_default={"input": 4}), # a
graph.Node(block_id=StoreValueBlock().id, input_default={"input": 4}), # a
Expand Down Expand Up @@ -252,11 +276,14 @@ async def test_static_input_link_on_graph(server: SpinTestServer):
graph_exec_id = await execute_graph(
server.agent_server, test_graph, test_user, {}, 8
)
executions = await server.agent_server.get_graph_run_node_execution_results(
logger.info("Checking execution results")
executions = await server.agent_server.test_get_graph_run_node_execution_results(
test_graph.id, graph_exec_id, test_user.id
)
assert len(executions) == 8
# The last 3 executions will be a+b=4+5=9
for exec_data in executions[-3:]:
for i, exec_data in enumerate(executions[-3:]):
logger.info(f"Checking execution {i+1} of last 3: {exec_data}")
assert exec_data.status == execution.ExecutionStatus.COMPLETED
assert exec_data.output_data == {"result": [9]}
logger.info("Completed test_static_input_link_on_graph")
2 changes: 1 addition & 1 deletion autogpt_platform/backend/test/executor/test_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.create_graph(
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
is_template=False,
user_id=test_user.id,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ export default class BaseAutoGPTServerAPI {

constructor(
baseUrl: string = process.env.NEXT_PUBLIC_AGPT_SERVER_URL ||
"http://localhost:8006/api",
"http://localhost:8006/api/v1",
wsUrl: string = process.env.NEXT_PUBLIC_AGPT_WS_SERVER_URL ||
"ws://localhost:8001/ws",
supabaseClient: SupabaseClient | null = null,
Expand Down
Loading