LTM summary (#1005)
jagtarcontlo authored Aug 10, 2023
1 parent b9ac4f2 commit b1d955f
Showing 9 changed files with 212 additions and 12 deletions.
2 changes: 1 addition & 1 deletion superagi/agent/agent_iteration_step_handler.py
@@ -65,7 +65,7 @@ def execute_step(self):
prompt=iteration_workflow_step.prompt,
agent_tools=agent_tools)

- messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+ messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
.build_agent_messages(prompt, agent_feeds, history_enabled=iteration_workflow_step.history_enabled,
completion_prompt=iteration_workflow_step.completion_prompt)

93 changes: 89 additions & 4 deletions superagi/agent/agent_message_builder.py
@@ -1,17 +1,22 @@
import time
from typing import Tuple, List
from sqlalchemy import asc

from superagi.config.config import get_config
from superagi.helper.prompt_reader import PromptReader
from superagi.helper.token_counter import TokenCounter
from superagi.models.agent_execution import AgentExecution
from superagi.models.agent_execution_feed import AgentExecutionFeed
from superagi.types.common import BaseMessage
from superagi.models.agent_execution_config import AgentExecutionConfiguration


class AgentLlmMessageBuilder:
"""Agent message builder for LLM agent."""
- def __init__(self, session, llm_model: str, agent_id: int, agent_execution_id: int):
+ def __init__(self, session, llm, agent_id: int, agent_execution_id: int):
self.session = session
- self.llm_model = llm_model
+ self.llm = llm
+ self.llm_model = llm.get_model()
self.agent_id = agent_id
self.agent_execution_id = agent_execution_id

@@ -32,9 +37,15 @@ def build_agent_messages(self, prompt: str, agent_feeds: list, history_enabled=F
if history_enabled:
messages.append({"role": "system", "content": f"The current time and date is {time.strftime('%c')}"})
base_token_limit = TokenCounter.count_message_tokens(messages, self.llm_model)
- full_message_history = [{'role': role, 'content': feed} for role, feed in agent_feeds]
+ full_message_history = [{'role': agent_feed.role, 'content': agent_feed.feed, 'chat_id': agent_feed.id}
+                         for agent_feed in agent_feeds]
past_messages, current_messages = self._split_history(full_message_history,
- token_limit - base_token_limit - max_output_token_limit)
+ ((token_limit - base_token_limit - max_output_token_limit) // 4) * 3)
+ if past_messages:
+     ltm_summary = self._build_ltm_summary(past_messages=past_messages,
+                                           output_token_limit=(token_limit - base_token_limit - max_output_token_limit) // 4)
+     messages.append({"role": "assistant", "content": ltm_summary})

for history in current_messages:
messages.append({"role": history["role"], "content": history["content"]})
messages.append({"role": "user", "content": completion_prompt})
@@ -51,6 +62,7 @@ def _split_history(self, history: List, pending_token_limit: int) -> Tuple[List[
self.llm_model)
hist_token_count += token_count
if hist_token_count > pending_token_limit:
+ self._add_or_update_last_agent_feed_ltm_summary_id(str(history[i-1]['chat_id']))
return history[:i], history[i:]
i -= 1
return [], history
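The changed call above splits the remaining context budget: roughly three quarters goes to verbatim recent history via _split_history, and the final quarter is reserved as the summary's output budget. A minimal sketch of the arithmetic, assuming the same integer division as the diff (the function name is illustrative):

def split_token_budget(token_limit: int, base_tokens: int, max_output_tokens: int):
    # Budget left after the base prompt and the reserved completion tokens.
    pending = token_limit - base_tokens - max_output_tokens
    history_budget = (pending // 4) * 3   # pending_token_limit for _split_history
    summary_budget = pending // 4         # output_token_limit for _build_ltm_summary
    return history_budget, summary_budget

# Example: token_limit=4096, base_tokens=96, max_output_tokens=1000
# yields history_budget=2250 and summary_budget=750.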
@@ -66,3 +78,76 @@ def _add_initial_feeds(self, agent_feeds: list, messages: list):
feed_group_id="DEFAULT")
self.session.add(agent_execution_feed)
self.session.commit()

def _add_or_update_last_agent_feed_ltm_summary_id(self, last_agent_feed_ltm_summary_id):
execution = AgentExecution(id=self.agent_execution_id)
agent_execution_configs = {"last_agent_feed_ltm_summary_id": last_agent_feed_ltm_summary_id}
AgentExecutionConfiguration.add_or_update_agent_execution_config(self.session, execution,
agent_execution_configs)


def _build_ltm_summary(self, past_messages, output_token_limit) -> str:
ltm_prompt = self._build_prompt_for_ltm_summary(past_messages=past_messages,
token_limit=output_token_limit)

summary = AgentExecutionConfiguration.fetch_value(self.session, self.agent_execution_id, "ltm_summary")
previous_ltm_summary = summary.value if summary is not None else ""

ltm_summary_base_token_limit = 10
if ((TokenCounter.count_text_tokens(ltm_prompt) + ltm_summary_base_token_limit + output_token_limit)
- TokenCounter.token_limit()) > 0:
last_agent_feed_ltm_summary_id = AgentExecutionConfiguration.fetch_value(self.session,
self.agent_execution_id, "last_agent_feed_ltm_summary_id")
last_agent_feed_ltm_summary_id = int(last_agent_feed_ltm_summary_id.value)

past_messages = self.session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed,
AgentExecutionFeed.id) \
.filter(AgentExecutionFeed.agent_execution_id == self.agent_execution_id,
AgentExecutionFeed.id > last_agent_feed_ltm_summary_id) \
.order_by(asc(AgentExecutionFeed.created_at)) \
.all()

past_messages = [
{'role': past_message.role, 'content': past_message.feed, 'chat_id': past_message.id}
for past_message in past_messages]

ltm_prompt = self._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(
previous_ltm_summary=previous_ltm_summary, past_messages=past_messages, token_limit=output_token_limit)

msgs = [{"role": "system", "content": "You are GPT Prompt writer"},
{"role": "assistant", "content": ltm_prompt}]
ltm_summary = self.llm.chat_completion(msgs)

execution = AgentExecution(id=self.agent_execution_id)
agent_execution_configs = {"ltm_summary": ltm_summary["content"]}
AgentExecutionConfiguration.add_or_update_agent_execution_config(session=self.session, execution=execution,
agent_execution_configs=agent_execution_configs)

return ltm_summary["content"]

def _build_prompt_for_ltm_summary(self, past_messages: List[BaseMessage], token_limit: int):
ltm_summary_prompt = PromptReader.read_agent_prompt(__file__, "agent_summary.txt")

past_messages_prompt = ""
for past_message in past_messages:
past_messages_prompt += past_message["role"] + ": " + past_message["content"] + "\n"
ltm_summary_prompt = ltm_summary_prompt.replace("{past_messages}", past_messages_prompt)

ltm_summary_prompt = ltm_summary_prompt.replace("{char_limit}", str(token_limit*4))

return ltm_summary_prompt

def _build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(self, previous_ltm_summary: str,
past_messages: List[BaseMessage], token_limit: int):
ltm_summary_prompt = PromptReader.read_agent_prompt(__file__, "agent_recursive_summary.txt")

ltm_summary_prompt = ltm_summary_prompt.replace("{previous_ltm_summary}", previous_ltm_summary)

past_messages_prompt = ""
for past_message in past_messages:
past_messages_prompt += past_message["role"] + ": " + past_message["content"] + "\n"
ltm_summary_prompt = ltm_summary_prompt.replace("{past_messages}", past_messages_prompt)

ltm_summary_prompt = ltm_summary_prompt.replace("{char_limit}", str(token_limit*4))

return ltm_summary_prompt
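Because the builder now invokes llm.chat_completion() itself to produce the summary, call sites pass the llm object instead of llm.get_model(), as the three handler changes in this commit show. A hedged usage sketch (the session, llm, prompt, and ids are placeholders):

builder = AgentLlmMessageBuilder(session, llm, agent_id=1, agent_execution_id=1)
messages = builder.build_agent_messages(prompt, agent_feeds,
                                        history_enabled=True,
                                        completion_prompt="Determine which next command to use.")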
2 changes: 1 addition & 1 deletion superagi/agent/agent_tool_step_handler.py
@@ -95,7 +95,7 @@ def _process_input_instruction(self, agent_config, agent_execution_config, step_
prompt = self._build_tool_input_prompt(step_tool, tool_obj, agent_execution_config)
logger.info("Prompt: ", prompt)
agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id)
- messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+ messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
.build_agent_messages(prompt, agent_feeds, history_enabled=step_tool.history_enabled,
completion_prompt=step_tool.completion_prompt)
# print(messages)
11 changes: 11 additions & 0 deletions superagi/agent/prompts/agent_recursive_summary.txt
@@ -0,0 +1,11 @@
AI, you are provided with a previous summary of interactions between the system, user, and assistant, as well as additional conversations that were not included in the original summary.
If the previous summary is empty, your task is to create a summary based solely on the new interactions.

Previous Summary: {previous_ltm_summary}

{past_messages}

If the previous summary is not empty, your final summary should integrate the new interactions into the existing summary to create a comprehensive recap of all interactions.
If the previous summary is empty, your summary should encapsulate the main points of the new conversations.
In both cases, highlight the key issues discussed, decisions made, and any actions assigned.
Please ensure that the final summary does not exceed {char_limit} characters.
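This template backs the overflow branch of _build_ltm_summary above: when the one-shot summary prompt would itself exceed the model's token limit, only the feeds newer than last_agent_feed_ltm_summary_id are refetched and folded into the previous summary. A rough sketch of how the placeholders are filled under the diff's 4-characters-per-token heuristic (helper names here are illustrative, not the committed API):

# Fill the recursive template the same way the diff fills the one-shot one.
prompt = template.replace("{previous_ltm_summary}", previous_ltm_summary)
prompt = prompt.replace("{past_messages}", render_feeds(new_feeds_after_last_summary))
prompt = prompt.replace("{char_limit}", str(output_token_limit * 4))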
8 changes: 8 additions & 0 deletions superagi/agent/prompts/agent_summary.txt
@@ -0,0 +1,8 @@
AI, your task is to generate a concise summary of the previous interactions between the system, user, and assistant.
The interactions are as follows:

{past_messages}

This summary should encapsulate the main points of the conversation, highlighting the key issues discussed, decisions made, and any actions assigned.
It should serve as a recap of the past interaction, providing a clear understanding of the conversation's context and outcomes.
Please ensure that the summary does not exceed {char_limit} characters.
2 changes: 1 addition & 1 deletion superagi/agent/queue_step_handler.py
@@ -82,7 +82,7 @@ def _process_input_instruction(self, step_tool):
prompt = self._build_queue_input_prompt(step_tool)
logger.info("Prompt: ", prompt)
agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id)
- messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+ messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
.build_agent_messages(prompt, agent_feeds, history_enabled=step_tool.history_enabled,
completion_prompt=step_tool.completion_prompt)
current_tokens = TokenCounter.count_message_tokens(messages, self.llm.get_model())
23 changes: 21 additions & 2 deletions superagi/models/agent_execution_config.py
@@ -116,7 +116,7 @@ def build_agent_execution_config(cls, session, agent, results_agent, results_age
for key, value in results_agent_execution_dict.items():
if key in results_agent_dict and value is not None:
results_agent_dict[key] = value

# Construct the response
if 'goal' in results_agent_dict:
results_agent_dict['goal'] = eval(results_agent_dict['goal'])
@@ -147,6 +147,25 @@ def build_agent_execution_config(cls, session, agent, results_agent, results_age
results_agent_dict['knowledge'] = int(results_agent_dict['knowledge'])
knowledge = session.query(Knowledges).filter(Knowledges.id == results_agent_dict['knowledge']).first()
knowledge_name = knowledge.name if knowledge is not None else ""
results_agent_dict['knowledge_name'] = knowledge_name

return results_agent_dict

@classmethod
def fetch_value(cls, session, execution_id: int, key: str):
"""
Fetches the value of a specific execution configuration setting for an agent.
Args:
session: The database session object.
execution_id (int): The ID of the agent execution.
key (str): The key of the execution configuration setting.
Returns:
AgentExecutionConfiguration: The execution configuration setting object if found, else None.
"""

return session.query(AgentExecutionConfiguration).filter(
AgentExecutionConfiguration.agent_execution_id == execution_id,
AgentExecutionConfiguration.key == key).first()
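A hedged usage sketch of the new helper: it returns the whole configuration row (or None), so callers unwrap .value themselves, exactly as _build_ltm_summary does above:

row = AgentExecutionConfiguration.fetch_value(session, execution_id, "ltm_summary")
previous_ltm_summary = row.value if row is not None else ""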
2 changes: 1 addition & 1 deletion superagi/models/agent_execution_feed.py
@@ -56,7 +56,7 @@ def get_last_tool_response(cls, session: Session, agent_execution_id: int, tool_
@classmethod
def fetch_agent_execution_feeds(cls, session, agent_execution_id: int):
agent_execution = AgentExecution.find_by_id(session, agent_execution_id)
- agent_feeds = session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed) \
+ agent_feeds = session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed, AgentExecutionFeed.id) \
.filter(AgentExecutionFeed.agent_execution_id == agent_execution_id,
AgentExecutionFeed.feed_group_id == agent_execution.current_feed_group_id) \
.order_by(asc(AgentExecutionFeed.created_at)) \
81 changes: 79 additions & 2 deletions tests/unit_tests/agent/test_agent_message_builder.py
@@ -4,11 +4,12 @@
from superagi.agent.agent_message_builder import AgentLlmMessageBuilder
from superagi.models.agent_execution_feed import AgentExecutionFeed


@patch('superagi.helper.token_counter.TokenCounter.token_limit')
@patch('superagi.config.config.get_config')
def test_build_agent_messages(mock_get_config, mock_token_limit):
mock_session = Mock()
- llm_model = 'model_1'
+ llm = Mock()
agent_id = 1
agent_execution_id = 1
prompt = "start"
@@ -19,7 +20,7 @@ def test_build_agent_messages(mock_get_config, mock_token_limit):
mock_token_limit.return_value = 1000
mock_get_config.return_value = 600

- builder = AgentLlmMessageBuilder(mock_session, llm_model, agent_id, agent_execution_id)
+ builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)
messages = builder.build_agent_messages(prompt, agent_feeds, history_enabled=True, completion_prompt=completion_prompt)

# Test prompt message
@@ -38,3 +39,79 @@ def test_build_agent_messages(mock_get_config, mock_token_limit):
assert feed_obj.agent_id == agent_id
assert feed_obj.feed == messages[i]["content"]
assert feed_obj.role == messages[i]["role"]

@patch('superagi.models.agent_execution_config.AgentExecutionConfiguration.fetch_value')
@patch('superagi.models.agent_execution_config.AgentExecutionConfiguration.add_or_update_agent_execution_config')
@patch('superagi.agent.agent_message_builder.AgentLlmMessageBuilder._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary')
@patch('superagi.agent.agent_message_builder.AgentLlmMessageBuilder._build_prompt_for_ltm_summary')
@patch('superagi.helper.token_counter.TokenCounter.count_text_tokens')
@patch('superagi.helper.token_counter.TokenCounter.token_limit')
def test_build_ltm_summary(mock_token_limit, mock_count_text_tokens, mock_build_prompt_for_ltm_summary,
mock_build_prompt_for_recursive_ltm_summary, mock_add_or_update_agent_execution_config,
mock_fetch_value):
mock_session = Mock()
llm = Mock()
agent_id = 1
agent_execution_id = 1

builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)

past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
output_token_limit = 100

mock_token_limit.return_value = 1000
mock_count_text_tokens.return_value = 200
mock_build_prompt_for_ltm_summary.return_value = "ltm_summary_prompt"
mock_build_prompt_for_recursive_ltm_summary.return_value = "recursive_ltm_summary_prompt"
mock_fetch_value.return_value = Mock(value="ltm_summary")
llm.chat_completion.return_value = {"content": "ltm_summary"}

ltm_summary = builder._build_ltm_summary(past_messages, output_token_limit)

assert ltm_summary == "ltm_summary"

mock_add_or_update_agent_execution_config.assert_called_once()

llm.chat_completion.assert_called_once_with([{"role": "system", "content": "You are GPT Prompt writer"},
{"role": "assistant", "content": "ltm_summary_prompt"}])

@patch('superagi.helper.prompt_reader.PromptReader.read_agent_prompt')
def test_build_prompt_for_ltm_summary(mock_read_agent_prompt):
mock_session = Mock()
llm = Mock()
agent_id = 1
agent_execution_id = 1

builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)

past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
token_limit = 100

mock_read_agent_prompt.return_value = "{past_messages}\n{char_limit}"

prompt = builder._build_prompt_for_ltm_summary(past_messages, token_limit)

assert "user: Hello\nassistant: Hi\n" in prompt
assert "400" in prompt


@patch('superagi.helper.prompt_reader.PromptReader.read_agent_prompt')
def test_build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(mock_read_agent_prompt):
mock_session = Mock()
llm = Mock()
agent_id = 1
agent_execution_id = 1

builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)

previous_ltm_summary = "Summary"
past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
token_limit = 100

mock_read_agent_prompt.return_value = "{previous_ltm_summary}\n{past_messages}\n{char_limit}"

prompt = builder._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(previous_ltm_summary, past_messages, token_limit)

assert "Summary" in prompt
assert "user: Hello\nassistant: Hi\n" in prompt
assert "400" in prompt
