diff --git a/superagi/agent/agent_iteration_step_handler.py b/superagi/agent/agent_iteration_step_handler.py
index a8f37d94a..00d73b8c6 100644
--- a/superagi/agent/agent_iteration_step_handler.py
+++ b/superagi/agent/agent_iteration_step_handler.py
@@ -65,7 +65,7 @@ def execute_step(self):
                                                  prompt=iteration_workflow_step.prompt,
                                                  agent_tools=agent_tools)
-        messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+        messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
             .build_agent_messages(prompt, agent_feeds, history_enabled=iteration_workflow_step.history_enabled,
                                   completion_prompt=iteration_workflow_step.completion_prompt)
diff --git a/superagi/agent/agent_message_builder.py b/superagi/agent/agent_message_builder.py
index 801d71fb3..65a14b7ef 100644
--- a/superagi/agent/agent_message_builder.py
+++ b/superagi/agent/agent_message_builder.py
@@ -1,17 +1,22 @@
 import time
 from typing import Tuple, List
+from sqlalchemy import asc
 from superagi.config.config import get_config
+from superagi.helper.prompt_reader import PromptReader
 from superagi.helper.token_counter import TokenCounter
+from superagi.models.agent_execution import AgentExecution
 from superagi.models.agent_execution_feed import AgentExecutionFeed
 from superagi.types.common import BaseMessage
+from superagi.models.agent_execution_config import AgentExecutionConfiguration
 
 
 class AgentLlmMessageBuilder:
     """Agent message builder for LLM agent."""
-    def __init__(self, session, llm_model: str, agent_id: int, agent_execution_id: int):
+    def __init__(self, session, llm, agent_id: int, agent_execution_id: int):
         self.session = session
-        self.llm_model = llm_model
+        self.llm = llm
+        self.llm_model = llm.get_model()
         self.agent_id = agent_id
         self.agent_execution_id = agent_execution_id
@@ -32,9 +37,15 @@ def build_agent_messages(self, prompt: str, agent_feeds: list, history_enabled=F
         if history_enabled:
             messages.append({"role": "system", "content": f"The current time and date is {time.strftime('%c')}"})
             base_token_limit = TokenCounter.count_message_tokens(messages, self.llm_model)
-            full_message_history = [{'role': role, 'content': feed} for role, feed in agent_feeds]
+            full_message_history = [{'role': agent_feed.role, 'content': agent_feed.feed, 'chat_id': agent_feed.id}
+                                    for agent_feed in agent_feeds]
             past_messages, current_messages = self._split_history(full_message_history,
-                                                                  token_limit - base_token_limit - max_output_token_limit)
+                                                                  ((token_limit - base_token_limit - max_output_token_limit) // 4) * 3)
+            if past_messages:
+                ltm_summary = self._build_ltm_summary(past_messages=past_messages,
+                                                      output_token_limit=(token_limit - base_token_limit - max_output_token_limit) // 4)
+                messages.append({"role": "assistant", "content": ltm_summary})
+
             for history in current_messages:
                 messages.append({"role": history["role"], "content": history["content"]})
             messages.append({"role": "user", "content": completion_prompt})
@@ -51,6 +62,7 @@ def _split_history(self, history: List, pending_token_limit: int) -> Tuple[List[
                                                           self.llm_model)
             hist_token_count += token_count
             if hist_token_count > pending_token_limit:
+                self._add_or_update_last_agent_feed_ltm_summary_id(str(history[i-1]['chat_id']))
                 return history[:i], history[i:]
             i -= 1
         return [], history
@@ -66,3 +78,76 @@ def _add_initial_feeds(self, agent_feeds: list, messages: list):
                                                       feed_group_id="DEFAULT")
             self.session.add(agent_execution_feed)
             self.session.commit()
+
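+    # The helpers below maintain a rolling long-term-memory (LTM) summary for
+    # the execution. build_agent_messages now gives recent history three
+    # quarters of the spare prompt budget and reserves the remaining quarter
+    # for the summary; with illustrative values of token_limit=4096,
+    # base_token_limit=96 and max_output_token_limit=1000, the 3000 spare
+    # tokens split into 2250 for verbatim history and 750 for the LTM summary.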
+    def _add_or_update_last_agent_feed_ltm_summary_id(self, last_agent_feed_ltm_summary_id):
+        execution = AgentExecution(id=self.agent_execution_id)
+        agent_execution_configs = {"last_agent_feed_ltm_summary_id": last_agent_feed_ltm_summary_id}
+        AgentExecutionConfiguration.add_or_update_agent_execution_config(self.session, execution,
+                                                                         agent_execution_configs)
+
+
+    def _build_ltm_summary(self, past_messages, output_token_limit) -> str:
+        ltm_prompt = self._build_prompt_for_ltm_summary(past_messages=past_messages,
+                                                        token_limit=output_token_limit)
+
+        summary = AgentExecutionConfiguration.fetch_value(self.session, self.agent_execution_id, "ltm_summary")
+        previous_ltm_summary = summary.value if summary is not None else ""
+
+        ltm_summary_base_token_limit = 10
+        if ((TokenCounter.count_text_tokens(ltm_prompt) + ltm_summary_base_token_limit + output_token_limit)
+                - TokenCounter.token_limit()) > 0:
+            last_agent_feed_ltm_summary_id = AgentExecutionConfiguration.fetch_value(self.session,
+                                                                                     self.agent_execution_id, "last_agent_feed_ltm_summary_id")
+            last_agent_feed_ltm_summary_id = int(last_agent_feed_ltm_summary_id.value)
+
+            past_messages = self.session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed,
+                                               AgentExecutionFeed.id) \
+                .filter(AgentExecutionFeed.agent_execution_id == self.agent_execution_id,
+                        AgentExecutionFeed.id > last_agent_feed_ltm_summary_id) \
+                .order_by(asc(AgentExecutionFeed.created_at)) \
+                .all()
+
+            past_messages = [
+                {'role': past_message.role, 'content': past_message.feed, 'chat_id': past_message.id}
+                for past_message in past_messages]
+
+            ltm_prompt = self._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(
+                previous_ltm_summary=previous_ltm_summary, past_messages=past_messages, token_limit=output_token_limit)
+
+        msgs = [{"role": "system", "content": "You are GPT Prompt writer"},
+                {"role": "assistant", "content": ltm_prompt}]
+        ltm_summary = self.llm.chat_completion(msgs)
+
+        execution = AgentExecution(id=self.agent_execution_id)
+        agent_execution_configs = {"ltm_summary": ltm_summary["content"]}
+        AgentExecutionConfiguration.add_or_update_agent_execution_config(session=self.session, execution=execution,
+                                                                         agent_execution_configs=agent_execution_configs)
+
+        return ltm_summary["content"]
+
+    def _build_prompt_for_ltm_summary(self, past_messages: List[BaseMessage], token_limit: int):
+        ltm_summary_prompt = PromptReader.read_agent_prompt(__file__, "agent_summary.txt")
+
+        past_messages_prompt = ""
+        for past_message in past_messages:
+            past_messages_prompt += past_message["role"] + ": " + past_message["content"] + "\n"
+        ltm_summary_prompt = ltm_summary_prompt.replace("{past_messages}", past_messages_prompt)
+
+        ltm_summary_prompt = ltm_summary_prompt.replace("{char_limit}", str(token_limit*4))
+
+        return ltm_summary_prompt
+
+    def _build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(self, previous_ltm_summary: str,
+                                                                           past_messages: List[BaseMessage], token_limit: int):
+        ltm_summary_prompt = PromptReader.read_agent_prompt(__file__, "agent_recursive_summary.txt")
+
+        ltm_summary_prompt = ltm_summary_prompt.replace("{previous_ltm_summary}", previous_ltm_summary)
+
+        past_messages_prompt = ""
+        for past_message in past_messages:
+            past_messages_prompt += past_message["role"] + ": " + past_message["content"] + "\n"
+        ltm_summary_prompt = ltm_summary_prompt.replace("{past_messages}", past_messages_prompt)
+
+        ltm_summary_prompt = ltm_summary_prompt.replace("{char_limit}", str(token_limit*4))
+
+        return ltm_summary_prompt
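# A minimal usage sketch of the reworked builder (illustrative: `session`,
# `llm`, `agent_id`, `agent_execution_id` and `prompt` are assumed to come from
# the calling step handler, and `llm` may be any wrapper exposing get_model()
# and chat_completion(), as the handlers below pass in):
#
#     agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(session, agent_execution_id)
#     messages = AgentLlmMessageBuilder(session, llm, agent_id, agent_execution_id) \
#         .build_agent_messages(prompt, agent_feeds, history_enabled=True,
#                               completion_prompt="Determine the next action.")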
diff --git a/superagi/agent/agent_tool_step_handler.py b/superagi/agent/agent_tool_step_handler.py
index 6b423b63c..673eb8fc2 100644
--- a/superagi/agent/agent_tool_step_handler.py
+++ b/superagi/agent/agent_tool_step_handler.py
@@ -95,7 +95,7 @@ def _process_input_instruction(self, agent_config, agent_execution_config, step_
         prompt = self._build_tool_input_prompt(step_tool, tool_obj, agent_execution_config)
         logger.info("Prompt: ", prompt)
         agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id)
-        messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+        messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
             .build_agent_messages(prompt, agent_feeds, history_enabled=step_tool.history_enabled,
                                   completion_prompt=step_tool.completion_prompt)
         # print(messages)
diff --git a/superagi/agent/prompts/agent_recursive_summary.txt b/superagi/agent/prompts/agent_recursive_summary.txt
new file mode 100644
index 000000000..d2f61af27
--- /dev/null
+++ b/superagi/agent/prompts/agent_recursive_summary.txt
@@ -0,0 +1,11 @@
+AI, you are provided with a previous summary of interactions between the system, user, and assistant, as well as additional conversations that were not included in the original summary.
+If the previous summary is empty, your task is to create a summary based solely on the new interactions.
+
+Previous Summary: {previous_ltm_summary}
+
+{past_messages}
+
+If the previous summary is not empty, your final summary should integrate the new interactions into the existing summary to create a comprehensive recap of all interactions.
+If the previous summary is empty, your summary should encapsulate the main points of the new conversations.
+In both cases, highlight the key issues discussed, decisions made, and any actions assigned.
+Please ensure that the final summary does not exceed {char_limit} characters.
\ No newline at end of file
diff --git a/superagi/agent/prompts/agent_summary.txt b/superagi/agent/prompts/agent_summary.txt
new file mode 100644
index 000000000..ce4877187
--- /dev/null
+++ b/superagi/agent/prompts/agent_summary.txt
@@ -0,0 +1,8 @@
+AI, your task is to generate a concise summary of the previous interactions between the system, user, and assistant.
+The interactions are as follows:
+
+{past_messages}
+
+This summary should encapsulate the main points of the conversation, highlighting the key issues discussed, decisions made, and any actions assigned.
+It should serve as a recap of the past interaction, providing a clear understanding of the conversation's context and outcomes.
+Please ensure that the summary does not exceed {char_limit} characters.
\ No newline at end of file
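# Both templates receive {char_limit} as token_limit * 4, a rough
# characters-per-token heuristic: an assumed output budget of 750 tokens
# renders as "does not exceed 3000 characters" in the final prompt.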
diff --git a/superagi/agent/queue_step_handler.py b/superagi/agent/queue_step_handler.py
index b0187be86..4b85202d2 100644
--- a/superagi/agent/queue_step_handler.py
+++ b/superagi/agent/queue_step_handler.py
@@ -82,7 +82,7 @@ def _process_input_instruction(self, step_tool):
         prompt = self._build_queue_input_prompt(step_tool)
         logger.info("Prompt: ", prompt)
         agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id)
-        messages = AgentLlmMessageBuilder(self.session, self.llm.get_model(), self.agent_id, self.agent_execution_id) \
+        messages = AgentLlmMessageBuilder(self.session, self.llm, self.agent_id, self.agent_execution_id) \
             .build_agent_messages(prompt, agent_feeds, history_enabled=step_tool.history_enabled,
                                   completion_prompt=step_tool.completion_prompt)
         current_tokens = TokenCounter.count_message_tokens(messages, self.llm.get_model())
diff --git a/superagi/models/agent_execution_config.py b/superagi/models/agent_execution_config.py
index b7ce61c16..598f1a1a5 100644
--- a/superagi/models/agent_execution_config.py
+++ b/superagi/models/agent_execution_config.py
@@ -116,7 +116,7 @@ def build_agent_execution_config(cls, session, agent, results_agent, results_age
         for key, value in results_agent_execution_dict.items():
             if key in results_agent_dict and value is not None:
                 results_agent_dict[key] = value
-
+
         # Construct the response
         if 'goal' in results_agent_dict:
             results_agent_dict['goal'] = eval(results_agent_dict['goal'])
@@ -147,6 +147,25 @@ def build_agent_execution_config(cls, session, agent, results_agent, results_age
             results_agent_dict['knowledge'] = int(results_agent_dict['knowledge'])
             knowledge = session.query(Knowledges).filter(Knowledges.id == results_agent_dict['knowledge']).first()
             knowledge_name = knowledge.name if knowledge is not None else ""
-            results_agent_dict['knowledge_name'] = knowledge_name
+            results_agent_dict['knowledge_name'] = knowledge_name
 
         return results_agent_dict
+
+    @classmethod
+    def fetch_value(cls, session, execution_id: int, key: str):
+        """
+        Fetches a specific execution configuration setting for an agent execution.
+
+        Args:
+            session: The database session object.
+            execution_id (int): The ID of the agent execution.
+            key (str): The key of the execution configuration setting.
+
+        Returns:
+            AgentExecutionConfiguration: The execution configuration setting object if found, else None.
+
+        """
+
+        return session.query(AgentExecutionConfiguration).filter(
+            AgentExecutionConfiguration.agent_execution_id == execution_id,
+            AgentExecutionConfiguration.key == key).first()
\ No newline at end of file
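# Note that fetch_value returns the AgentExecutionConfiguration row rather than
# the raw value, so callers unwrap it, as _build_ltm_summary does above:
#
#     summary = AgentExecutionConfiguration.fetch_value(session, execution_id, "ltm_summary")
#     previous_ltm_summary = summary.value if summary is not None else ""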
diff --git a/superagi/models/agent_execution_feed.py b/superagi/models/agent_execution_feed.py
index 356f6cfc7..61f449585 100644
--- a/superagi/models/agent_execution_feed.py
+++ b/superagi/models/agent_execution_feed.py
@@ -56,7 +56,7 @@ def get_last_tool_response(cls, session: Session, agent_execution_id: int, tool_
     @classmethod
     def fetch_agent_execution_feeds(cls, session, agent_execution_id: int):
         agent_execution = AgentExecution.find_by_id(session, agent_execution_id)
-        agent_feeds = session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed) \
+        agent_feeds = session.query(AgentExecutionFeed.role, AgentExecutionFeed.feed, AgentExecutionFeed.id) \
             .filter(AgentExecutionFeed.agent_execution_id == agent_execution_id,
                     AgentExecutionFeed.feed_group_id == agent_execution.current_feed_group_id) \
             .order_by(asc(AgentExecutionFeed.created_at)) \
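# fetch_agent_execution_feeds now also selects AgentExecutionFeed.id, which
# build_agent_messages carries through as 'chat_id' (e.g.
# {'role': 'user', 'content': '...', 'chat_id': 42}, with 42 an illustrative
# id) so that _split_history can record the last feed folded into the summary.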
diff --git a/tests/unit_tests/agent/test_agent_message_builder.py b/tests/unit_tests/agent/test_agent_message_builder.py
index 088cfe8cf..fb101764d 100644
--- a/tests/unit_tests/agent/test_agent_message_builder.py
+++ b/tests/unit_tests/agent/test_agent_message_builder.py
@@ -4,11 +4,12 @@
 from superagi.agent.agent_message_builder import AgentLlmMessageBuilder
 from superagi.models.agent_execution_feed import AgentExecutionFeed
 
+
 @patch('superagi.helper.token_counter.TokenCounter.token_limit')
 @patch('superagi.config.config.get_config')
 def test_build_agent_messages(mock_get_config, mock_token_limit):
     mock_session = Mock()
-    llm_model = 'model_1'
+    llm = Mock()
     agent_id = 1
     agent_execution_id = 1
     prompt = "start"
@@ -19,7 +20,7 @@
     mock_token_limit.return_value = 1000
     mock_get_config.return_value = 600
 
-    builder = AgentLlmMessageBuilder(mock_session, llm_model, agent_id, agent_execution_id)
+    builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)
     messages = builder.build_agent_messages(prompt, agent_feeds, history_enabled=True,
                                             completion_prompt=completion_prompt)
     # Test prompt message
@@ -38,3 +39,79 @@
     assert feed_obj.agent_id == agent_id
     assert feed_obj.feed == messages[i]["content"]
     assert feed_obj.role == messages[i]["role"]
+
+@patch('superagi.models.agent_execution_config.AgentExecutionConfiguration.fetch_value')
+@patch('superagi.models.agent_execution_config.AgentExecutionConfiguration.add_or_update_agent_execution_config')
+@patch('superagi.agent.agent_message_builder.AgentLlmMessageBuilder._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary')
+@patch('superagi.agent.agent_message_builder.AgentLlmMessageBuilder._build_prompt_for_ltm_summary')
+@patch('superagi.helper.token_counter.TokenCounter.count_text_tokens')
+@patch('superagi.helper.token_counter.TokenCounter.token_limit')
+def test_build_ltm_summary(mock_token_limit, mock_count_text_tokens, mock_build_prompt_for_ltm_summary,
+                           mock_build_prompt_for_recursive_ltm_summary, mock_add_or_update_agent_execution_config,
+                           mock_fetch_value):
+    mock_session = Mock()
+    llm = Mock()
+    agent_id = 1
+    agent_execution_id = 1
+
+    builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)
+
+    past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
+    output_token_limit = 100
+
+    mock_token_limit.return_value = 1000
+    mock_count_text_tokens.return_value = 200
+    mock_build_prompt_for_ltm_summary.return_value = "ltm_summary_prompt"
+    mock_build_prompt_for_recursive_ltm_summary.return_value = "recursive_ltm_summary_prompt"
+    mock_fetch_value.return_value = Mock(value="ltm_summary")
+    llm.chat_completion.return_value = {"content": "ltm_summary"}
+
+    ltm_summary = builder._build_ltm_summary(past_messages, output_token_limit)
+
+    assert ltm_summary == "ltm_summary"
+
+    mock_add_or_update_agent_execution_config.assert_called_once()
+
+    llm.chat_completion.assert_called_once_with([{"role": "system", "content": "You are GPT Prompt writer"},
+                                                 {"role": "assistant", "content": "ltm_summary_prompt"}])
+
+@patch('superagi.helper.prompt_reader.PromptReader.read_agent_prompt')
+def test_build_prompt_for_ltm_summary(mock_read_agent_prompt):
+    mock_session = Mock()
+    llm = Mock()
+    agent_id = 1
+    agent_execution_id = 1
+
+    builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)
+
+    past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
+    token_limit = 100
+
+    mock_read_agent_prompt.return_value = "{past_messages}\n{char_limit}"
+
+    prompt = builder._build_prompt_for_ltm_summary(past_messages, token_limit)
+
+    assert "user: Hello\nassistant: Hi\n" in prompt
+    assert "400" in prompt
+
+
+@patch('superagi.helper.prompt_reader.PromptReader.read_agent_prompt')
+def test_build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(mock_read_agent_prompt):
+    mock_session = Mock()
+    llm = Mock()
+    agent_id = 1
+    agent_execution_id = 1
+
+    builder = AgentLlmMessageBuilder(mock_session, llm, agent_id, agent_execution_id)
+
+    previous_ltm_summary = "Summary"
+    past_messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]
+    token_limit = 100
+
+    mock_read_agent_prompt.return_value = "{previous_ltm_summary}\n{past_messages}\n{char_limit}"
+
+    prompt = builder._build_prompt_for_recursive_ltm_summary_using_previous_ltm_summary(previous_ltm_summary, past_messages, token_limit)
+
+    assert "Summary" in prompt
+    assert "user: Hello\nassistant: Hi\n" in prompt
+    assert "400" in prompt
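# The new tests can be run in isolation with, for example:
#
#     python -m pytest tests/unit_tests/agent/test_agent_message_builder.py -q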